diff --git a/components/engine/.dockerignore b/components/engine/.dockerignore new file mode 100644 index 0000000000..4a56f2e00c --- /dev/null +++ b/components/engine/.dockerignore @@ -0,0 +1,7 @@ +bundles +.gopath +vendor/pkg +.go-pkg-cache +.git +hack/integration-cli-on-swarm/integration-cli-on-swarm + diff --git a/components/engine/.github/ISSUE_TEMPLATE.md b/components/engine/.github/ISSUE_TEMPLATE.md new file mode 100644 index 0000000000..7362480a4a --- /dev/null +++ b/components/engine/.github/ISSUE_TEMPLATE.md @@ -0,0 +1,64 @@ + + +**Description** + + + +**Steps to reproduce the issue:** +1. +2. +3. + +**Describe the results you received:** + + +**Describe the results you expected:** + + +**Additional information you deem important (e.g. issue happens only occasionally):** + +**Output of `docker version`:** + +``` +(paste your output here) +``` + +**Output of `docker info`:** + +``` +(paste your output here) +``` + +**Additional environment details (AWS, VirtualBox, physical, etc.):** diff --git a/components/engine/.github/PULL_REQUEST_TEMPLATE.md b/components/engine/.github/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 0000000000..426981828b --- /dev/null +++ b/components/engine/.github/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,30 @@ + + +**- What I did** + +**- How I did it** + +**- How to verify it** + +**- Description for the changelog** + + + +**- A picture of a cute animal (not mandatory but encouraged)** + diff --git a/components/engine/.gitignore b/components/engine/.gitignore new file mode 100644 index 0000000000..3e5ed1f82b --- /dev/null +++ b/components/engine/.gitignore @@ -0,0 +1,35 @@ +# Docker project generated files to ignore +# if you want to ignore files created by your editor/tools, +# please consider a global .gitignore https://help.github.com/articles/ignoring-files +*.exe +*.exe~ +*.orig +*.test +.*.swp +.DS_Store +# a .bashrc may be added to customize the build environment +.bashrc +.editorconfig +.gopath/ +.go-pkg-cache/ +autogen/ +bundles/ +cmd/dockerd/dockerd +cmd/docker/docker +contrib/builder/rpm/*/changelog +dockerversion/version_autogen.go +dockerversion/version_autogen_unix.go +docs/AWS_S3_BUCKET +docs/GITCOMMIT +docs/GIT_BRANCH +docs/VERSION +docs/_build +docs/_static +docs/_templates +docs/changed-files +# generated by man/md2man-all.sh +man/man1 +man/man5 +man/man8 +vendor/pkg/ +hack/integration-cli-on-swarm/integration-cli-on-swarm diff --git a/components/engine/.mailmap b/components/engine/.mailmap new file mode 100644 index 0000000000..49275048f0 --- /dev/null +++ b/components/engine/.mailmap @@ -0,0 +1,360 @@ +# Generate AUTHORS: hack/generate-authors.sh + +# Tip for finding duplicates (besides scanning the output of AUTHORS for name +# duplicates that aren't also email duplicates): scan the output of: +# git log --format='%aE - %aN' | sort -uf +# +# For explanation on this file format: man git-shortlog + +Patrick Stapleton +Shishir Mahajan +Erwin van der Koogh +Ahmed Kamal +Tejesh Mehta +Cristian Staretu +Cristian Staretu +Cristian Staretu +Marcus Linke +Aleksandrs Fadins +Christopher Latham +Hu Keping +Wayne Chang +Chen Chao +Daehyeok Mun + + + + + + +Guillaume J. 
Charmes + + + + + +Thatcher Peskens +Thatcher Peskens +Thatcher Peskens dhrp +Jérôme Petazzoni +Jérôme Petazzoni +Joffrey F +Joffrey F +Joffrey F +Tim Terhorst +Andy Smith + + + + + + + + + +Walter Stanish + +Roberto Hashioka +Konstantin Pelykh +David Sissitka +Nolan Darilek + +Benoit Chesneau +Jordan Arentsen +Daniel Garcia +Miguel Angel Fernández +Bhiraj Butala +Faiz Khan +Victor Lyuboslavsky +Jean-Baptiste Barth +Matthew Mueller + +Shih-Yuan Lee +Daniel Mizyrycki root +Jean-Baptiste Dalido + + + + + + + + + + + + + + +Sven Dowideit +Sven Dowideit +Sven Dowideit +Sven Dowideit <¨SvenDowideit@home.org.au¨> +Sven Dowideit +Sven Dowideit +Sven Dowideit +Akihiro Matsushima + +Alexander Morozov +Alexander Morozov + +O.S. Tezer + +Roberto G. Hashioka + + + + + +Sridhar Ratnakumar +Sridhar Ratnakumar +Liang-Chi Hsieh +Aaron L. Xu +Aleksa Sarai +Aleksa Sarai +Aleksa Sarai +Will Weaver +Timothy Hobbs +Nathan LeClaire +Nathan LeClaire + + + + +Matthew Heon + + + + +Francisco Carriedo + + + + +Brian Goff + +Erica Windisch +Erica Windisch + +Hollie Teal + + + +Jessica Frazelle +Jessica Frazelle +Jessica Frazelle +Jessica Frazelle +Jessica Frazelle +Jessica Frazelle +Jessica Frazelle +Jessica Frazelle + + + + +Thomas LEVEIL Thomas LÉVEIL + + +Antonio Murdaca +Antonio Murdaca +Antonio Murdaca +Antonio Murdaca +Antonio Murdaca +Darren Shepherd +Deshi Xiao +Deshi Xiao +Doug Davis +Giampaolo Mancini +K. Heller +Jacob Atzen +Jeff Nickoloff +Jérôme Petazzoni +John Harris +John Howard (VM) +John Howard (VM) +John Howard (VM) +John Howard (VM) +John Howard (VM) +Kevin Feyrer +Liao Qingwei +Luke Marsden +Madhu Venugopal +Mageee <21521230.zju.edu.cn> +Mansi Nahar +Mansi Nahar +Mary Anthony +Mary Anthony moxiegirl +Mary Anthony +mattyw +Michael Spetsiotis +Nik Nyby +Peter Jaffe +resouer +AJ Bowen soulshake +AJ Bowen soulshake +Tibor Vass +Tibor Vass +Vincent Bernat +Yestin Sun +bin liu +John Howard (VM) jhowardmsft +Ankush Agarwal +Tangi COLIN tangicolin +Allen Sun +Adrien Gallouët + +Anuj Bahuguna +Anusha Ragunathan +Avi Miller +Brent Salisbury +Chander G +Chun Chen +Ying Li +Daehyeok Mun + +Daniel, Dao Quang Minh +Daniel Nephin +Dave Tucker +Doug Tangren +Frederick F. Kautz IV +Ben Golub +Harold Cooper +hsinko <21551195@zju.edu.cn> +Josh Hawn +Justin Cormack + + +Kamil Domański +Lei Jitang + +Linus Heckemann + +Lynda O'Leary + +Marianna Tessel +Michael Huettermann +Moysés Borges + +Nigel Poulton +Qiang Huang + +Boaz Shuster +Shuwei Hao + +Soshi Katsuta + +Stefan Berger + +Stephen Day + +Toli Kuznets +Tristan Carel + + + +Vincent Demeester +Vishnu Kannan +xlgao-zju xlgao +Yu Changchun y00277921 +Yu Changchun + + + + +Hao Shu Wei + + + + + + + +Shengbo Song mYmNeo +Shengbo Song + +Sylvain Bellemare + + + +Arnaud Porterie + +David M. Karr + + + +Kenfe-Mickaël Laventure + + + + + +Runshen Zhu +Tom Barlow +Xianlu Bird +Dan Feldman +Harry Zhang +Alex Chen alexchen +Alex Ellis +Alicia Lauerman +Ben Bonnefoy +Bhumika Bayani +Bingshen Wang +Chen Chuanliang +Chen Mingjie +CUI Wei cuiwei13 +Daniel Grunwell +Daniel J Walsh +Dattatraya Kumbhar +David Sheets +Diego Siqueira +Eric G. 
Noriega +Evelyn Xu +Felix Ruess +Gabriel Nicolas Avellaneda +Gang Qiao <1373319223@qq.com> +Gustav Sinder +Harshal Patil +Helen Xie +Hyzhou Zhy <1187766782@qq.com> +Hyzhou Zhy +Jacob Tomlinson +Jiuyue Ma +Jose Diaz-Gonzalez +Josh Eveleth +Josh Wilson +Jim Galasyn +Kevin Kern +Konstantin Gribov +Kunal Kushwaha +Lajos Papp +Lyn +Markan Patel +Michael Käufl +Michal Minář +Michael Hudson-Doyle +Mike Casas +Milind Chawre +Ma Müller +Philipp Gillé +Roberto Muñoz Fernández +Sean Lee +Shukui Yang +Stefan S. +Steve Desmond +Sun Gengze <690388648@qq.com> +Tim Zju <21651152@zju.edu.cn> +Tõnis Tiigi +Wayne Song +Wang Jie +Wang Ping +Wang Yuexiao +Wewang Xiaorenfine +Wei Wu cizixs +Ying Li +Yong Tang +Yu Chengxia +Yu Peng +Yu Peng +Yao Zaiyong +Zhenkun Bi +Zhu Kunjia diff --git a/components/engine/AUTHORS b/components/engine/AUTHORS new file mode 100644 index 0000000000..4f908078a1 --- /dev/null +++ b/components/engine/AUTHORS @@ -0,0 +1,1854 @@ +# This file lists all individuals having contributed content to the repository. +# For how it is generated, see `hack/generate-authors.sh`. + +Aanand Prasad +Aaron Davidson +Aaron Feng +Aaron Huslage +Aaron L. Xu +Aaron Lehmann +Aaron Welch +Aaron.L.Xu +Abel Muiño +Abhijeet Kasurde +Abhinav Ajgaonkar +Abhishek Chanda +Abin Shahab +Adam Avilla +Adam Eijdenberg +Adam Kunk +Adam Miller +Adam Mills +Adam Singer +Adam Walz +Addam Hardy +Aditi Rajagopal +Aditya +Adolfo Ochagavía +Adria Casas +Adrian Moisey +Adrian Mouat +Adrian Oprea +Adrien Folie +Adrien Gallouët +Ahmed Kamal +Ahmet Alp Balkan +Aidan Feldman +Aidan Hobson Sayers +AJ Bowen +Ajey Charantimath +ajneu +Akihiro Matsushima +Akihiro Suda +Akira Koyasu +Akshay Karle +Al Tobey +alambike +Alan Scherger +Alan Thompson +Albert Callarisa +Albert Zhang +Aleksa Sarai +Aleksandrs Fadins +Alena Prokharchyk +Alessandro Boch +Alessio Biancalana +Alex Chan +Alex Chen +Alex Coventry +Alex Crawford +Alex Ellis +Alex Gaynor +Alex Olshansky +Alex Samorukov +Alex Warhawk +Alexander Artemenko +Alexander Boyd +Alexander Larsson +Alexander Morozov +Alexander Shopov +Alexandre Beslic +Alexandre Garnier +Alexandre González +Alexandru Sfirlogea +Alexey Guskov +Alexey Kotlyarov +Alexey Shamrin +Alexis THOMAS +Alfred Landrum +Ali Dehghani +Alicia Lauerman +Alihan Demir +Allen Madsen +Allen Sun +almoehi +Alvaro Saurin +Alvin Deng +Alvin Richards +amangoel +Amen Belayneh +Amir Goldstein +Amit Bakshi +Amit Krishnan +Amit Shukla +Amy Lindburg +Anand Patil +AnandkumarPatel +Anatoly Borodin +Anchal Agrawal +Anders Janmyr +Andre Dublin <81dublin@gmail.com> +Andre Granovsky +Andrea Luzzardi +Andrea Turli +Andreas Köhler +Andreas Savvides +Andreas Tiefenthaler +Andrei Gherzan +Andrew C. 
Bodine +Andrew Clay Shafer +Andrew Duckworth +Andrew France +Andrew Gerrand +Andrew Guenther +Andrew Hsu +Andrew Kuklewicz +Andrew Macgregor +Andrew Macpherson +Andrew Martin +Andrew McDonnell +Andrew Munsell +Andrew Po +Andrew Weiss +Andrew Williams +Andrews Medina +Andrey Petrov +Andrey Stolbovsky +André Martins +andy +Andy Chambers +andy diller +Andy Goldstein +Andy Kipp +Andy Rothfusz +Andy Smith +Andy Wilson +Anes Hasicic +Anil Belur +Anil Madhavapeddy +Ankush Agarwal +Anonmily +Anran Qiao +Anthon van der Neut +Anthony Baire +Anthony Bishopric +Anthony Dahanne +Anthony Sottile +Anton Löfgren +Anton Nikitin +Anton Polonskiy +Anton Tiurin +Antonio Murdaca +Antonis Kalipetis +Antony Messerli +Anuj Bahuguna +Anusha Ragunathan +apocas +Arash Deshmeh +ArikaChen +Arnaud Lefebvre +Arnaud Porterie +Arthur Barr +Arthur Gautier +Artur Meyster +Arun Gupta +Asbjørn Enge +averagehuman +Avi Das +Avi Miller +Avi Vaid +ayoshitake +Azat Khuyiyakhmetov +Bardia Keyoumarsi +Barnaby Gray +Barry Allard +Bartłomiej Piotrowski +Bastiaan Bakker +bdevloed +Ben Bonnefoy +Ben Firshman +Ben Golub +Ben Hall +Ben Sargent +Ben Severson +Ben Toews +Ben Wiklund +Benjamin Atkin +Benjamin Boudreau +Benoit Chesneau +Bernerd Schaefer +Bert Goethals +Bharath Thiruveedula +Bhiraj Butala +Bhumika Bayani +Bilal Amarni +Bill W +bin liu +Bingshen Wang +Blake Geno +Boaz Shuster +bobby abbott +Boris Pruessmann +Boshi Lian +boucher +Bouke Haarsma +Boyd Hemphill +boynux +Bradley Cicenas +Bradley Wright +Brandon Liu +Brandon Philips +Brandon Rhodes +Brendan Dixon +Brent Salisbury +Brett Higgins +Brett Kochendorfer +Brian (bex) Exelbierd +Brian Bland +Brian DeHamer +Brian Dorsey +Brian Flad +Brian Goff +Brian McCallister +Brian Olsen +Brian Shumate +Brian Torres-Gil +Brian Trump +Brice Jaglin +Briehan Lombaard +Bruno Bigras +Bruno Binet +Bruno Gazzera +Bruno Renié +Bruno Tavares +Bryan Bess +Bryan Boreham +Bryan Matsuo +Bryan Murphy +buddhamagnet +Burke Libbey +Byung Kang +Caleb Spare +Calen Pennington +Cameron Boehmer +Cameron Spear +Campbell Allen +Candid Dauth +Cao Weiwei +Carl Henrik Lunde +Carl Loa Odin +Carl X. Su +Carlos Alexandro Becker +Carlos Sanchez +Carol Fager-Higgins +Cary +Casey Bisson +Ce Gao +Cedric Davies +Cezar Sa Espinola +Chad Swenson +Chance Zibolski +Chander G +Charles Chan +Charles Hooper +Charles Law +Charles Lindsay +Charles Merriam +Charles Sarrazin +Charles Smith +Charlie Drage +Charlie Lewis +Chase Bolt +ChaYoung You +Chen Chao +Chen Chuanliang +Chen Hanxiao +Chen Mingjie +cheney90 +Chewey +Chia-liang Kao +chli +Cholerae Hu +Chris Alfonso +Chris Armstrong +Chris Dituri +Chris Fordham +Chris Gavin +Chris Gibson +Chris Khoo +Chris McKinnel +Chris Seto +Chris Snow +Chris St. Pierre +Chris Stivers +Chris Swan +Chris Wahl +Chris Weyl +chrismckinnel +Christian Berendt +Christian Böhme +Christian Persson +Christian Rotzoll +Christian Simon +Christian Stefanescu +ChristoperBiscardi +Christophe Mehay +Christophe Troestler +Christopher Currie +Christopher Jones +Christopher Latham +Christopher Rigor +Christy Perez +Chun Chen +Ciro S. 
Costa +Clayton Coleman +Clinton Kitson +Coenraad Loubser +Colin Dunklau +Colin Hebert +Colin Rice +Colin Walters +Collin Guarino +Colm Hally +companycy +Corey Farrell +Cory Forsyth +cressie176 +CrimsonGlory +Cristian Staretu +cristiano balducci +Cruceru Calin-Cristian +CUI Wei +Cyprian Gracz +Cyril F +Daan van Berkel +Daehyeok Mun +Dafydd Crosby +dalanlan +Damian Smyth +Damien Nadé +Damien Nozay +Damjan Georgievski +Dan Anolik +Dan Buch +Dan Cotora +Dan Feldman +Dan Griffin +Dan Hirsch +Dan Keder +Dan Levy +Dan McPherson +Dan Stine +Dan Williams +Daniel Antlinger +Daniel Exner +Daniel Farrell +Daniel Garcia +Daniel Gasienica +Daniel Grunwell +Daniel Hiltgen +Daniel J Walsh +Daniel Menet +Daniel Mizyrycki +Daniel Nephin +Daniel Norberg +Daniel Nordberg +Daniel Robinson +Daniel S +Daniel Von Fange +Daniel X Moore +Daniel YC Lin +Daniel Zhang +Daniel, Dao Quang Minh +Danny Berger +Danny Yates +Darren Coxall +Darren Shepherd +Darren Stahl +Dattatraya Kumbhar +Davanum Srinivas +Dave Barboza +Dave Henderson +Dave MacDonald +Dave Tucker +David Anderson +David Calavera +David Corking +David Cramer +David Currie +David Davis +David Dooling +David Gageot +David Gebler +David Lawrence +David Lechner +David M. Karr +David Mackey +David Mat +David Mcanulty +David McKay +David Pelaez +David R. Jenni +David Röthlisberger +David Sheets +David Sissitka +David Trott +David Williamson +David Xia +David Young +Davide Ceretti +Dawn Chen +dbdd +dcylabs +decadent +deed02392 +Deng Guangxing +Deni Bertovic +Denis Defreyne +Denis Gladkikh +Denis Ollier +Dennis Chen +Dennis Docter +Derek +Derek +Derek Ch +Derek McGowan +Deric Crago +Deshi Xiao +devmeyster +Devvyn Murphy +Dharmit Shah +Diego Romero +Diego Siqueira +Dieter Reuter +Dillon Dixon +Dima Stopel +Dimitri John Ledkov +Dimitris Rozakis +Dimitry Andric +Dinesh Subhraveti +Ding Fei +Diogo Monica +DiuDiugirl +Djibril Koné +dkumor +Dmitri Logvinenko +Dmitri Shuralyov +Dmitry Demeshchuk +Dmitry Gusev +Dmitry Kononenko +Dmitry Shyshkin +Dmitry Smirnov +Dmitry V. Krivenok +Dmitry Vorobev +Dolph Mathews +Dominik Dingel +Dominik Finkbeiner +Dominik Honnef +Don Kirkby +Don Kjer +Don Spaulding +Donald Huang +Dong Chen +Donovan Jones +Doron Podoleanu +Doug Davis +Doug MacEachern +Doug Tangren +Dr Nic Williams +dragon788 +Dražen Lučanin +Drew Erny +Dustin Sallings +Ed Costello +Edmund Wagner +Eiichi Tsukata +Eike Herzbach +Eivin Giske Skaaren +Eivind Uggedal +Elan Ruusamäe +Elena Morozova +Elias Faxö +Elias Probst +Elijah Zupancic +eluck +Elvir Kuric +Emil Hernvall +Emily Maier +Emily Rose +Emir Ozer +Enguerran +Eohyung Lee +epeterso +Eric Barch +Eric Curtin +Eric G. Noriega +Eric Hanchrow +Eric Lee +Eric Myhre +Eric Paris +Eric Rafaloff +Eric Rosenberg +Eric Sage +Eric Yang +Eric-Olivier Lamey +Erica Windisch +Erik Bray +Erik Dubbelboer +Erik Hollensbe +Erik Inge Bolsø +Erik Kristensen +Erik St. 
Martin +Erik Weathers +Erno Hopearuoho +Erwin van der Koogh +Euan +Eugene Yakubovich +eugenkrizo +evalle +Evan Allrich +Evan Carmi +Evan Hazlett +Evan Hazlett +Evan Krall +Evan Phoenix +Evan Wies +Evelyn Xu +Everett Toews +Evgeny Vereshchagin +Ewa Czechowska +Eystein Måløy Stenberg +ezbercih +Ezra Silvera +Fabian Lauer +Fabiano Rosas +Fabio Falci +Fabio Rapposelli +Fabio Rehm +Fabrizio Regini +Fabrizio Soppelsa +Faiz Khan +falmp +Fangyuan Gao <21551127@zju.edu.cn> +Fareed Dudhia +Fathi Boudra +Federico Gimenez +Felipe Oliveira +Felix Abecassis +Felix Geisendörfer +Felix Hupfeld +Felix Rabe +Felix Ruess +Felix Schindler +Fengtu Wang +Ferenc Szabo +Fernando +Fero Volar +Ferran Rodenas +Filipe Brandenburger +Filipe Oliveira +fl0yd +Flavio Castelli +Flavio Crisciani +FLGMwt +Florian +Florian Klein +Florian Maier +Florian Weingarten +Florin Asavoaie +fonglh +fortinux +Foysal Iqbal +Francesc Campoy +Francis Chuang +Francisco Carriedo +Francisco Souza +Frank Groeneveld +Frank Herrmann +Frank Macreery +Frank Rosquin +Fred Lifton +Frederick F. Kautz IV +Frederik Loeffert +Frederik Nordahl Jul Sabroe +Freek Kalter +frosforever +fy2462 +Félix Baylac-Jacqué +Félix Cantournet +Gabe Rosenhouse +Gabor Nagy +Gabriel Linder +Gabriel Monroy +Gabriel Nicolas Avellaneda +Gaetan de Villele +Galen Sampson +Gang Qiao +Gareth Rushgrove +Garrett Barboza +Gary Schaetz +Gaurav +gautam, prasanna +Gaël PORTAY +Genki Takiuchi +GennadySpb +Geoffrey Bachelet +George MacRorie +George Xie +Georgi Hristozov +Gereon Frey +German DZ +Gert van Valkenhoef +Gerwim +Giampaolo Mancini +Gianluca Borello +Gildas Cuisinier +gissehel +Giuseppe Mazzotta +Gleb Fotengauer-Malinovskiy +Gleb M Borisov +Glyn Normington +GoBella +Goffert van Gool +Gosuke Miyashita +Gou Rao +Govinda Fichtner +Grant Reaber +Graydon Hoare +Greg Fausak +Greg Pflaum +Greg Thornton +Grzegorz Jaśkiewicz +Guilhem Lettron +Guilherme Salgado +Guillaume Dufour +Guillaume J. Charmes +guoxiuyan +Gurjeet Singh +Guruprasad +Gustav Sinder +gwx296173 +Günter Zöchbauer +Hans Kristian Flaatten +Hans Rødtang +Hao Shu Wei +Hao Zhang <21521210@zju.edu.cn> +Harald Albers +Harley Laue +Harold Cooper +Harry Zhang +Harshal Patil +Harshal Patil +He Simei +He Xin +heartlock <21521209@zju.edu.cn> +Hector Castro +Helen Xie +Henning Sprang +Hobofan +Hollie Teal +Hong Xu +Hongbin Lu +hsinko <21551195@zju.edu.cn> +Hu Keping +Hu Tao +Huanzhong Zhang +Huayi Zhang +Hugo Duncan +Hugo Marisco <0x6875676f@gmail.com> +Hunter Blanks +huqun +Huu Nguyen +hyeongkyu.lee +hyp3rdino +Hyzhou Zhy +Ian Babrou +Ian Bishop +Ian Bull +Ian Calvert +Ian Campbell +Ian Lee +Ian Main +Ian Philpot +Ian Truslove +Iavael +Icaro Seara +Ignacio Capurro +Igor Dolzhikov +Iliana Weller +Ilkka Laukkanen +Ilya Dmitrichenko +Ilya Gusev +ILYA Khlopotov +imre Fitos +inglesp +Ingo Gottwald +Isaac Dupree +Isabel Jimenez +Isao Jonas +Ivan Babrou +Ivan Fraixedes +Ivan Grcic +Ivan Markin +J Bruni +J. 
Nunn +Jack Danger Canty +Jacob Atzen +Jacob Edelman +Jacob Tomlinson +Jake Champlin +Jake Moshenko +Jake Sanders +jakedt +James Allen +James Carey +James Carr +James DeFelice +James Harrison Fisher +James Kyburz +James Kyle +James Lal +James Mills +James Nesbitt +James Nugent +James Turnbull +Jamie Hannaford +Jamshid Afshar +Jan Keromnes +Jan Koprowski +Jan Pazdziora +Jan Toebes +Jan-Gerd Tenberge +Jan-Jaap Driessen +Jana Radhakrishnan +Jannick Fahlbusch +Janonymous +Januar Wayong +Jared Biel +Jared Hocutt +Jaroslaw Zabiello +jaseg +Jasmine Hegman +Jason Divock +Jason Giedymin +Jason Green +Jason Hall +Jason Heiss +Jason Livesay +Jason McVetta +Jason Plum +Jason Shepherd +Jason Smith +Jason Sommer +Jason Stangroome +jaxgeller +Jay +Jay +Jay Kamat +Jean-Baptiste Barth +Jean-Baptiste Dalido +Jean-Christophe Berthon +Jean-Paul Calderone +Jean-Pierre Huynh +Jean-Tiare Le Bigot +Jeff Anderson +Jeff Johnston +Jeff Lindsay +Jeff Mickey +Jeff Minard +Jeff Nickoloff +Jeff Silberman +Jeff Welch +Jeffrey Bolle +Jeffrey Morgan +Jeffrey van Gogh +Jenny Gebske +Jeremy Chambers +Jeremy Grosser +Jeremy Price +Jeremy Qian +Jeremy Unruh +Jeremy Yallop +Jeroen Jacobs +Jesse Dearing +Jesse Dubay +Jessica Frazelle +Jezeniel Zapanta +jgeiger +Jhon Honce +Ji.Zhilong +Jian Zhang +jianbosun +Jie Luo +Jilles Oldenbeuving +Jim Alateras +Jim Galasyn +Jim Minter +Jim Perrin +Jimmy Cuadra +Jimmy Puckett +Jimmy Song +jimmyxian +Jinsoo Park +Jiri Popelka +Jiuyue Ma +Jiří Župka +jjy +jmzwcn +Joao Fernandes +Joe Beda +Joe Doliner +Joe Ferguson +Joe Gordon +Joe Shaw +Joe Van Dyk +Joel Friedly +Joel Handwell +Joel Hansson +Joel Wurtz +Joey Geiger +Joey Gibson +Joffrey F +Johan Euphrosine +Johan Rydberg +Johanan Lieberman +Johannes 'fish' Ziemke +John Costa +John Feminella +John Gardiner Myers +John Gossman +John Harris +John Howard (VM) +John Laswell +John Maguire +John Mulhausen +John OBrien III +John Starks +John Stephens +John Tims +John V. Martinez +John Warwick +John Willis +Jon Johnson +Jon Wedaman +Jonas Pfenniger +Jonathan A. Sternberg +Jonathan Boulle +Jonathan Camp +Jonathan Dowland +Jonathan Lebon +Jonathan Lomas +Jonathan McCrohan +Jonathan Mueller +Jonathan Pares +Jonathan Rudenberg +Jonathan Stoppani +Jonh Wendell +Joni Sar +Joost Cassee +Jordan +Jordan Arentsen +Jordan Sissel +Jorge Marin +Jose Diaz-Gonzalez +Joseph Anthony Pasquale Holsten +Joseph Hager +Joseph Kern +Joseph Rothrock +Josh +Josh Bodah +Josh Chorlton +Josh Eveleth +Josh Hawn +Josh Horwitz +Josh Poimboeuf +Josh Wilson +Josiah Kiehl +José Tomás Albornoz +JP +jrabbit +jroenf +Julian Taylor +Julien Barbier +Julien Bisconti +Julien Bordellier +Julien Dubois +Julien Kassar +Julien Maitrehenry +Julien Pervillé +Julio Montes +Jun-Ru Chang +Jussi Nummelin +Justas Brazauskas +Justin Cormack +Justin Force +Justin Plock +Justin Simonelis +Justin Terry +Justyn Temme +Jyrki Puttonen +Jérôme Petazzoni +Jörg Thalheim +K. Heller +Kai Blin +Kai Qiang Wu(Kennan) +Kamil Domański +kamjar gerami +Kanstantsin Shautsou +Kara Alexandra +Karan Lyons +Kareem Khazem +kargakis +Karl Grzeszczak +Karol Duleba +Karthik Nayak +Katie McLaughlin +Kato Kazuyoshi +Katrina Owen +Kawsar Saiyeed +Kay Yan +kayrus +Ke Li +Ke Xu +Kei Ohmura +Keith Hudgins +Keli Hu +Ken Cochrane +Ken Herner +Ken ICHIKAWA +Kenfe-Mickaël Laventure +Kenjiro Nakayama +Kent Johnson +Kevin "qwazerty" Houdebert +Kevin Burke +Kevin Clark +Kevin Feyrer +Kevin J. Lynagh +Kevin Jing Qiu +Kevin Kern +Kevin Menard +Kevin P. 
Kucharczyk +Kevin Richardson +Kevin Shi +Kevin Wallace +Kevin Yap +kevinmeredith +Keyvan Fatehi +kies +Kim BKC Carlbacker +Kim Eik +Kimbro Staken +Kir Kolyshkin +Kiran Gangadharan +Kirill Kolyshkin +Kirill SIbirev +knappe +Kohei Tsuruta +Koichi Shiraishi +Konrad Kleine +Konstantin Gribov +Konstantin L +Konstantin Pelykh +Krasi Georgiev +Krasimir Georgiev +Kris-Mikael Krister +Kristian Haugene +Kristina Zabunova +krrg +Kun Zhang +Kunal Kushwaha +Kyle Conroy +Kyle Linden +kyu +Lachlan Coote +Lai Jiangshan +Lajos Papp +Lakshan Perera +Lalatendu Mohanty +Lance Chen +Lance Kinley +Lars Butler +Lars Kellogg-Stedman +Lars R. Damerow +Lars-Magnus Skog +Laszlo Meszaros +Laura Frank +Laurent Erignoux +Laurie Voss +Leandro Siqueira +Lee Chao <932819864@qq.com> +Lee, Meng-Han +leeplay +Lei Jitang +Len Weincier +Lennie +Leo Gallucci +Leszek Kowalski +Levi Blackstone +Levi Gross +Lewis Daly +Lewis Marshall +Lewis Peckover +Liam Macgillavry +Liana Lo +Liang Mingqiang +Liang-Chi Hsieh +Liao Qingwei +Lily Guo +limsy +Lin Lu +LingFaKe +Linus Heckemann +Liran Tal +Liron Levin +Liu Bo +Liu Hua +liwenqi +lixiaobing10051267 +Liz Zhang +LIZAO LI +Lizzie Dixon <_@lizzie.io> +Lloyd Dewolf +Lokesh Mandvekar +longliqiang88 <394564827@qq.com> +Lorenz Leutgeb +Lorenzo Fontana +Louis Opter +Luca Favatella +Luca Marturana +Luca Orlandi +Luca-Bogdan Grigorescu +Lucas Chan +Lucas Chi +Luciano Mores +Luis Martínez de Bartolomé Izquierdo +Luiz Svoboda +Lukas Waslowski +lukaspustina +Lukasz Zajaczkowski +Luke Marsden +Lyn +Lynda O'Leary +Lénaïc Huard +Ma Müller +Ma Shimiao +Mabin +Madhan Raj Mookkandy +Madhav Puri +Madhu Venugopal +Mageee +Mahesh Tiyyagura +malnick +Malte Janduda +Manfred Touron +Manfred Zabarauskas +Manjunath A Kumatagi +Mansi Nahar +Manuel Meurer +Manuel Woelker +mapk0y +Marc Abramowitz +Marc Kuo +Marc Tamsky +Marcelo Salazar +Marco Hennings +Marcus Cobden +Marcus Farkas +Marcus Linke +Marcus Ramberg +Marek Goldmann +Marian Marinov +Marianna Tessel +Mario Loriedo +Marius Gundersen +Marius Sturm +Marius Voila +Mark Allen +Mark McGranaghan +Mark McKinstry +Mark Milstein +Mark Parker +Mark West +Markan Patel +Marko Mikulicic +Marko Tibold +Markus Fix +Martijn Dwars +Martijn van Oosterhout +Martin Honermeyer +Martin Kelly +Martin Mosegaard Amdisen +Martin Redmond +Mary Anthony +Masahito Zembutsu +Masayuki Morita +Mason Malone +Mateusz Sulima +Mathias Monnerville +Mathieu Le Marec - Pasquet +Mathieu Parent +Matt Apperson +Matt Bachmann +Matt Bentley +Matt Haggard +Matt Hoyle +Matt McCormick +Matt Moore +Matt Richardson +Matt Robenolt +Matthew Heon +Matthew Lapworth +Matthew Mayer +Matthew Mueller +Matthew Riley +Matthias Klumpp +Matthias Kühnle +Matthias Rampke +Matthieu Hauglustaine +mattymo +mattyw +Mauricio Garavaglia +mauriyouth +Max Shytikov +Maxim Fedchyshyn +Maxim Ivanov +Maxim Kulkin +Maxim Treskin +Maxime Petazzoni +Meaglith Ma +meejah +Megan Kostick +Mehul Kar +Mei ChunTao +Mengdi Gao +Mert Yazıcıoğlu +mgniu +Micah Zoltu +Michael A. 
Smith +Michael Bridgen +Michael Brown +Michael Chiang +Michael Crosby +Michael Currie +Michael Friis +Michael Gorsuch +Michael Grauer +Michael Holzheu +Michael Hudson-Doyle +Michael Huettermann +Michael Irwin +Michael Käufl +Michael Neale +Michael Prokop +Michael Scharf +Michael Spetsiotis +Michael Stapelberg +Michael Steinert +Michael Thies +Michael West +Michal Fojtik +Michal Gebauer +Michal Jemala +Michal Minář +Michal Wieczorek +Michaël Pailloncy +Michał Czeraszkiewicz +Michiel@unhosted +Mickaël FORTUNATO +Miguel Angel Fernández +Miguel Morales +Mihai Borobocea +Mihuleacc Sergiu +Mike Brown +Mike Casas +Mike Chelen +Mike Danese +Mike Dillon +Mike Dougherty +Mike Gaffney +Mike Goelzer +Mike Leone +Mike MacCana +Mike Naberezny +Mike Snitzer +mikelinjie <294893458@qq.com> +Mikhail Sobolev +Milind Chawre +Miloslav Trmač +mingqing +Mingzhen Feng +Misty Stanley-Jones +Mitch Capper +mlarcher +Mohammad Banikazemi +Mohammed Aaqib Ansari +Mohit Soni +Morgan Bauer +Morgante Pell +Morgy93 +Morten Siebuhr +Morton Fox +Moysés Borges +mqliang +Mrunal Patel +msabansal +mschurenko +Muayyad Alsadi +muge +Mustafa Akın +Muthukumar R +Máximo Cuadros +Médi-Rémi Hashim +Nahum Shalman +Nakul Pathak +Nalin Dahyabhai +Nan Monnand Deng +Naoki Orii +Natalie Parker +Natanael Copa +Nate Brennand +Nate Eagleson +Nate Jones +Nathan Hsieh +Nathan Kleyn +Nathan LeClaire +Nathan McCauley +Nathan Williams +Neal McBurnett +Neil Peterson +Nelson Chen +Neyazul Haque +Nghia Tran +Niall O'Higgins +Nicholas E. Rabenau +nick +Nick DeCoursin +Nick Irvine +Nick Parker +Nick Payne +Nick Stenning +Nick Stinemates +NickrenREN +Nicola Kabar +Nicolas Borboën +Nicolas De loof +Nicolas Dudebout +Nicolas Goy +Nicolas Kaiser +Nicolás Hock Isaza +Nigel Poulton +Nik Nyby +Nikhil Chawla +NikolaMandic +Nikolas Garofil +Nikolay Milovanov +Nirmal Mehta +Nishant Totla +NIWA Hideyuki +Noah Treuhaft +noducks +Nolan Darilek +nponeccop +Nuutti Kotivuori +nzwsch +O.S. Tezer +objectified +OddBloke +odk- +Oguz Bilgic +Oh Jinkyun +Ohad Schneider +ohmystack +Ole Reifschneider +Oliver Neal +Olivier Gambier +Olle Jonsson +Oriol Francès +orkaa +Oskar Niburski +Otto Kekäläinen +Ovidio Mallo +oyld +ozlerhakan +paetling +pandrew +panticz +Paolo G. 
Giarrusso +Pascal Borreli +Pascal Hartig +Patrick Böänziger +Patrick Devine +Patrick Hemmer +Patrick Stapleton +pattichen +Paul +paul +Paul Annesley +Paul Bellamy +Paul Bowsher +Paul Furtado +Paul Hammond +Paul Jimenez +Paul Kehrer +Paul Lietar +Paul Liljenberg +Paul Morie +Paul Nasrat +Paul Weaver +Paulo Ribeiro +Pavel Lobashov +Pavel Pospisil +Pavel Sutyrin +Pavel Tikhomirov +Pavlos Ratis +Pavol Vargovcik +Peeyush Gupta +Peggy Li +Pei Su +Penghan Wang +Per Weijnitz +perhapszzy@sina.com +Peter Bourgon +Peter Braden +Peter Choi +Peter Dave Hello +Peter Edge +Peter Ericson +Peter Esbensen +Peter Jaffe +Peter Malmgren +Peter Salvatore +Peter Volpe +Peter Waller +Petr Švihlík +Phil +Phil Estes +Phil Spitler +Philip Monroe +Philipp Gillé +Philipp Wahala +Philipp Weissensteiner +Phillip Alexander +phineas +pidster +Piergiuliano Bossi +Pierre +Pierre Carrier +Pierre Dal-Pra +Pierre Wacrenier +Pierre-Alain RIVIERE +Piotr Bogdan +pixelistik +Porjo +Poul Kjeldager Sørensen +Pradeep Chhetri +Prasanna Gautam +Pratik Karki +Prayag Verma +Przemek Hejman +Pure White +pysqz +qhuang +Qiang Huang +Qinglan Peng +qudongfang +Quentin Brossard +Quentin Perez +Quentin Tayssier +r0n22 +Rafal Jeczalik +Rafe Colton +Raghavendra K T +Raghuram Devarakonda +Rajat Pandit +Rajdeep Dua +Ralf Sippl +Ralle +Ralph Bean +Ramkumar Ramachandra +Ramon Brooker +Ramon van Alteren +Ray Tsang +ReadmeCritic +Recursive Madman +Reficul +Regan McCooey +Remi Rampin +Remy Suen +Renato Riccieri Santos Zannon +resouer +rgstephens +Rhys Hiltner +Ricardo N Feliciano +Rich Moyse +Rich Seymour +Richard +Richard Burnison +Richard Harvey +Richard Mathie +Richard Metzler +Richard Scothern +Richo Healey +Rick Bradley +Rick van de Loo +Rick Wieman +Rik Nijessen +Riku Voipio +Riley Guerin +Ritesh H Shukla +Riyaz Faizullabhoy +Rob Vesse +Robert Bachmann +Robert Bittle +Robert Obryk +Robert Stern +Robert Terhaar +Robert Wallis +Roberto G. Hashioka +Roberto Muñoz Fernández +Robin Naundorf +Robin Schneider +Robin Speekenbrink +robpc +Rodolfo Carvalho +Rodrigo Vaz +Roel Van Nyen +Roger Peppe +Rohit Jnagal +Rohit Kadam +Rojin George +Roland Huß +Roland Kammerer +Roland Moriz +Roma Sokolov +Roman Strashkin +Ron Smits +Ron Williams +root +root +root +root +root +Rory Hunter +Rory McCune +Ross Boucher +Rovanion Luckey +Rozhnov Alexandr +rsmoorthy +Rudolph Gottesheim +Rui Lopes +Runshen Zhu +Ryan Abrams +Ryan Anderson +Ryan Aslett +Ryan Belgrave +Ryan Detzel +Ryan Fowler +Ryan Liu +Ryan McLaughlin +Ryan O'Donnell +Ryan Seto +Ryan Thomas +Ryan Trauntvein +Ryan Wallner +Ryan Zhang +ryancooper7 +RyanDeng +Rémy Greinhofer +s. rannou +s00318865 +Sabin Basyal +Sachin Joshi +Sagar Hani +Sainath Grandhi +sakeven +Sally O'Malley +Sam Abed +Sam Alba +Sam Bailey +Sam J Sharpe +Sam Neirinck +Sam Reis +Sam Rijs +Sambuddha Basu +Sami Wagiaalla +Samuel Andaya +Samuel Dion-Girardeau +Samuel Karp +Samuel PHAN +Sandeep Bansal +Sankar சங்கர் +Sanket Saurav +Santhosh Manohar +sapphiredev +Satnam Singh +satoru +Satoshi Amemiya +Satoshi Tagomori +Scott Bessler +Scott Collier +Scott Johnston +Scott Stamp +Scott Walls +sdreyesg +Sean Christopherson +Sean Cronin +Sean Lee +Sean McIntyre +Sean OMeara +Sean P. 
Kane +Sean Rodman +Sebastiaan van Steenis +Sebastiaan van Stijn +Senthil Kumar Selvaraj +Senthil Kumaran +SeongJae Park +Seongyeol Lim +Serge Hallyn +Sergey Alekseev +Sergey Evstifeev +Sergii Kabashniuk +Serhat Gülçiçek +Sevki Hasirci +Shane Canon +Shane da Silva +shaunol +Shawn Landden +Shawn Siefkas +shawnhe +Shayne Wang +Shekhar Gulati +Sheng Yang +Shengbo Song +Shev Yan +Shih-Yuan Lee +Shijiang Wei +Shishir Mahajan +Shoubhik Bose +Shourya Sarcar +shuai-z +Shukui Yang +Shuwei Hao +Sian Lerk Lau +sidharthamani +Silas Sewell +Silvan Jegen +Simei He +Simon Eskildsen +Simon Ferquel +Simon Leinen +Simon Taranto +Sindhu S +Sjoerd Langkemper +skaasten +Solganik Alexander +Solomon Hykes +Song Gao +Soshi Katsuta +Soulou +Spencer Brown +Spencer Smith +Sridatta Thatipamala +Sridhar Ratnakumar +Srini Brahmaroutu +srinsriv +Steeve Morin +Stefan Berger +Stefan J. Wernli +Stefan Praszalowicz +Stefan S. +Stefan Scherer +Stefan Staudenmeyer +Stefan Weil +Stephen Crosby +Stephen Day +Stephen Drake +Stephen Rust +Steve Desmond +Steve Dougherty +Steve Durrheimer +Steve Francia +Steve Koch +Steven Burgess +Steven Erenst +Steven Hartland +Steven Iveson +Steven Merrill +Steven Richards +Steven Taylor +Subhajit Ghosh +Sujith Haridasan +Sun Gengze <690388648@qq.com> +Sunny Gogoi +Suryakumar Sudar +Sven Dowideit +Swapnil Daingade +Sylvain Baubeau +Sylvain Bellemare +Sébastien +Sébastien Luttringer +Sébastien Stormacq +Tabakhase +Tadej Janež +TAGOMORI Satoshi +tang0th +Tangi COLIN +Tatsuki Sugiura +Tatsushi Inagaki +Taylor Jones +tbonza +Ted M. Young +Tehmasp Chaudhri +Tejesh Mehta +terryding77 <550147740@qq.com> +tgic +Thatcher Peskens +theadactyl +Thell 'Bo' Fowler +Thermionix +Thijs Terlouw +Thomas Bikeev +Thomas Frössman +Thomas Gazagnaire +Thomas Grainger +Thomas Hansen +Thomas Leonard +Thomas LEVEIL +Thomas Orozco +Thomas Riccardi +Thomas Schroeter +Thomas Sjögren +Thomas Swift +Thomas Tanaka +Thomas Texier +Tianon Gravi +Tianyi Wang +Tibor Vass +Tiffany Jernigan +Tiffany Low +Tim Bosse +Tim Dettrick +Tim Düsterhus +Tim Hockin +Tim Ruffles +Tim Smith +Tim Terhorst +Tim Wang +Tim Waugh +Tim Wraight +Tim Zju <21651152@zju.edu.cn> +timfeirg +Timothy Hobbs +tjwebb123 +tobe +Tobias Bieniek +Tobias Bradtke +Tobias Gesellchen +Tobias Klauser +Tobias Munk +Tobias Schmidt +Tobias Schwab +Todd Crane +Todd Lunter +Todd Whiteman +Toli Kuznets +Tom Barlow +Tom Booth +Tom Denham +Tom Fotherby +Tom Howe +Tom Hulihan +Tom Maaswinkel +Tom Wilkie +Tom X. Tobin +Tomas Tomecek +Tomasz Kopczynski +Tomasz Lipinski +Tomasz Nurkiewicz +Tommaso Visconti +Tomáš Hrčka +Tonny Xu +Tony Abboud +Tony Daws +Tony Miller +toogley +Torstein Husebø +Tõnis Tiigi +tpng +tracylihui <793912329@qq.com> +Trapier Marshall +Travis Cline +Travis Thieman +Trent Ogren +Trevor +Trevor Pounds +Trevor Sullivan +trishnaguha +Tristan Carel +Troy Denton +Tyler Brock +Tzu-Jung Lee +uhayate +Ulysse Carion +unknown +vagrant +Vaidas Jablonskis +Veres Lajos +vgeta +Victor Algaze +Victor Coisne +Victor Costan +Victor I. 
Wood +Victor Lyuboslavsky +Victor Marmol +Victor Palma +Victor Vieux +Victoria Bialas +Vijaya Kumar K +Viktor Stanchev +Viktor Vojnovski +VinayRaghavanKS +Vincent Batts +Vincent Bernat +Vincent Bernat +Vincent Demeester +Vincent Giersch +Vincent Mayers +Vincent Woo +Vinod Kulkarni +Vishal Doshi +Vishnu Kannan +Vitaly Ostrosablin +Vitor Monteiro +Vivek Agarwal +Vivek Dasgupta +Vivek Goyal +Vladimir Bulyga +Vladimir Kirillov +Vladimir Pouzanov +Vladimir Rutsky +Vladimir Varankin +VladimirAus +Vojtech Vitek (V-Teq) +waitingkuo +Walter Leibbrandt +Walter Stanish +WANG Chao +Wang Jie +Wang Long +Wang Ping +Wang Xing +Wang Yuexiao +Ward Vandewege +WarheadsSE +Wayne Chang +Wayne Song +Wei Wu +Wei-Ting Kuo +weiyan +Weiyang Zhu +Wen Cheng Ma +Wendel Fleming +Wenkai Yin +Wentao Zhang +Wenxuan Zhao +Wenyu You <21551128@zju.edu.cn> +Wenzhi Liang +Wes Morgan +Wewang Xiaorenfine +Will Dietz +Will Rouesnel +Will Weaver +willhf +William Delanoue +William Henry +William Hubbs +William Martin +William Riancho +William Thurston +WiseTrem +wlan0 +Wolfgang Powisch +wonderflow +Wonjun Kim +xamyzhao +Xianglin Gao +Xianlu Bird +XiaoBing Jiang +Xiaoxu Chen +xiekeyang +Xinbo Weng +Xinzi Zhou +Xiuming Chen +xlgao-zju +xuzhaokui +Yahya +YAMADA Tsuyoshi +Yan Feng +Yang Bai +Yanqiang Miao +Yao Zaiyong +Yasunori Mahata +Yestin Sun +Yi EungJun +Yibai Zhang +Yihang Ho +Ying Li +Yohei Ueda +Yong Tang +Yongzhi Pan +yorkie +You-Sheng Yang (楊有勝) +Youcef YEKHLEF +Yu Changchun +Yu Chengxia +Yu Peng +Yuan Sun +Yuanhong Peng +Yunxiang Huang +Yurii Rashkovskii +yuzou +Zac Dover +Zach Borboa +Zachary Jaffee +Zain Memon +Zaiste! +Zane DeGraffenried +Zefan Li +Zen Lin(Zhinan Lin) +Zhang Kun +Zhang Wei +Zhang Wentao +zhangxianwei +Zhenan Ye <21551168@zju.edu.cn> +zhenghenghuo +Zhenkun Bi +zhouhao +Zhu Guihua +Zhu Kunjia +Zhuoyun Wei +Zilin Du +zimbatm +Ziming Dong +ZJUshuaizhou <21551191@zju.edu.cn> +zmarouf +Zoltan Tombol +zqh +Zuhayr Elahi +Zunayed Ali +Álex González +Álvaro Lázaro +Átila Camurça Alves +尹吉峰 +徐俊杰 +搏通 diff --git a/components/engine/CHANGELOG.md b/components/engine/CHANGELOG.md new file mode 100644 index 0000000000..26185624e0 --- /dev/null +++ b/components/engine/CHANGELOG.md @@ -0,0 +1,3501 @@ +# Changelog + +Items starting with `DEPRECATE` are important deprecation notices. For more +information on the list of deprecated flags and APIs please have a look at +https://docs.docker.com/engine/deprecated/ where target removal dates can also +be found. 
+
+## 17.04.0-ce (2017-04-05)
+
+### Builder
+
+* Disable container logging for build containers [#29552](https://github.com/docker/docker/pull/29552)
+* Fix use of `**/` in `.dockerignore` [#29043](https://github.com/docker/docker/pull/29043)
+
+### Client
+
++ Sort `docker stack ls` by name [#31085](https://github.com/docker/docker/pull/31085)
++ Flags for specifying bind mount consistency [#31047](https://github.com/docker/docker/pull/31047)
+* Output of docker CLI `--help` is now wrapped to the terminal width [#28751](https://github.com/docker/docker/pull/28751)
+* Suppress image digest in `docker ps` [#30848](https://github.com/docker/docker/pull/30848)
+* Hide command options that are related to Windows [#30788](https://github.com/docker/docker/pull/30788)
+* Fix `docker plugin install` prompt to accept "enter" for the "N" default [#30769](https://github.com/docker/docker/pull/30769)
++ Add `truncate` function for Go templates [#30484](https://github.com/docker/docker/pull/30484)
+* Support expanded syntax of ports in `stack deploy` [#30476](https://github.com/docker/docker/pull/30476)
+* Support expanded syntax of mounts in `stack deploy` [#30597](https://github.com/docker/docker/pull/30597) [#31795](https://github.com/docker/docker/pull/31795)
++ Add `--add-host` for `docker build` [#30383](https://github.com/docker/docker/pull/30383)
++ Add `.CreatedAt` placeholder for `docker network ls --format` [#29900](https://github.com/docker/docker/pull/29900)
+* Update order of `--secret-rm` and `--secret-add` [#29802](https://github.com/docker/docker/pull/29802)
++ Add `--filter enabled=true` for `docker plugin ls` [#28627](https://github.com/docker/docker/pull/28627)
++ Add `--format` to `docker service ls` [#28199](https://github.com/docker/docker/pull/28199)
++ Add `publish` and `expose` filter for `docker ps --filter` [#27557](https://github.com/docker/docker/pull/27557)
+* Support multiple service IDs on `docker service ps` [#25234](https://github.com/docker/docker/pull/25234)
++ Allow swarm join with `--availability=drain` [#24993](https://github.com/docker/docker/pull/24993)
+* `docker inspect` now shows "docker-default" when AppArmor is enabled and no other profile was defined [#27083](https://github.com/docker/docker/pull/27083)
+
+### Logging
+
++ Implement optional ring buffer for container logs [#28762](https://github.com/docker/docker/pull/28762)
++ Add `--log-opt awslogs-create-group=` for awslogs (CloudWatch) to support creation of log groups as needed [#29504](https://github.com/docker/docker/pull/29504)
+- Fix segfault when using the gcplogs logging driver with a "static" binary [#29478](https://github.com/docker/docker/pull/29478)
+
+### Networking
+
+* Check parameters `--ip`, `--ip6` and `--link-local-ip` in `docker network connect` [#30807](https://github.com/docker/docker/pull/30807)
++ Add support for `dns-search` [#30117](https://github.com/docker/docker/pull/30117)
++ Add `--verbose` option for `docker network inspect` to show task details from all swarm nodes [#31710](https://github.com/docker/docker/pull/31710)
+* Clear stale datapath encryption states when joining the cluster [docker/libnetwork#1354](https://github.com/docker/libnetwork/pull/1354)
++ Ensure iptables initialization only happens once [docker/libnetwork#1676](https://github.com/docker/libnetwork/pull/1676)
+* Fix bad order of iptables filter rules [docker/libnetwork#961](https://github.com/docker/libnetwork/pull/961)
++ Add anonymous container alias to service record on attachable network [docker/libnetwork#1651](https://github.com/docker/libnetwork/pull/1651)
++ Support for `com.docker.network.container_interface_prefix` driver label [docker/libnetwork#1667](https://github.com/docker/libnetwork/pull/1667)
++ Improve network list performance by omitting network details that are not used [#30673](https://github.com/docker/docker/pull/30673)
+
+### Runtime
+
+* Handle paused container when restoring without live-restore set [#31704](https://github.com/docker/docker/pull/31704)
+- Do not allow sub-second values in healthcheck options in Dockerfile [#31177](https://github.com/docker/docker/pull/31177)
+* Support name and ID prefix in `secret update` [#30856](https://github.com/docker/docker/pull/30856)
+* Use binary frame for websocket attach endpoint [#30460](https://github.com/docker/docker/pull/30460)
+* Fix Linux mount calls not applying propagation type changes [#30416](https://github.com/docker/docker/pull/30416)
+* Fix ExecIds leak on failed `exec -i` [#30340](https://github.com/docker/docker/pull/30340)
+* Prune named but untagged images if `danglingOnly=true` [#30330](https://github.com/docker/docker/pull/30330)
++ Add daemon flag to set `no_new_priv` as default for unprivileged containers [#29984](https://github.com/docker/docker/pull/29984)
++ Add daemon option `--default-shm-size` [#29692](https://github.com/docker/docker/pull/29692)
++ Support registry mirror config reload [#29650](https://github.com/docker/docker/pull/29650)
+- Ignore the daemon log config when building images [#29552](https://github.com/docker/docker/pull/29552)
+* Move secret name or ID prefix resolving from client to daemon [#29218](https://github.com/docker/docker/pull/29218)
++ Allow adding rules to `cgroup devices.allow` on container create/run [#22563](https://github.com/docker/docker/pull/22563)
+- Fix `cpu.cfs_quota_us` being reset when running `systemd daemon-reload` [#31736](https://github.com/docker/docker/pull/31736)
+
+### Swarm Mode
+
++ Topology-aware scheduling [#30725](https://github.com/docker/docker/pull/30725)
++ Automatic service rollback on failure [#31108](https://github.com/docker/docker/pull/31108)
++ Worker and manager on the same node are now connected through a UNIX socket [docker/swarmkit#1828](https://github.com/docker/swarmkit/pull/1828), [docker/swarmkit#1850](https://github.com/docker/swarmkit/pull/1850), [docker/swarmkit#1851](https://github.com/docker/swarmkit/pull/1851)
+* Improve raft transport package [docker/swarmkit#1748](https://github.com/docker/swarmkit/pull/1748)
+* No automatic manager shutdown on demotion/removal [docker/swarmkit#1829](https://github.com/docker/swarmkit/pull/1829)
+* Use TransferLeadership to make leader demotion safer [docker/swarmkit#1939](https://github.com/docker/swarmkit/pull/1939)
+* Decrease default monitoring period [docker/swarmkit#1967](https://github.com/docker/swarmkit/pull/1967)
++ Add service logs formatting [#31672](https://github.com/docker/docker/pull/31672)
+* Fix service logs API to be able to specify stream [#31313](https://github.com/docker/docker/pull/31313)
++ Add `--stop-signal` for `service create` and `service update` [#30754](https://github.com/docker/docker/pull/30754)
++ Add `--read-only` for `service create` and `service update` [#30162](https://github.com/docker/docker/pull/30162)
++ Renew the context after communicating with the registry [#31586](https://github.com/docker/docker/pull/31586)
++ (experimental) Add `--tail` and `--since` options to `docker service logs` [#31500](https://github.com/docker/docker/pull/31500)
++ (experimental) Add `--no-task-ids` and `--no-trunc` options to `docker service logs` [#31672](https://github.com/docker/docker/pull/31672)
+
+### Windows
+
+* Block pulling Windows images on non-Windows daemons [#29001](https://github.com/docker/docker/pull/29001)
+
+## 17.03.1-ce (2017-03-27)
+
+### Remote API (v1.27) & Client
+
+* Fix autoremove on older API [#31692](https://github.com/docker/docker/pull/31692)
+* Fix default network customization for a stack [#31258](https://github.com/docker/docker/pull/31258/)
+* Correct CPU usage calculation in presence of offline CPUs and newer Linux [#31802](https://github.com/docker/docker/pull/31802)
+* Fix issue where service healthcheck is `{}` in remote API [#30197](https://github.com/docker/docker/pull/30197)
+
+### Runtime
+
+* Update runc to 54296cf40ad8143b62dbcaa1d90e520a2136ddfe [#31666](https://github.com/docker/docker/pull/31666)
+  * Ignore cgroup2 mountpoints [opencontainers/runc#1266](https://github.com/opencontainers/runc/pull/1266)
+* Update containerd to 4ab9917febca54791c5f071a9d1f404867857fcc [#31662](https://github.com/docker/docker/pull/31662) [#31852](https://github.com/docker/docker/pull/31852)
+  * Register healthcheck service before calling restore() [docker/containerd#609](https://github.com/docker/containerd/pull/609)
+* Fix `docker exec` not working after unattended upgrades that reload apparmor profiles [#31773](https://github.com/docker/docker/pull/31773)
+* Fix unmounting layer without merge dir with Overlay2 [#31069](https://github.com/docker/docker/pull/31069)
+* Do not ignore "volume in use" errors when force-deleting [#31450](https://github.com/docker/docker/pull/31450)
+
+### Swarm Mode
+
+* Update swarmkit to 17756457ad6dc4d8a639a1f0b7a85d1b65a617bb [#31807](https://github.com/docker/docker/pull/31807)
+  * Scheduler now correctly considers tasks which have been assigned to a node but aren't yet running [docker/swarmkit#1980](https://github.com/docker/swarmkit/pull/1980)
+  * Allow removal of a network when only dead tasks reference it [docker/swarmkit#2018](https://github.com/docker/swarmkit/pull/2018)
+  * Retry failed network allocations less aggressively [docker/swarmkit#2021](https://github.com/docker/swarmkit/pull/2021)
+  * Avoid network allocation for tasks that are no longer running [docker/swarmkit#2017](https://github.com/docker/swarmkit/pull/2017)
+  * Bookkeeping fixes inside the network allocator [docker/swarmkit#2019](https://github.com/docker/swarmkit/pull/2019) [docker/swarmkit#2020](https://github.com/docker/swarmkit/pull/2020)
+
+### Windows
+
+* Cleanup HCS on restore [#31503](https://github.com/docker/docker/pull/31503)
+
+## 17.03.0-ce (2017-03-01)
+
+**IMPORTANT**: Starting with this release, Docker is on a monthly release cycle and uses a
+new YY.MM versioning scheme to reflect this. Two channels are available: monthly and quarterly.
+Any given monthly release will only receive security and bugfixes until the next monthly
+release is available. Quarterly releases receive security and bugfixes for 4 months after
+initial release. This release includes bugfixes for 1.13.1, but
+there are no major feature additions and the API version stays the same.
+Upgrading from Docker 1.13.1 to 17.03.0 is expected to be simple and low-risk.
+
+### Client
+
+* Fix panic in `docker stats --format` [#30776](https://github.com/docker/docker/pull/30776)
+
+### Contrib
+
+* Update various `bash` and `zsh` completion scripts [#30823](https://github.com/docker/docker/pull/30823), [#30945](https://github.com/docker/docker/pull/30945) and more...
+* Block obsolete socket families in default seccomp profile - mitigates unpatched kernels' CVE-2017-6074 [#29076](https://github.com/docker/docker/pull/29076)
+
+### Networking
+
+* Fix bug on overlay encryption keys rotation in cross-datacenter swarm [#30727](https://github.com/docker/docker/pull/30727)
+* Fix side effect panic in overlay encryption and network control plane communication failure ("No installed keys could decrypt the message") on frequent swarm leader re-election [#25608](https://github.com/docker/docker/pull/25608)
+* Several fixes around system responsiveness and datapath programming when using overlay network with external kv-store [docker/libnetwork#1639](https://github.com/docker/libnetwork/pull/1639), [docker/libnetwork#1632](https://github.com/docker/libnetwork/pull/1632) and more...
+* Discard incoming plain vxlan packets for encrypted overlay network [#31170](https://github.com/docker/docker/pull/31170)
+* Release the network attachment on allocation failure [#31073](https://github.com/docker/docker/pull/31073)
+* Fix port allocation when multiple published ports map to the same target port [docker/swarmkit#1835](https://github.com/docker/swarmkit/pull/1835)
+
+### Runtime
+
+* Fix a deadlock in docker logs [#30223](https://github.com/docker/docker/pull/30223)
+* Fix CPU spin waiting for log write events [#31070](https://github.com/docker/docker/pull/31070)
+* Fix a possible crash when using journald [#31231](https://github.com/docker/docker/pull/31231) [#31263](https://github.com/docker/docker/pull/31263)
+* Fix a panic on close of nil channel [#31274](https://github.com/docker/docker/pull/31274)
+* Fix duplicate mount point for `--volumes-from` in `docker run` [#29563](https://github.com/docker/docker/pull/29563)
+* Fix `--cache-from` not caching the last step [#31189](https://github.com/docker/docker/pull/31189)
+
+### Swarm Mode
+
+* Fix an error leaked on shutdown when the container was never started [#31279](https://github.com/docker/docker/pull/31279)
+* Fix possibility of tasks getting stuck in the "NEW" state during a leader failover [docker/swarmkit#1938](https://github.com/docker/swarmkit/pull/1938)
+* Fix extraneous task creations for global services that led to confusing replica counts in `docker service ls` [docker/swarmkit#1957](https://github.com/docker/swarmkit/pull/1957)
+* Fix problem that made rolling updates slow when `task-history-limit` was set to 1 [docker/swarmkit#1948](https://github.com/docker/swarmkit/pull/1948)
+* Restart tasks elsewhere, if appropriate, when they are shut down as a result of nodes no longer satisfying constraints [docker/swarmkit#1958](https://github.com/docker/swarmkit/pull/1958)
+* (experimental)
+
+## 1.13.1 (2017-02-08)
+
+**IMPORTANT**: On Linux distributions where `devicemapper` was the default storage driver,
+`overlay2` or `overlay` is now used by default (if the kernel supports it).
+To use devicemapper, you can manually configure the storage driver
+through the `--storage-driver` daemon option, or by setting "storage-driver" in the
+`daemon.json` configuration file, as in the example below.
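+
+For illustration only, a minimal `daemon.json` sketch along those lines (the
+key name is the one given above; the standard configuration path
+`/etc/docker/daemon.json` and the `devicemapper` value are assumptions to
+adjust for your system):
+
+```json
+{
+  "storage-driver": "devicemapper"
+}
+```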
+
+**IMPORTANT**: In Docker 1.13, the managed plugin API changed compared to the experimental
+version introduced in Docker 1.12. You must **uninstall** plugins which you installed with Docker 1.12
+_before_ upgrading to Docker 1.13. You can uninstall plugins using the `docker plugin rm` command.
+
+If you have already upgraded to Docker 1.13 without uninstalling
+previously-installed plugins, you may see this message when the Docker daemon
+starts:
+
+    Error starting daemon: json: cannot unmarshal string into Go value of type types.PluginEnv
+
+To manually remove all plugins and resolve this problem, take the following steps:
+
+1. Remove plugins.json from: `/var/lib/docker/plugins/`.
+2. Restart Docker. Verify that the Docker daemon starts with no errors.
+3. Reinstall your plugins.
+
+### Contrib
+
+* Do not require a custom build of tini [#28454](https://github.com/docker/docker/pull/28454)
+* Upgrade to Go 1.7.5 [#30489](https://github.com/docker/docker/pull/30489)
+
+### Remote API (v1.26) & Client
+
++ Support secrets in docker stack deploy with compose file [#30144](https://github.com/docker/docker/pull/30144)
+
+### Runtime
+
+* Fix size issue in `docker system df` [#30378](https://github.com/docker/docker/pull/30378)
+* Fix error on `docker inspect` when Swarm certificates were expired [#29246](https://github.com/docker/docker/pull/29246)
+* Fix deadlock on v1 plugin with activate error [#30408](https://github.com/docker/docker/pull/30408)
+* Fix SELinux regression [#30649](https://github.com/docker/docker/pull/30649)
+
+### Plugins
+
+* Support global scoped network plugins (v2) in swarm mode [#30332](https://github.com/docker/docker/pull/30332)
++ Add `docker plugin upgrade` [#29414](https://github.com/docker/docker/pull/29414)
+
+### Windows
+
+* Fix small regression with old plugins in Windows [#30150](https://github.com/docker/docker/pull/30150)
+* Fix warning on Windows [#30730](https://github.com/docker/docker/pull/30730)
+
+## 1.13.0 (2017-01-18)
+
+**IMPORTANT**: On Linux distributions where `devicemapper` was the default storage driver,
+`overlay2` or `overlay` is now used by default (if the kernel supports it).
+To use devicemapper, you can manually configure the storage driver
+through the `--storage-driver` daemon option, or by setting "storage-driver" in the
+`daemon.json` configuration file.
+
+**IMPORTANT**: In Docker 1.13, the managed plugin API changed compared to the experimental
+version introduced in Docker 1.12. You must **uninstall** plugins which you installed with Docker 1.12
+_before_ upgrading to Docker 1.13. You can uninstall plugins using the `docker plugin rm` command.
+
+If you have already upgraded to Docker 1.13 without uninstalling
+previously-installed plugins, you may see this message when the Docker daemon
+starts:
+
+    Error starting daemon: json: cannot unmarshal string into Go value of type types.PluginEnv
+
+To manually remove all plugins and resolve this problem, take the following steps
+(a shell sketch follows the list):
+
+1. Remove plugins.json from: `/var/lib/docker/plugins/`.
+2. Restart Docker. Verify that the Docker daemon starts with no errors.
+3. Reinstall your plugins.
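+
+As a rough sketch of those three steps (assuming a systemd-managed daemon and a
+hypothetical plugin name `example/plugin`; adjust both to your system):
+
+```bash
+# 1. Remove the stale plugin metadata left over from Docker 1.12
+sudo rm /var/lib/docker/plugins/plugins.json
+
+# 2. Restart the daemon and check that it comes up cleanly
+sudo systemctl restart docker
+sudo journalctl -u docker --since "1 min ago"   # look for startup errors
+
+# 3. Reinstall each plugin you need
+docker plugin install example/plugin
+```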
+
+### Builder
+
++ Add capability to specify images used as a cache source on build. These images
+do not need to have a local parent chain and can be pulled from other registries
+[#26839](https://github.com/docker/docker/pull/26839)
++ (experimental) Add option to squash image layers to the FROM image after successful builds [#22641](https://github.com/docker/docker/pull/22641)
+* Fix dockerfile parser with empty line after escape [#24725](https://github.com/docker/docker/pull/24725)
+- Add step number on `docker build` [#24978](https://github.com/docker/docker/pull/24978)
++ Add support for compressing build context during image build [#25837](https://github.com/docker/docker/pull/25837)
++ Add `--network` to `docker build` [#27702](https://github.com/docker/docker/pull/27702)
+- Fix inconsistent behavior between `--label` flag on `docker build` and `docker run` [#26027](https://github.com/docker/docker/issues/26027)
+- Fix image layer inconsistencies when using the overlay storage driver [#27209](https://github.com/docker/docker/pull/27209)
+* Unused build-args are now allowed. A warning is presented instead of an error and a failed build [#27412](https://github.com/docker/docker/pull/27412)
+- Fix builder cache issue on Windows [#27805](https://github.com/docker/docker/pull/27805)
++ Allow `USER` in builder on Windows [#28415](https://github.com/docker/docker/pull/28415)
++ Handle env case-insensitively on Windows [#28725](https://github.com/docker/docker/pull/28725)
+
+### Contrib
+
++ Add support for building docker debs for Ubuntu 16.04 Xenial on PPC64LE [#23438](https://github.com/docker/docker/pull/23438)
++ Add support for building docker debs for Ubuntu 16.04 Xenial on s390x [#26104](https://github.com/docker/docker/pull/26104)
++ Add support for building docker debs for Ubuntu 16.10 Yakkety Yak on PPC64LE [#28046](https://github.com/docker/docker/pull/28046)
+- Add RPM builder for VMWare Photon OS [#24116](https://github.com/docker/docker/pull/24116)
++ Add shell completions to tgz [#27735](https://github.com/docker/docker/pull/27735)
+* Update the install script to allow using the mirror in China [#27005](https://github.com/docker/docker/pull/27005)
++ Add DEB builder for Ubuntu 16.10 Yakkety Yak [#27993](https://github.com/docker/docker/pull/27993)
++ Add RPM builder for Fedora 25 [#28222](https://github.com/docker/docker/pull/28222)
++ Add `make deb` support for aarch64 [#27625](https://github.com/docker/docker/pull/27625)
+
+### Distribution
+
+* Update notary dependency to 0.4.2 (full changelogs [here](https://github.com/docker/notary/releases/tag/v0.4.2)) [#27074](https://github.com/docker/docker/pull/27074)
+  - Support for compilation on Windows [docker/notary#970](https://github.com/docker/notary/pull/970)
+  - Improved error messages for client authentication errors [docker/notary#972](https://github.com/docker/notary/pull/972)
+  - Support for finding keys that are anywhere in the `~/.docker/trust/private` directory, not just under `~/.docker/trust/private/root_keys` or `~/.docker/trust/private/tuf_keys` [docker/notary#981](https://github.com/docker/notary/pull/981)
+  - Previously, on any error updating, the client would fall back on the cache. Now we only do so if there is a network error or if the server is unavailable or missing the TUF data. Invalid TUF data will cause the update to fail - for example if there was an invalid root rotation. [docker/notary#982](https://github.com/docker/notary/pull/982)
+  - Improve root validation and yubikey debug logging [docker/notary#858](https://github.com/docker/notary/pull/858) [docker/notary#891](https://github.com/docker/notary/pull/891)
+  - Warn if certificates for root or delegations are near expiry [docker/notary#802](https://github.com/docker/notary/pull/802)
+  - Warn if role metadata is near expiry [docker/notary#786](https://github.com/docker/notary/pull/786)
+  - Fix passphrase retrieval attempt counting and terminal detection [docker/notary#906](https://github.com/docker/notary/pull/906)
+- Avoid unnecessary blob uploads when different users push same layers to authenticated registry [#26564](https://github.com/docker/docker/pull/26564)
+* Allow external storage for registry credentials [#26354](https://github.com/docker/docker/pull/26354)
+
+### Logging
+
+* Standardize the default logging tag value in all logging drivers [#22911](https://github.com/docker/docker/pull/22911)
+- Improve performance and memory use when logging long log lines [#22982](https://github.com/docker/docker/pull/22982)
++ Enable syslog driver for Windows [#25736](https://github.com/docker/docker/pull/25736)
++ Add Logentries Driver [#27471](https://github.com/docker/docker/pull/27471)
++ Update AWS log driver to support tags [#27707](https://github.com/docker/docker/pull/27707)
++ Unix socket support for fluentd [#26088](https://github.com/docker/docker/pull/26088)
+* Enable fluentd logging driver on Windows [#28189](https://github.com/docker/docker/pull/28189)
+- Sanitize docker labels when used as journald field names [#23725](https://github.com/docker/docker/pull/23725)
+- Fix an issue where `docker logs --tail` returned fewer lines than expected [#28203](https://github.com/docker/docker/pull/28203)
+- Splunk Logging Driver: performance and reliability improvements [#26207](https://github.com/docker/docker/pull/26207)
+- Splunk Logging Driver: configurable message formats and an option to skip connection verification [#25786](https://github.com/docker/docker/pull/25786)
+
+### Networking
+
++ Add `--attachable` network support to enable `docker run` to work in swarm-mode overlay network [#25962](https://github.com/docker/docker/pull/25962)
++ Add support for host port PublishMode in services using the `--publish` option in `docker service create` [#27917](https://github.com/docker/docker/pull/27917) and [#28943](https://github.com/docker/docker/pull/28943)
++ Add support for Windows Server 2016 overlay network driver (requires upcoming ws2016 update) [#28182](https://github.com/docker/docker/pull/28182)
+* Change the default `FORWARD` policy to `DROP` [#28257](https://github.com/docker/docker/pull/28257)
++ Add support for specifying static IP addresses for predefined network on Windows [#22208](https://github.com/docker/docker/pull/22208)
+- Fix `--publish` flag on `docker run` not working with IPv6 addresses [#27860](https://github.com/docker/docker/pull/27860)
+- Fix `docker network inspect` to show gateway with mask [#25564](https://github.com/docker/docker/pull/25564)
+- Fix an issue where multiple addresses in a bridge may cause `--fixed-cidr` to not have the correct addresses [#26659](https://github.com/docker/docker/pull/26659)
++ Add creation timestamp to `docker network inspect` [#26130](https://github.com/docker/docker/pull/26130)
+- Show peer nodes in `docker network inspect` for swarm overlay networks [#28078](https://github.com/docker/docker/pull/28078)
+- Enable ping for service VIP address [#28019](https://github.com/docker/docker/pull/28019)
+
+### Plugins
+
+- Move plugins out of experimental [#28226](https://github.com/docker/docker/pull/28226)
+- Add `--force` on `docker plugin remove` [#25096](https://github.com/docker/docker/pull/25096)
+* Add support for dynamically reloading authorization plugins [#22770](https://github.com/docker/docker/pull/22770)
++ Add description in `docker plugin ls` [#25556](https://github.com/docker/docker/pull/25556)
++ Add `-f`/`--format` to `docker plugin inspect` [#25990](https://github.com/docker/docker/pull/25990)
++ Add `docker plugin create` command [#28164](https://github.com/docker/docker/pull/28164)
+* Send request's TLS peer certificates to authorization plugins [#27383](https://github.com/docker/docker/pull/27383)
+* Support for global-scoped network and IPAM plugins in swarm mode [#27287](https://github.com/docker/docker/pull/27287)
+* Split `docker plugin install` into two API calls, `/privileges` and `/pull` [#28963](https://github.com/docker/docker/pull/28963)
+
+### Remote API (v1.25) & Client
+
++ Support `docker stack deploy` from a Compose file [#27998](https://github.com/docker/docker/pull/27998)
++ (experimental) Implement checkpoint and restore [#22049](https://github.com/docker/docker/pull/22049)
++ Add `--format` flag to `docker info` [#23808](https://github.com/docker/docker/pull/23808)
+* Remove `--name` from `docker volume create` [#23830](https://github.com/docker/docker/pull/23830)
++ Add `docker stack ls` [#23886](https://github.com/docker/docker/pull/23886)
++ Add a new `is-task` ps filter [#24411](https://github.com/docker/docker/pull/24411)
++ Add `--env-file` flag to `docker service create` [#24844](https://github.com/docker/docker/pull/24844)
++ Add `--format` on `docker stats` [#24987](https://github.com/docker/docker/pull/24987)
++ Make `docker node ps` default to `self` in a swarm node [#25214](https://github.com/docker/docker/pull/25214)
++ Add `--group` in `docker service create` [#25317](https://github.com/docker/docker/pull/25317)
++ Add `--no-trunc` to service/node/stack ps output [#25337](https://github.com/docker/docker/pull/25337)
++ Add Logs to `ContainerAttachOptions` so Go clients can request to retrieve container logs as part of the attach process [#26718](https://github.com/docker/docker/pull/26718)
++ Allow the client to talk to an older server [#27745](https://github.com/docker/docker/pull/27745)
+* Inform the user client-side that a container removal is in progress [#26074](https://github.com/docker/docker/pull/26074)
++ Add `Isolation` to the /info endpoint [#26255](https://github.com/docker/docker/pull/26255)
++ Add `userns` to the /info endpoint [#27840](https://github.com/docker/docker/pull/27840)
+- Do not allow more than one mode to be requested at once in the services endpoint [#26643](https://github.com/docker/docker/pull/26643)
++ Add capability to /containers/create API to specify mounts in a more granular and safer way [#22373](https://github.com/docker/docker/pull/22373)
++ Add `--format` flag to `network ls` and `volume ls` [#23475](https://github.com/docker/docker/pull/23475)
+* Allow the top-level `docker inspect` command to inspect any kind of resource [#23614](https://github.com/docker/docker/pull/23614)
++ Add `--cpus` flag to control cpu resources for `docker run` and `docker create`, and add `NanoCPUs` to `HostConfig` [#27958](https://github.com/docker/docker/pull/27958)
+- Allow unsetting the `--entrypoint` in `docker run` or `docker create` [#23718](https://github.com/docker/docker/pull/23718)
+* Restructure CLI commands by adding `docker image` and `docker container` commands for more consistency [#26025](https://github.com/docker/docker/pull/26025)
+- Remove `COMMAND` column from `service ls` output [#28029](https://github.com/docker/docker/pull/28029)
++ Add `--format` to `docker events` [#26268](https://github.com/docker/docker/pull/26268)
+* Allow specifying multiple nodes on `docker node ps` [#26299](https://github.com/docker/docker/pull/26299)
+* Restrict fractional digits to 2 decimals in `docker images` output [#26303](https://github.com/docker/docker/pull/26303)
++ Add `--dns-option` to `docker run` [#28186](https://github.com/docker/docker/pull/28186)
++ Add Image ID to container commit event [#28128](https://github.com/docker/docker/pull/28128)
++ Add external binaries version to `docker info` [#27955](https://github.com/docker/docker/pull/27955)
++ Add information for `Manager Addresses` in the output of `docker info` [#28042](https://github.com/docker/docker/pull/28042)
++ Add a new reference filter for `docker images` [#27872](https://github.com/docker/docker/pull/27872)
+
+### Runtime
+
++ Add `--experimental` daemon flag to enable experimental features, instead of shipping them in a separate build [#27223](https://github.com/docker/docker/pull/27223)
++ Add a `--shutdown-timeout` daemon flag to specify the default timeout (in seconds) to stop containers gracefully before daemon exit [#23036](https://github.com/docker/docker/pull/23036)
++ Add `--stop-timeout` to specify the timeout value (in seconds) for individual containers to stop [#22566](https://github.com/docker/docker/pull/22566)
++ Add a new daemon flag `--userland-proxy-path` to allow configuring the userland proxy instead of using the hardcoded `docker-proxy` from `$PATH` [#26882](https://github.com/docker/docker/pull/26882)
++ Add boolean flag `--init` on `dockerd` and on `docker run` to use [tini](https://github.com/krallin/tini), a zombie-reaping init process, as PID 1 [#26061](https://github.com/docker/docker/pull/26061) [#28037](https://github.com/docker/docker/pull/28037)
++ Add a new daemon flag `--init-path` to allow configuring the path to the `docker-init` binary [#26941](https://github.com/docker/docker/pull/26941)
++ Add support for live reloading insecure registries in configuration [#22337](https://github.com/docker/docker/pull/22337)
++ Add support for storage-opt size on Windows daemons [#23391](https://github.com/docker/docker/pull/23391)
+* Improve reliability of `docker run --rm` by moving it from the client to the daemon [#20848](https://github.com/docker/docker/pull/20848)
++ Add support for `--cpu-rt-period` and `--cpu-rt-runtime` flags, allowing containers to run real-time threads when `CONFIG_RT_GROUP_SCHED` is enabled in the kernel [#23430](https://github.com/docker/docker/pull/23430)
+* Allow parallel stop, pause, unpause [#24761](https://github.com/docker/docker/pull/24761) / [#26778](https://github.com/docker/docker/pull/26778)
+* Implement XFS quota for overlay2 [#24771](https://github.com/docker/docker/pull/24771)
+- Fix partial/full filter issue in `service tasks --filter` [#24850](https://github.com/docker/docker/pull/24850)
+- Allow engine to run inside a user namespace [#25672](https://github.com/docker/docker/pull/25672)
+- Fix a race condition between device deferred removal and resume device, when using the devicemapper graphdriver [#23497](https://github.com/docker/docker/pull/23497)
+- Add `docker stats` support in Windows [#25737](https://github.com/docker/docker/pull/25737)
+- Allow using `--pid=host` and `--net=host` when `--userns=host` [#25771](https://github.com/docker/docker/pull/25771)
++ (experimental) Add metrics (Prometheus) output for basic `container`, `image`, and `daemon` operations [#25820](https://github.com/docker/docker/pull/25820)
+- Fix issue in `docker stats` with `NetworkDisabled=true` [#25905](https://github.com/docker/docker/pull/25905)
++ Add `docker top` support in Windows [#25891](https://github.com/docker/docker/pull/25891)
++ Record PID of exec'd process [#27470](https://github.com/docker/docker/pull/27470)
++ Add support for looking up user/groups via `getent` [#27599](https://github.com/docker/docker/pull/27599)
++ Add new `docker system` command with `df` and `prune` subcommands for system resource management, as well as `docker {container,image,volume,network} prune` subcommands [#26108](https://github.com/docker/docker/pull/26108) [#27525](https://github.com/docker/docker/pull/27525)
+- Fix an issue where containers could not be stopped or killed by setting xfs max_retries to 0 upon ENOSPC with devicemapper [#26212](https://github.com/docker/docker/pull/26212)
+- Fix `docker cp` failing to copy to a container's volume dir on CentOS with devicemapper [#28047](https://github.com/docker/docker/pull/28047)
+* Promote overlay(2) graphdriver [#27932](https://github.com/docker/docker/pull/27932)
++ Add `--seccomp-profile` daemon flag to specify a path to a seccomp profile that overrides the default [#26276](https://github.com/docker/docker/pull/26276)
+- Fix ulimits in `docker inspect` when `--default-ulimit` is set on the daemon [#26405](https://github.com/docker/docker/pull/26405)
+- Add workaround for overlay issues during build in older kernels [#28138](https://github.com/docker/docker/pull/28138)
++ Add `TERM` environment variable on `docker exec -t` [#26461](https://github.com/docker/docker/pull/26461)
+* Honor a container's `--stop-signal` setting upon `docker kill` [#26464](https://github.com/docker/docker/pull/26464)
+
+### Swarm Mode
+
++ Add secret management [#27794](https://github.com/docker/docker/pull/27794)
++ Add support for templating service options (hostname, mounts, and environment variables) [#28025](https://github.com/docker/docker/pull/28025)
+* Display the endpoint mode in the output of `docker service inspect --pretty` [#26906](https://github.com/docker/docker/pull/26906)
+* Make `docker service ps` output more bearable by shortening service IDs in task names [#28088](https://github.com/docker/docker/pull/28088)
+* Make `docker node ps` default to the current node [#25214](https://github.com/docker/docker/pull/25214)
++ Add `--dns`, `--dns-opt`, and `--dns-search` to `docker service create` [#27567](https://github.com/docker/docker/pull/27567)
++ Add `--force` to `docker service update` [#27596](https://github.com/docker/docker/pull/27596)
++ Add `--health-*` and `--no-healthcheck` flags to `docker service create` and `docker service update` [#27369](https://github.com/docker/docker/pull/27369)
++ Add `-q` to `docker service ps` [#27654](https://github.com/docker/docker/pull/27654)
+* Display number of global services in `docker service ls` [#27710](https://github.com/docker/docker/pull/27710)
+- Remove `--name` flag from `docker service update`. This flag is only functional on `docker service create`, so it was removed from the `update` command [#26988](https://github.com/docker/docker/pull/26988)
+- Fix worker nodes failing to recover because of transient networking issues [#26646](https://github.com/docker/docker/issues/26646)
+* Add support for health-aware load balancing and DNS records [#27279](https://github.com/docker/docker/pull/27279)
++ Add `--hostname` to `docker service create` [#27857](https://github.com/docker/docker/pull/27857)
++ Add `--host` to `docker service create`, and `--host-add`, `--host-rm` to `docker service update` [#28031](https://github.com/docker/docker/pull/28031)
++ Add `--tty` flag to `docker service create`/`update` [#28076](https://github.com/docker/docker/pull/28076)
+* Autodetect, store, and expose node IP address as seen by the manager [#27910](https://github.com/docker/docker/pull/27910)
+* Encryption at rest of manager keys and Raft data [#27967](https://github.com/docker/docker/pull/27967)
++ Add `--update-max-failure-ratio`, `--update-monitor` and `--rollback` flags to `docker service update` [#26421](https://github.com/docker/docker/pull/26421)
+- Fix an issue with address autodiscovery on `docker swarm init` running inside a container [#26457](https://github.com/docker/docker/pull/26457)
++ (experimental) Add `docker service logs` command to view logs for a service [#28089](https://github.com/docker/docker/pull/28089)
++ Pin images by digest for `docker service create` and `update` [#28173](https://github.com/docker/docker/pull/28173)
+* Add short (`-f`) flag for `docker node rm --force` and `docker swarm leave --force` [#28196](https://github.com/docker/docker/pull/28196)
++ Add options to customize Raft snapshots (`--max-snapshots`, `--snapshot-interval`) [#27997](https://github.com/docker/docker/pull/27997)
+- Don't repull an image if it is pinned by digest [#28265](https://github.com/docker/docker/pull/28265)
++ Swarm-mode support for Windows [#27838](https://github.com/docker/docker/pull/27838)
++ Allow hostname to be updated on a service [#28771](https://github.com/docker/docker/pull/28771)
++ Support v2 plugins [#29433](https://github.com/docker/docker/pull/29433)
++ Add content trust for services [#29469](https://github.com/docker/docker/pull/29469)
+
+### Volume
+
++ Add support for labels on volumes [#21270](https://github.com/docker/docker/pull/21270)
++ Add support for filtering volumes by label [#25628](https://github.com/docker/docker/pull/25628)
+* Add a `--force` flag in `docker volume rm` to forcefully purge the data of a volume that has already been deleted [#23436](https://github.com/docker/docker/pull/23436)
+* Enhance `docker volume inspect` to show all options used when creating the volume [#26671](https://github.com/docker/docker/pull/26671)
+* Add support for local NFS volumes to resolve hostnames [#27329](https://github.com/docker/docker/pull/27329)
+
+### Security
+
+- Fix SELinux labeling of volumes shared in a container [#23024](https://github.com/docker/docker/pull/23024)
+- Prohibit `/sys/firmware/**` from being accessed with AppArmor [#26618](https://github.com/docker/docker/pull/26618)
+
+### Deprecation
+
+- Mark the `docker daemon` command as deprecated. The daemon has moved to a separate binary (`dockerd`), which should be used instead [#26834](https://github.com/docker/docker/pull/26834)
+- Deprecate unversioned API endpoints [#28208](https://github.com/docker/docker/pull/28208)
+- Remove Ubuntu 15.10 (Wily Werewolf) as a supported platform. Ubuntu 15.10 is EOL, and no longer receives updates [#27042](https://github.com/docker/docker/pull/27042)
+- Remove Fedora 22 as a supported platform. Fedora 22 is EOL, and no longer receives updates [#27432](https://github.com/docker/docker/pull/27432)
+- Remove Fedora 23 as a supported platform. Fedora 23 is EOL, and no longer receives updates [#29455](https://github.com/docker/docker/pull/29455)
+- Deprecate the `repo:shortid` syntax on `docker pull` [#27207](https://github.com/docker/docker/pull/27207)
+- Deprecate backing filesystems without `d_type` support for the overlay and overlay2 storage drivers [#27433](https://github.com/docker/docker/pull/27433)
+- Deprecate `MAINTAINER` in Dockerfile [#25466](https://github.com/docker/docker/pull/25466)
+- Deprecate `filter` param for endpoint `/images/json` [#27872](https://github.com/docker/docker/pull/27872)
+- Deprecate setting duplicate engine labels [#24533](https://github.com/docker/docker/pull/24533)
+- Deprecate "top-level" network information in `NetworkSettings` [#28437](https://github.com/docker/docker/pull/28437)
+
+## 1.12.6 (2017-01-10)
+
+**IMPORTANT**: Docker 1.12 ships with an updated systemd unit file for rpm
+based installs (which includes RHEL, Fedora, CentOS, and Oracle Linux 7). When
+upgrading from an older version of docker, the upgrade process may not
+automatically install the updated version of the unit file, or may fail to
+start the docker service if:
+
+- the systemd unit file (`/usr/lib/systemd/system/docker.service`) contains local changes, or
+- a systemd drop-in file is present, and contains `-H fd://` in the `ExecStart` directive
+
+Starting the docker service will produce an error:
+
+    Failed to start docker.service: Unit docker.socket failed to load: No such file or directory.
+
+or
+
+    no sockets found via socket activation: make sure the service was started by systemd.
+
+To resolve this:
+
+- Back up the current version of the unit file, and replace the file with the
+  [version that ships with docker 1.12](https://raw.githubusercontent.com/docker/docker/v1.12.0/contrib/init/systemd/docker.service.rpm)
+- Remove the `Requires=docker.socket` directive from the `/usr/lib/systemd/system/docker.service` file if present
+- Remove `-H fd://` from the `ExecStart` directive (both in the main unit file, and in any drop-in files present).
+
+After making those changes, run `sudo systemctl daemon-reload`, and `sudo
+systemctl restart docker` to reload the changes and (re)start the docker daemon.
+
+**NOTE**: Docker 1.12.5 will correctly validate that either an IPv6 subnet is provided or
+that the IPAM driver can provide one when you specify the `--ipv6` option.
+
+If you are currently using the `--ipv6` option _without_ specifying the
+`--fixed-cidr-v6` option, the Docker daemon will refuse to start with the
+following message:
+
+```none
+Error starting daemon: Error initializing network controller: Error creating
+ default "bridge" network: failed to parse pool request
+ for address space "LocalDefault" pool " subpool ":
+ could not find an available, non-overlapping IPv6 address
+ pool among the defaults to assign to the network
+```
+
+To resolve this error, either remove the `--ipv6` flag (to preserve the same
+behavior as in Docker 1.12.3 and earlier), or provide an IPv6 subnet as the
+value of the `--fixed-cidr-v6` flag.
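+
+For illustration, a minimal sketch of the second option; the prefix below is
+from the IPv6 documentation range (RFC 3849) and is only a placeholder for a
+subnet you actually control:
+
+```bash
+# Sketch: start the daemon with IPv6 enabled and an explicit IPv6 subnet.
+# 2001:db8:1::/64 is a documentation-only placeholder; substitute your own.
+dockerd --ipv6 --fixed-cidr-v6="2001:db8:1::/64"
+```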
+
+In a similar way, if you specify the `--ipv6` flag when creating a network
+with the default IPAM driver, without providing an IPv6 `--subnet`, network
+creation will fail with the following message:
+
+```none
+Error response from daemon: failed to parse pool request for address space
+ "LocalDefault" pool "" subpool "": could not find an
+ available, non-overlapping IPv6 address pool among
+ the defaults to assign to the network
+```
+
+To resolve this, either remove the `--ipv6` flag (to preserve the same behavior
+as in Docker 1.12.3 and earlier), or provide an IPv6 subnet as the value of the
+`--subnet` flag.
+
+Network creation will instead succeed if you use an external IPAM driver
+which supports automatic allocation of IPv6 subnets.
+
+### Runtime
+
+- Fix runC privilege escalation (CVE-2016-9962)
+
+## 1.12.5 (2016-12-15)
+
+**IMPORTANT**: Docker 1.12 ships with an updated systemd unit file for rpm
+based installs (which includes RHEL, Fedora, CentOS, and Oracle Linux 7). When
+upgrading from an older version of docker, the upgrade process may not
+automatically install the updated version of the unit file, or may fail to
+start the docker service if:
+
+- the systemd unit file (`/usr/lib/systemd/system/docker.service`) contains local changes, or
+- a systemd drop-in file is present, and contains `-H fd://` in the `ExecStart` directive
+
+Starting the docker service will produce an error:
+
+    Failed to start docker.service: Unit docker.socket failed to load: No such file or directory.
+
+or
+
+    no sockets found via socket activation: make sure the service was started by systemd.
+
+To resolve this:
+
+- Back up the current version of the unit file, and replace the file with the
+  [version that ships with docker 1.12](https://raw.githubusercontent.com/docker/docker/v1.12.0/contrib/init/systemd/docker.service.rpm)
+- Remove the `Requires=docker.socket` directive from the `/usr/lib/systemd/system/docker.service` file if present
+- Remove `-H fd://` from the `ExecStart` directive (both in the main unit file, and in any drop-in files present).
+
+After making those changes, run `sudo systemctl daemon-reload`, and `sudo
+systemctl restart docker` to reload the changes and (re)start the docker daemon.
+
+**NOTE**: Docker 1.12.5 will correctly validate that either an IPv6 subnet is provided or
+that the IPAM driver can provide one when you specify the `--ipv6` option.
+
+If you are currently using the `--ipv6` option _without_ specifying the
+`--fixed-cidr-v6` option, the Docker daemon will refuse to start with the
+following message:
+
+```none
+Error starting daemon: Error initializing network controller: Error creating
+ default "bridge" network: failed to parse pool request
+ for address space "LocalDefault" pool " subpool ":
+ could not find an available, non-overlapping IPv6 address
+ pool among the defaults to assign to the network
+```
+
+To resolve this error, either remove the `--ipv6` flag (to preserve the same
+behavior as in Docker 1.12.3 and earlier), or provide an IPv6 subnet as the
+value of the `--fixed-cidr-v6` flag.
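+
+Equivalently, the same settings can be persisted in the daemon configuration
+file. A minimal sketch, assuming the default configuration file location
+(`/etc/docker/daemon.json`) and the same placeholder prefix as above:
+
+```bash
+# Sketch: persist the IPv6 settings in daemon.json instead of passing flags.
+# Warning: `tee` overwrites any existing /etc/docker/daemon.json.
+sudo tee /etc/docker/daemon.json <<'EOF'
+{
+  "ipv6": true,
+  "fixed-cidr-v6": "2001:db8:1::/64"
+}
+EOF
+sudo systemctl restart docker
+```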
+
+In a similar way, if you specify the `--ipv6` flag when creating a network
+with the default IPAM driver, without providing an IPv6 `--subnet`, network
+creation will fail with the following message:
+
+```none
+Error response from daemon: failed to parse pool request for address space
+ "LocalDefault" pool "" subpool "": could not find an
+ available, non-overlapping IPv6 address pool among
+ the defaults to assign to the network
+```
+
+To resolve this, either remove the `--ipv6` flag (to preserve the same behavior
+as in Docker 1.12.3 and earlier), or provide an IPv6 subnet as the value of the
+`--subnet` flag.
+
+Network creation will instead succeed if you use an external IPAM driver
+which supports automatic allocation of IPv6 subnets.
+
+### Runtime
+
+- Fix race on sending stdin close event [#29424](https://github.com/docker/docker/pull/29424)
+
+### Networking
+
+- Fix panic in `docker network ls` when a network was created with `--ipv6` and no IPv6 `--subnet` in older docker versions [#29416](https://github.com/docker/docker/pull/29416)
+
+### Contrib
+
+- Fix compilation on Darwin [#29370](https://github.com/docker/docker/pull/29370)
+
+## 1.12.4 (2016-12-12)
+
+**IMPORTANT**: Docker 1.12 ships with an updated systemd unit file for rpm
+based installs (which includes RHEL, Fedora, CentOS, and Oracle Linux 7). When
+upgrading from an older version of docker, the upgrade process may not
+automatically install the updated version of the unit file, or may fail to
+start the docker service if:
+
+- the systemd unit file (`/usr/lib/systemd/system/docker.service`) contains local changes, or
+- a systemd drop-in file is present, and contains `-H fd://` in the `ExecStart` directive
+
+Starting the docker service will produce an error:
+
+    Failed to start docker.service: Unit docker.socket failed to load: No such file or directory.
+
+or
+
+    no sockets found via socket activation: make sure the service was started by systemd.
+
+To resolve this:
+
+- Back up the current version of the unit file, and replace the file with the
+  [version that ships with docker 1.12](https://raw.githubusercontent.com/docker/docker/v1.12.0/contrib/init/systemd/docker.service.rpm)
+- Remove the `Requires=docker.socket` directive from the `/usr/lib/systemd/system/docker.service` file if present
+- Remove `-H fd://` from the `ExecStart` directive (both in the main unit file, and in any drop-in files present).
+
+After making those changes, run `sudo systemctl daemon-reload`, and `sudo
+systemctl restart docker` to reload the changes and (re)start the docker daemon.
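+
+As a rough sketch of the resolution steps above (the backup location is
+arbitrary; adjust to taste):
+
+```bash
+# Back up the local unit file, then replace it with the 1.12 version.
+sudo cp /usr/lib/systemd/system/docker.service /root/docker.service.bak
+sudo curl -fsSL -o /usr/lib/systemd/system/docker.service \
+  https://raw.githubusercontent.com/docker/docker/v1.12.0/contrib/init/systemd/docker.service.rpm
+# After also removing `Requires=docker.socket` and `-H fd://` where present:
+sudo systemctl daemon-reload
+sudo systemctl restart docker
+```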
+
+
+### Runtime
+
+- Fix issue where volume metadata was not removed [#29083](https://github.com/docker/docker/pull/29083)
+- Asynchronously close streams to prevent holding container lock [#29050](https://github.com/docker/docker/pull/29050)
+- Fix SELinux labels for newly created container volumes [#29050](https://github.com/docker/docker/pull/29050)
+- Remove hostname validation [#28990](https://github.com/docker/docker/pull/28990)
+- Fix deadlocks caused by IO races [#29095](https://github.com/docker/docker/pull/29095) [#29141](https://github.com/docker/docker/pull/29141)
+- Return empty stats if the container is restarting [#29150](https://github.com/docker/docker/pull/29150)
+- Fix volume store locking [#29151](https://github.com/docker/docker/pull/29151)
+- Ensure consistent status codes in the API [#29150](https://github.com/docker/docker/pull/29150)
+- Fix incorrect opaque directory permission in overlay2 [#29093](https://github.com/docker/docker/pull/29093)
+- Detect plugin content and error out on `docker pull` [#29297](https://github.com/docker/docker/pull/29297)
+
+### Swarm Mode
+
+* Update Swarmkit [#29047](https://github.com/docker/docker/pull/29047)
+  - orchestrator/global: Fix deadlock on updates [docker/swarmkit#1760](https://github.com/docker/swarmkit/pull/1760)
+  - On leader switchover, preserve the VXLAN ID for existing networks [docker/swarmkit#1773](https://github.com/docker/swarmkit/pull/1773)
+- Refuse swarm spec not named "default" [#29152](https://github.com/docker/docker/pull/29152)
+
+### Networking
+
+* Update libnetwork [#29004](https://github.com/docker/docker/pull/29004) [#29146](https://github.com/docker/docker/pull/29146)
+  - Fix panic in embedded DNS [docker/libnetwork#1561](https://github.com/docker/libnetwork/pull/1561)
+  - Fix unmarshalling panic when passing `--link-local-ip` on a global scope network [docker/libnetwork#1564](https://github.com/docker/libnetwork/pull/1564)
+  - Fix panic when a network plugin returns nil StaticRoutes [docker/libnetwork#1563](https://github.com/docker/libnetwork/pull/1563)
+  - Fix panic in osl.(*networkNamespace).DeleteNeighbor [docker/libnetwork#1555](https://github.com/docker/libnetwork/pull/1555)
+  - Fix panic in swarm networking concurrent map read/write [docker/libnetwork#1570](https://github.com/docker/libnetwork/pull/1570)
+  * Allow encrypted networks when running docker inside a container [docker/libnetwork#1502](https://github.com/docker/libnetwork/pull/1502)
+  - Do not block autoallocation of IPv6 pool [docker/libnetwork#1538](https://github.com/docker/libnetwork/pull/1538)
+  - Set timeout for netlink calls [docker/libnetwork#1557](https://github.com/docker/libnetwork/pull/1557)
+  - Increase networking local store timeout to one minute [docker/libkv#140](https://github.com/docker/libkv/pull/140)
+  - Fix a panic in libnetwork.(*sandbox).execFunc [docker/libnetwork#1556](https://github.com/docker/libnetwork/pull/1556)
+  - Honor icc=false for internal networks [docker/libnetwork#1525](https://github.com/docker/libnetwork/pull/1525)
+
+### Logging
+
+* Update syslog log driver [#29150](https://github.com/docker/docker/pull/29150)
+
+### Contrib
+
+- Run "dnf upgrade" before installing on Fedora [#29150](https://github.com/docker/docker/pull/29150)
+- Add build-date back to RPM packages [#29150](https://github.com/docker/docker/pull/29150)
+- Deb package filename changed to include the distro, to distinguish between distro code names [#27829](https://github.com/docker/docker/pull/27829)
+
+## 1.12.3 (2016-10-26)
+
+**IMPORTANT**: Docker 1.12 ships with an updated systemd unit file for rpm
+based installs (which includes RHEL, Fedora, CentOS, and Oracle Linux 7). When
+upgrading from an older version of docker, the upgrade process may not
+automatically install the updated version of the unit file, or may fail to
+start the docker service if:
+
+- the systemd unit file (`/usr/lib/systemd/system/docker.service`) contains local changes, or
+- a systemd drop-in file is present, and contains `-H fd://` in the `ExecStart` directive
+
+Starting the docker service will produce an error:
+
+    Failed to start docker.service: Unit docker.socket failed to load: No such file or directory.
+
+or
+
+    no sockets found via socket activation: make sure the service was started by systemd.
+
+To resolve this:
+
+- Back up the current version of the unit file, and replace the file with the
+  [version that ships with docker 1.12](https://raw.githubusercontent.com/docker/docker/v1.12.0/contrib/init/systemd/docker.service.rpm)
+- Remove the `Requires=docker.socket` directive from the `/usr/lib/systemd/system/docker.service` file if present
+- Remove `-H fd://` from the `ExecStart` directive (both in the main unit file, and in any drop-in files present).
+
+After making those changes, run `sudo systemctl daemon-reload`, and `sudo
+systemctl restart docker` to reload the changes and (re)start the docker daemon.
+
+
+### Runtime
+
+- Fix ambient capability usage in containers (CVE-2016-8867) [#27610](https://github.com/docker/docker/pull/27610)
+- Prevent a deadlock in libcontainerd for Windows [#27136](https://github.com/docker/docker/pull/27136)
+- Fix error reporting in CopyFileWithTar [#27075](https://github.com/docker/docker/pull/27075)
+* Reset health status to starting when a container is restarted [#27387](https://github.com/docker/docker/pull/27387)
+* Properly handle shared mount propagation in storage directory [#27609](https://github.com/docker/docker/pull/27609)
+- Fix `docker exec` [#27610](https://github.com/docker/docker/pull/27610)
+- Fix backward compatibility with containerd's events log [#27693](https://github.com/docker/docker/pull/27693)
+
+### Swarm Mode
+
+- Fix conversion of restart-policy [#27062](https://github.com/docker/docker/pull/27062)
+* Update Swarmkit [#27554](https://github.com/docker/docker/pull/27554)
+  * Avoid restarting a task that has already been restarted [docker/swarmkit#1305](https://github.com/docker/swarmkit/pull/1305)
+  * Allow duplicate published ports when they use different protocols [docker/swarmkit#1632](https://github.com/docker/swarmkit/pull/1632)
+  * Allow multiple randomly assigned published ports on a service [docker/swarmkit#1657](https://github.com/docker/swarmkit/pull/1657)
+  - Fix panic when allocations happen at init time [docker/swarmkit#1651](https://github.com/docker/swarmkit/pull/1651)
+
+### Networking
+
+* Update libnetwork [#27559](https://github.com/docker/docker/pull/27559)
+  - Fix race in serializing sandbox to string [docker/libnetwork#1495](https://github.com/docker/libnetwork/pull/1495)
+  - Fix race during deletion [docker/libnetwork#1503](https://github.com/docker/libnetwork/pull/1503)
+  * Reset endpoint port info on connectivity revoke in bridge driver [docker/libnetwork#1504](https://github.com/docker/libnetwork/pull/1504)
+  - Fix a deadlock in networking code [docker/libnetwork#1507](https://github.com/docker/libnetwork/pull/1507)
+  - Fix a race in load balancer state [docker/libnetwork#1512](https://github.com/docker/libnetwork/pull/1512)
+
+### Logging
+
+* Update fluent-logger-golang to v1.2.1 [#27474](https://github.com/docker/docker/pull/27474)
+
+### Contrib
+
+* Update buildtags for armhf ubuntu-trusty [#27327](https://github.com/docker/docker/pull/27327)
+* Add AppArmor to runc buildtags for armhf [#27421](https://github.com/docker/docker/pull/27421)
+
+## 1.12.2 (2016-10-11)
+
+**IMPORTANT**: Docker 1.12 ships with an updated systemd unit file for rpm
+based installs (which includes RHEL, Fedora, CentOS, and Oracle Linux 7). When
+upgrading from an older version of docker, the upgrade process may not
+automatically install the updated version of the unit file, or may fail to
+start the docker service if:
+
+- the systemd unit file (`/usr/lib/systemd/system/docker.service`) contains local changes, or
+- a systemd drop-in file is present, and contains `-H fd://` in the `ExecStart` directive
+
+Starting the docker service will produce an error:
+
+    Failed to start docker.service: Unit docker.socket failed to load: No such file or directory.
+
+or
+
+    no sockets found via socket activation: make sure the service was started by systemd.
+
+To resolve this:
+
+- Back up the current version of the unit file, and replace the file with the
+  [version that ships with docker 1.12](https://raw.githubusercontent.com/docker/docker/v1.12.0/contrib/init/systemd/docker.service.rpm)
+- Remove the `Requires=docker.socket` directive from the `/usr/lib/systemd/system/docker.service` file if present
+- Remove `-H fd://` from the `ExecStart` directive (both in the main unit file, and in any drop-in files present).
+
+After making those changes, run `sudo systemctl daemon-reload`, and `sudo
+systemctl restart docker` to reload the changes and (re)start the docker daemon.
+
+
+### Runtime
+
+- Fix a panic due to a race condition filtering `docker ps` [#26049](https://github.com/docker/docker/pull/26049)
+* Implement retry logic to prevent "Unable to remove filesystem" errors when using the aufs storage driver [#26536](https://github.com/docker/docker/pull/26536)
+* Prevent devicemapper from removing device symlinks if `dm.use_deferred_removal` is enabled [#24740](https://github.com/docker/docker/pull/24740)
+- Fix an issue where the CLI did not return correct exit codes if a command was run with invalid options [#26777](https://github.com/docker/docker/pull/26777)
+- Fix a panic due to a bug in stdout / stderr processing in health checks [#26507](https://github.com/docker/docker/pull/26507)
+- Fix exec's children handling [#26874](https://github.com/docker/docker/pull/26874)
+- Fix exec form of HEALTHCHECK CMD [#26208](https://github.com/docker/docker/pull/26208)
+
+### Networking
+
+- Fix a daemon start panic on armv5 [#24315](https://github.com/docker/docker/issues/24315)
+* Vendor libnetwork [#26879](https://github.com/docker/docker/pull/26879) [#26953](https://github.com/docker/docker/pull/26953)
+  * Avoid returning early on agent join failures [docker/libnetwork#1473](https://github.com/docker/libnetwork/pull/1473)
+  - Fix service published port cleanup issues [docker/libnetwork#1432](https://github.com/docker/libnetwork/pull/1432) [docker/libnetwork#1433](https://github.com/docker/libnetwork/pull/1433)
+  * Recover properly from transient gossip failures [docker/libnetwork#1446](https://github.com/docker/libnetwork/pull/1446)
+  * Disambiguate node names known to gossip cluster to avoid node name collision [docker/libnetwork#1451](https://github.com/docker/libnetwork/pull/1451)
+  * Honor user-provided listen address for gossip [docker/libnetwork#1460](https://github.com/docker/libnetwork/pull/1460)
+  * Allow reachability via published port across services on the same host [docker/libnetwork#1398](https://github.com/docker/libnetwork/pull/1398)
+  * Change the ingress sandbox name from random id to just `ingress_sbox` [docker/libnetwork#1449](https://github.com/docker/libnetwork/pull/1449)
+  - Disable service discovery in ingress network [docker/libnetwork#1489](https://github.com/docker/libnetwork/pull/1489)
+
+### Swarm Mode
+
+* Fix remote detection of a node's address when it joins the cluster [#26211](https://github.com/docker/docker/pull/26211)
+* Vendor SwarmKit [#26765](https://github.com/docker/docker/pull/26765)
+  * Bounce session after failed status update [docker/swarmkit#1539](https://github.com/docker/swarmkit/pull/1539)
+  - Fix possible raft deadlocks [docker/swarmkit#1537](https://github.com/docker/swarmkit/pull/1537)
+  - Fix panic and endpoint leak when a service is updated with no endpoints [docker/swarmkit#1481](https://github.com/docker/swarmkit/pull/1481)
+  * Produce an error if the same port is published twice on `service create` or `service update` [docker/swarmkit#1495](https://github.com/docker/swarmkit/pull/1495)
+  - Fix an issue where changes to a service were not detected, resulting in the service not being updated [docker/swarmkit#1497](https://github.com/docker/swarmkit/pull/1497)
+  - Do not allow service creation on ingress network [docker/swarmkit#1600](https://github.com/docker/swarmkit/pull/1600)
+
+### Contrib
+
+* Update the Debian sysv-init script to use `dockerd` instead of `docker daemon` [#25869](https://github.com/docker/docker/pull/25869)
+* Improve stability when running the docker client on macOS Sierra [#26875](https://github.com/docker/docker/pull/26875)
+- Fix installation on Debian Stretch [#27184](https://github.com/docker/docker/pull/27184)
+
+### Windows
+
+- Fix an issue where arrow-key navigation did not work when running the docker client in ConEmu [#25578](https://github.com/docker/docker/pull/25578)
+
+## 1.12.1 (2016-08-18)
+
+**IMPORTANT**: Docker 1.12 ships with an updated systemd unit file for rpm
+based installs (which includes RHEL, Fedora, CentOS, and Oracle Linux 7). When
+upgrading from an older version of docker, the upgrade process may not
+automatically install the updated version of the unit file, or may fail to
+start the docker service if:
+
+- the systemd unit file (`/usr/lib/systemd/system/docker.service`) contains local changes, or
+- a systemd drop-in file is present, and contains `-H fd://` in the `ExecStart` directive
+
+Starting the docker service will produce an error:
+
+    Failed to start docker.service: Unit docker.socket failed to load: No such file or directory.
+
+or
+
+    no sockets found via socket activation: make sure the service was started by systemd.
+
+To resolve this:
+
+- Back up the current version of the unit file, and replace the file with the
+  [version that ships with docker 1.12](https://raw.githubusercontent.com/docker/docker/v1.12.0/contrib/init/systemd/docker.service.rpm)
+- Remove the `Requires=docker.socket` directive from the `/usr/lib/systemd/system/docker.service` file if present
+- Remove `-H fd://` from the `ExecStart` directive (both in the main unit file, and in any drop-in files present).
+
+After making those changes, run `sudo systemctl daemon-reload`, and `sudo
+systemctl restart docker` to reload the changes and (re)start the docker daemon.
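+
+To check whether a local unit file or drop-in still carries the old
+directives, one option is to inspect the effective unit with `systemctl cat`
+(a sketch; it prints the unit together with any drop-ins):
+
+```bash
+# Print the effective unit, including drop-ins, and flag the old directives.
+systemctl cat docker.service | grep -nE 'Requires=docker\.socket|-H fd://'
+```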
+
+
+### Client
+
+* Add `Joined at` information in `node inspect --pretty` [#25512](https://github.com/docker/docker/pull/25512)
+- Fix a crash on `service inspect` [#25454](https://github.com/docker/docker/pull/25454)
+- Fix issue preventing `service update --env-add` from working as intended [#25427](https://github.com/docker/docker/pull/25427)
+- Fix issue preventing `service update --publish-add` from working as intended [#25428](https://github.com/docker/docker/pull/25428)
+- Remove `service update --network-add` and `service update --network-rm` flags
+  because this feature is not yet implemented in 1.12, but was inadvertently added
+  to the client in 1.12.0 [#25646](https://github.com/docker/docker/pull/25646)
+
+### Contrib
+
++ Official ARM installation for Debian Jessie, Ubuntu Trusty, and Raspbian Jessie [#24815](https://github.com/docker/docker/pull/24815) [#25637](https://github.com/docker/docker/pull/25637)
+- Add SELinux policy per distro/version, fixing an issue that prevented successful installation on Fedora 24 and Oracle Linux [#25334](https://github.com/docker/docker/pull/25334) [#25593](https://github.com/docker/docker/pull/25593)
+
+### Networking
+
+- Fix issue that prevented containers from being accessed by hostname with Docker overlay driver in Swarm Mode [#25603](https://github.com/docker/docker/pull/25603) [#25648](https://github.com/docker/docker/pull/25648)
+- Fix random network issues on service with published port [#25603](https://github.com/docker/docker/pull/25603)
+- Fix unreliable inter-service communication after scaling down and up [#25603](https://github.com/docker/docker/pull/25603)
+- Fix issue where removing all tasks on a node and adding them back breaks connectivity with other services [#25603](https://github.com/docker/docker/pull/25603)
+- Fix issue where a task that fails to start results in a race, causing a `network xxx not found` error that masks the actual error [#25550](https://github.com/docker/docker/pull/25550)
+- Relax validation of SRV records for external services that use SRV records not formatted according to RFC 2782 [#25739](https://github.com/docker/docker/pull/25739)
+
+### Plugins (experimental)
+
+* Make daemon events listen for plugin lifecycle events [#24760](https://github.com/docker/docker/pull/24760)
+* Check for plugin state before enabling plugin [#25033](https://github.com/docker/docker/pull/25033)
+- Remove plugin root from filesystem on `plugin rm` [#25187](https://github.com/docker/docker/pull/25187)
+- Prevent deadlock when more than one plugin is installed [#25384](https://github.com/docker/docker/pull/25384)
+
+### Runtime
+
+* Mask join tokens in daemon logs [#25346](https://github.com/docker/docker/pull/25346)
+- Fix `docker ps --filter` causing the results to no longer be sorted by creation time [#25387](https://github.com/docker/docker/pull/25387)
+- Fix various crashes [#25053](https://github.com/docker/docker/pull/25053)
+
+### Security
+
+* Add `/proc/timer_list` to the masked paths list to prevent an information leak from the host [#25630](https://github.com/docker/docker/pull/25630)
+* Allow systemd to run with only `--cap-add SYS_ADMIN` rather than having to also add `--cap-add DAC_READ_SEARCH` or disabling seccomp filtering [#25567](https://github.com/docker/docker/pull/25567)
+
+### Swarm
+
+- Fix an issue where the swarm can get stuck electing a new leader after quorum is lost [#25055](https://github.com/docker/docker/issues/25055)
+- Fix unwanted rescheduling of containers after a leader failover [#25017](https://github.com/docker/docker/issues/25017)
+- Change swarm root CA key to P256 curve [swarmkit#1376](https://github.com/docker/swarmkit/pull/1376)
+- Allow forced removal of a node from a swarm [#25159](https://github.com/docker/docker/pull/25159)
+- Fix connection leak when a node leaves a swarm [swarmkit#1277](https://github.com/docker/swarmkit/pull/1277)
+- Backdate swarm certificates by one hour to tolerate more clock skew [swarmkit#1243](https://github.com/docker/swarmkit/pull/1243)
+- Avoid high CPU use with many unschedulable tasks [swarmkit#1287](https://github.com/docker/swarmkit/pull/1287)
+- Fix issue with global tasks not starting up [swarmkit#1295](https://github.com/docker/swarmkit/pull/1295)
+- Garbage collect raft logs [swarmkit#1327](https://github.com/docker/swarmkit/pull/1327)
+
+### Volume
+
+- Persist local volume options after a daemon restart [#25316](https://github.com/docker/docker/pull/25316)
+- Fix an issue where the mount ID was not returned on volume unmount [#25333](https://github.com/docker/docker/pull/25333)
+- Fix an issue where a volume mount could inadvertently create a bind mount [#25309](https://github.com/docker/docker/pull/25309)
+- `docker service create --mount type=bind,...` now correctly validates that the source path exists, instead of creating it [#25494](https://github.com/docker/docker/pull/25494)
+
+## 1.12.0 (2016-07-28)
+
+
+**IMPORTANT**: Docker 1.12.0 ships with an updated systemd unit file for rpm
+based installs (which includes RHEL, Fedora, CentOS, and Oracle Linux 7). When
+upgrading from an older version of docker, the upgrade process may not
+automatically install the updated version of the unit file, or may fail to
+start the docker service if:
+
+- the systemd unit file (`/usr/lib/systemd/system/docker.service`) contains local changes, or
+- a systemd drop-in file is present, and contains `-H fd://` in the `ExecStart` directive
+
+Starting the docker service will produce an error:
+
+    Failed to start docker.service: Unit docker.socket failed to load: No such file or directory.
+
+or
+
+    no sockets found via socket activation: make sure the service was started by systemd.
+
+To resolve this:
+
+- Back up the current version of the unit file, and replace the file with the
+  [version that ships with docker 1.12](https://raw.githubusercontent.com/docker/docker/v1.12.0/contrib/init/systemd/docker.service.rpm)
+- Remove the `Requires=docker.socket` directive from the `/usr/lib/systemd/system/docker.service` file if present
+- Remove `-H fd://` from the `ExecStart` directive (both in the main unit file, and in any drop-in files present).
+
+After making those changes, run `sudo systemctl daemon-reload`, and `sudo
+systemctl restart docker` to reload the changes and (re)start the docker daemon.
+
+**IMPORTANT**: With Docker 1.12, a Linux docker installation now has two
+additional binaries: `dockerd` and `docker-proxy`. If you have scripts for
+installing docker, please make sure to update them accordingly.
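+
+A quick way to confirm that install scripts picked up the new layout (a
+sketch; adjust to your packaging):
+
+```bash
+# Sketch: verify that the upgraded installation provides the new binaries.
+for bin in dockerd docker-proxy; do
+  command -v "$bin" >/dev/null || echo "warning: $bin not found in PATH"
+done
+dockerd --version
+```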
+
+### Builder
+
++ New `HEALTHCHECK` Dockerfile instruction to support user-defined healthchecks [#23218](https://github.com/docker/docker/pull/23218)
++ New `SHELL` Dockerfile instruction to specify the default shell when using the shell form for commands in a Dockerfile [#22489](https://github.com/docker/docker/pull/22489)
++ Add `#escape=` Dockerfile directive to support platform-specific parsing of file paths in Dockerfile [#22268](https://github.com/docker/docker/pull/22268)
++ Add support for comments in `.dockerignore` [#23111](https://github.com/docker/docker/pull/23111)
+* Support for UTF-8 in Dockerfiles [#23372](https://github.com/docker/docker/pull/23372)
+* Skip UTF-8 BOM bytes from `Dockerfile` and `.dockerignore` if they exist [#23234](https://github.com/docker/docker/pull/23234)
+* Windows: support for `ARG` to match Linux [#22508](https://github.com/docker/docker/pull/22508)
+- Fix error message when building using a daemon with the bridge network disabled [#22932](https://github.com/docker/docker/pull/22932)
+
+### Contrib
+
+* Enable seccomp for CentOS 7 and Oracle Linux 7 [#22344](https://github.com/docker/docker/pull/22344)
+- Remove MountFlags in systemd unit to allow shared mount propagation [#22806](https://github.com/docker/docker/pull/22806)
+
+### Distribution
+
++ Add `--max-concurrent-downloads` and `--max-concurrent-uploads` daemon flags useful for situations where network connections don't support multiple downloads/uploads [#22445](https://github.com/docker/docker/pull/22445)
+* Registry operations now honor the `ALL_PROXY` environment variable [#22316](https://github.com/docker/docker/pull/22316)
+* Provide more information to the user on `docker load` [#23377](https://github.com/docker/docker/pull/23377)
+* Always save registry digest metadata about images pushed and pulled [#23996](https://github.com/docker/docker/pull/23996)
+
+### Logging
+
++ Syslog logging driver now supports DGRAM sockets [#21613](https://github.com/docker/docker/pull/21613)
++ Add `--details` option to `docker logs` to also display log tags [#21889](https://github.com/docker/docker/pull/21889)
++ Enable syslog logger to have access to env and labels [#21724](https://github.com/docker/docker/pull/21724)
++ An additional syslog-format option `rfc5424micro` to allow microsecond resolution in syslog timestamps [#21844](https://github.com/docker/docker/pull/21844)
+* Inherit the daemon log options when creating containers [#21153](https://github.com/docker/docker/pull/21153)
+* Remove `docker/` prefix from log messages tag and replace it with `{{.DaemonName}}` so that users have the option of changing the prefix [#22384](https://github.com/docker/docker/pull/22384)
+
+### Networking
+
++ Built-in virtual-IP-based internal and ingress load balancing using IPVS [#23361](https://github.com/docker/docker/pull/23361)
++ Routing mesh using ingress overlay network [#23361](https://github.com/docker/docker/pull/23361)
++ Secured multi-host overlay networking using encrypted control plane and data plane [#23361](https://github.com/docker/docker/pull/23361)
++ MacVlan driver is out of experimental [#23524](https://github.com/docker/docker/pull/23524)
++ Add `driver` filter to `network ls` [#22319](https://github.com/docker/docker/pull/22319)
++ Add `network` filter to `docker ps --filter` [#23300](https://github.com/docker/docker/pull/23300)
++ Add `--link-local-ip` flag to `create`, `run` and `network connect` to specify a container's link-local address [#23415](https://github.com/docker/docker/pull/23415)
++ Add network label filter support [#21495](https://github.com/docker/docker/pull/21495)
+* Remove dependency on external KV-store for overlay networking in swarm mode [#23361](https://github.com/docker/docker/pull/23361)
+* Add container's short-id as default network alias [#21901](https://github.com/docker/docker/pull/21901)
+* `run` options `--dns` and `--net=host` are no longer mutually exclusive [#22408](https://github.com/docker/docker/pull/22408)
+- Fix DNS issue when renaming containers with generated names [#22716](https://github.com/docker/docker/pull/22716)
+- Allow both `network inspect -f {{.Id}}` and `network inspect -f {{.ID}}` to address inconsistency with inspect output [#23226](https://github.com/docker/docker/pull/23226)
+
+### Plugins (experimental)
+
++ New `plugin` command to manage plugins with `install`, `enable`, `disable`, `rm`, `inspect`, `set` subcommands [#23446](https://github.com/docker/docker/pull/23446)
+
+### Remote API (v1.24) & Client
+
++ Split the binary into two: `docker` (client) and `dockerd` (daemon) [#20639](https://github.com/docker/docker/pull/20639)
++ Add `before` and `since` filters to `docker images --filter` [#22908](https://github.com/docker/docker/pull/22908)
++ Add `--limit` option to `docker search` [#23107](https://github.com/docker/docker/pull/23107)
++ Add `--filter` option to `docker search` [#22369](https://github.com/docker/docker/pull/22369)
++ Add security options to `docker info` output [#21172](https://github.com/docker/docker/pull/21172) [#23520](https://github.com/docker/docker/pull/23520)
++ Add insecure registries to `docker info` output [#20410](https://github.com/docker/docker/pull/20410)
++ Extend Docker authorization with TLS user information [#21556](https://github.com/docker/docker/pull/21556)
++ devicemapper: expose Minimum Thin Pool Free Space through `docker info` [#21945](https://github.com/docker/docker/pull/21945)
+* API now returns a JSON object when an error occurs, making it more consistent [#22880](https://github.com/docker/docker/pull/22880)
+- Prevent `docker run -i --restart` from hanging on exit [#22777](https://github.com/docker/docker/pull/22777)
+- Fix API/CLI discrepancy on hostname validation [#21641](https://github.com/docker/docker/pull/21641)
+- Fix discrepancy in the format of sizes in `stats` from HumanSize to BytesSize [#21773](https://github.com/docker/docker/pull/21773)
+- authz: when a request is denied, return a forbidden status code (403) [#22448](https://github.com/docker/docker/pull/22448)
+- Windows: fix tty-related display issues [#23878](https://github.com/docker/docker/pull/23878)
+
+### Runtime
+
++ Split the userland proxy to a separate binary (`docker-proxy`) [#23312](https://github.com/docker/docker/pull/23312)
++ Add `--live-restore` daemon flag to keep containers running when the daemon shuts down, and regain control on startup [#23213](https://github.com/docker/docker/pull/23213)
++ Ability to add OCI-compatible runtimes (via `--add-runtime` daemon flag) and select one with `--runtime` on `create` and `run` [#22983](https://github.com/docker/docker/pull/22983)
++ New `overlay2` graphdriver for Linux 4.0+ with multiple lower directory support [#22126](https://github.com/docker/docker/pull/22126)
++ New load/save image events [#22137](https://github.com/docker/docker/pull/22137)
++ Add support for reloading daemon configuration through systemd [#22446](https://github.com/docker/docker/pull/22446)
++ Add disk quota support for btrfs [#19651](https://github.com/docker/docker/pull/19651)
++ Add disk quota support for zfs [#21946](https://github.com/docker/docker/pull/21946)
++ Add support for `docker run --pid=container:<id>` [#22481](https://github.com/docker/docker/pull/22481)
++ Align default seccomp profile with selected capabilities [#22554](https://github.com/docker/docker/pull/22554)
++ Add a `daemon reload` event when the daemon reloads its configuration [#22590](https://github.com/docker/docker/pull/22590)
++ Add `trace` capability in the pprof profiler to show execution traces in binary form [#22715](https://github.com/docker/docker/pull/22715)
++ Add a `detach` event [#22898](https://github.com/docker/docker/pull/22898)
++ Add support for setting sysctls with `--sysctl` [#19265](https://github.com/docker/docker/pull/19265)
++ Add `--storage-opt` flag to `create` and `run` allowing `size` to be set on devicemapper [#19367](https://github.com/docker/docker/pull/19367)
++ Add `--oom-score-adjust` daemon flag with a default value of `-500` making the daemon less likely to be killed before containers [#24516](https://github.com/docker/docker/pull/24516)
+* Undeprecate the `-c` short alias of `--cpu-shares` on `run`, `build`, `create`, `update` [#22621](https://github.com/docker/docker/pull/22621)
+* Prevent using the aufs and overlay graphdrivers on an eCryptfs mount [#23121](https://github.com/docker/docker/pull/23121)
+- Fix issues with tmpfs mount ordering [#22329](https://github.com/docker/docker/pull/22329)
+- Created containers are no longer listed on `docker ps -a -f exited=0` [#21947](https://github.com/docker/docker/pull/21947)
+- Fix an issue where containers are stuck in a "Removal In Progress" state [#22423](https://github.com/docker/docker/pull/22423)
+- Fix bug that was returning an HTTP 500 instead of a 400 when not specifying a command on run/create [#22762](https://github.com/docker/docker/pull/22762)
+- Fix bug with `--detach-keys` whereby input matching a prefix of the detach key was not preserved [#22943](https://github.com/docker/docker/pull/22943)
+- SELinux labeling is now disabled when using `--privileged` mode [#22993](https://github.com/docker/docker/pull/22993)
+- If volume-mounted into a container, `/etc/hosts`, `/etc/resolv.conf`, `/etc/hostname` are no longer SELinux-relabeled [#22993](https://github.com/docker/docker/pull/22993)
+- Fix inconsistency in `--tmpfs` behavior regarding mount options [#22438](https://github.com/docker/docker/pull/22438)
+- Fix an issue where the daemon hangs at startup [#23148](https://github.com/docker/docker/pull/23148)
+- Ignore SIGPIPE events to prevent journald restarts from crashing docker in some cases [#22460](https://github.com/docker/docker/pull/22460)
+- Containers are not removed from stats list on error [#20835](https://github.com/docker/docker/pull/20835)
+- Fix `on-failure` restart policy when daemon restarts [#20853](https://github.com/docker/docker/pull/20853)
+- Fix an issue with `stats` when a container is using another container's network [#21904](https://github.com/docker/docker/pull/21904)
+
+### Swarm Mode
+
++ New `swarm` command to manage swarms with `init`, `join`, `join-token`, `leave`, `update` subcommands [#23361](https://github.com/docker/docker/pull/23361) [#24823](https://github.com/docker/docker/pull/24823)
++ New `service` command to manage swarm-wide services with `create`, `inspect`, `update`, `rm`, `ps` subcommands [#23361](https://github.com/docker/docker/pull/23361) [#25140](https://github.com/docker/docker/pull/25140)
++ New `node` command to manage nodes with `accept`, `promote`, `demote`, `inspect`, `update`, `ps`, `ls` and `rm` subcommands [#23361](https://github.com/docker/docker/pull/23361) [#25140](https://github.com/docker/docker/pull/25140)
++ (experimental) New `stack` and `deploy` commands to manage and deploy multi-service applications [#23522](https://github.com/docker/docker/pull/23522) [#25140](https://github.com/docker/docker/pull/25140)
+
+### Volume
+
++ Add support for local and global volume scopes (analogous to network scopes) [#22077](https://github.com/docker/docker/pull/22077)
++ Allow volume drivers to provide a `Status` field [#21006](https://github.com/docker/docker/pull/21006)
++ Add name/driver filter support for volume [#21361](https://github.com/docker/docker/pull/21361)
+* Mount/Unmount operations now receive an opaque ID to allow volume drivers to differentiate between two callers [#21015](https://github.com/docker/docker/pull/21015)
+- Fix issue preventing a volume from being removed in a corner case [#22103](https://github.com/docker/docker/pull/22103)
+- Windows: Enable auto-creation of host-path to match Linux [#22094](https://github.com/docker/docker/pull/22094)
+
+
+### Deprecation
+
+* Environment variables `DOCKER_CONTENT_TRUST_OFFLINE_PASSPHRASE` and `DOCKER_CONTENT_TRUST_TAGGING_PASSPHRASE` have been renamed
+  to `DOCKER_CONTENT_TRUST_ROOT_PASSPHRASE` and `DOCKER_CONTENT_TRUST_REPOSITORY_PASSPHRASE` respectively [#22574](https://github.com/docker/docker/pull/22574)
+* Remove deprecated `syslog-tag`, `gelf-tag`, `fluentd-tag` log options in favor of the more generic `tag` one [#22620](https://github.com/docker/docker/pull/22620)
+* Remove deprecated feature of passing HostConfig at API container start [#22570](https://github.com/docker/docker/pull/22570)
+* Remove deprecated `-f`/`--force` flag on `docker tag` [#23090](https://github.com/docker/docker/pull/23090)
+* Remove deprecated `/containers/<id>/copy` endpoint [#22149](https://github.com/docker/docker/pull/22149)
+* Remove deprecated `docker ps` flags `--since` and `--before` [#22138](https://github.com/docker/docker/pull/22138)
+* Deprecate the old 3-args form of `docker import` [#23273](https://github.com/docker/docker/pull/23273)
+
+## 1.11.2 (2016-05-31)
+
+### Networking
+
+- Fix a stale endpoint issue on overlay networks during ungraceful restart ([#23015](https://github.com/docker/docker/pull/23015))
+- Fix an issue where the wrong port could be reported by `docker inspect/ps/port` ([#22997](https://github.com/docker/docker/pull/22997))
+
+### Runtime
+
+- Fix a potential panic when running `docker build` ([#23032](https://github.com/docker/docker/pull/23032))
+- Fix interpretation of `--user` parameter ([#22998](https://github.com/docker/docker/pull/22998))
+- Fix a bug preventing container statistics from being correctly reported ([#22955](https://github.com/docker/docker/pull/22955))
+- Fix an issue preventing containers from being restarted after a daemon restart ([#22947](https://github.com/docker/docker/pull/22947))
+- Fix issues when running 32-bit binaries on Ubuntu 16.04 ([#22922](https://github.com/docker/docker/pull/22922))
+- Fix a possible deadlock on image deletion and container attach ([#22918](https://github.com/docker/docker/pull/22918))
+- Fix an issue where containers fail to start after a daemon restart if they depend on a containerized cluster store ([#22561](https://github.com/docker/docker/pull/22561))
+- Fix an issue causing `docker ps` to hang on CentOS when using devicemapper ([#22168](https://github.com/docker/docker/pull/22168), [#23067](https://github.com/docker/docker/pull/23067))
+
+### Volume
+
++ Add support for local and global volume scopes (analogous to network scopes) [#22077](https://github.com/docker/docker/pull/22077)
++ Allow volume drivers to provide a `Status` field [#21006](https://github.com/docker/docker/pull/21006)
++ Add name/driver filter support for volume [#21361](https://github.com/docker/docker/pull/21361)
+* Mount/Unmount operations now receive an opaque ID to allow volume drivers to differentiate between two callers [#21015](https://github.com/docker/docker/pull/21015)
+- Fix an issue preventing a volume from being removed in a corner case [#22103](https://github.com/docker/docker/pull/22103)
+- Windows: Enable auto-creation of host-path to match Linux [#22094](https://github.com/docker/docker/pull/22094)
+
+### Deprecation
+
+* Environment variables `DOCKER_CONTENT_TRUST_OFFLINE_PASSPHRASE` and `DOCKER_CONTENT_TRUST_TAGGING_PASSPHRASE` have been renamed to `DOCKER_CONTENT_TRUST_ROOT_PASSPHRASE` and `DOCKER_CONTENT_TRUST_REPOSITORY_PASSPHRASE` respectively [#22574](https://github.com/docker/docker/pull/22574)
+* Remove deprecated `syslog-tag`, `gelf-tag`, `fluentd-tag` log options in favor of the more generic `tag` one [#22620](https://github.com/docker/docker/pull/22620)
+* Remove deprecated feature of passing HostConfig at API container start [#22570](https://github.com/docker/docker/pull/22570)
+* Remove deprecated `-f`/`--force` flag on docker tag [#23090](https://github.com/docker/docker/pull/23090)
+* Remove deprecated `/containers/<id>/copy` endpoint [#22149](https://github.com/docker/docker/pull/22149)
+* Remove deprecated `docker ps` flags `--since` and `--before` [#22138](https://github.com/docker/docker/pull/22138)
+* Deprecate the old 3-args form of `docker import` [#23273](https://github.com/docker/docker/pull/23273)
+
+## 1.11.2 (2016-05-31)
+
+### Networking
+
+- Fix a stale endpoint issue on overlay networks during ungraceful restart ([#23015](https://github.com/docker/docker/pull/23015))
+- Fix an issue where the wrong port could be reported by `docker inspect/ps/port` ([#22997](https://github.com/docker/docker/pull/22997))
+
+### Runtime
+
+- Fix a potential panic when running `docker build` ([#23032](https://github.com/docker/docker/pull/23032))
+- Fix interpretation of the `--user` parameter ([#22998](https://github.com/docker/docker/pull/22998))
+- Fix a bug preventing container statistics from being correctly reported ([#22955](https://github.com/docker/docker/pull/22955))
+- Fix an issue preventing containers from being restarted after a daemon restart ([#22947](https://github.com/docker/docker/pull/22947))
+- Fix issues when running 32-bit binaries on Ubuntu 16.04 ([#22922](https://github.com/docker/docker/pull/22922))
+- Fix a possible deadlock on image deletion and container attach ([#22918](https://github.com/docker/docker/pull/22918))
+- Fix an issue where containers fail to start after a daemon restart if they depend on a containerized cluster store ([#22561](https://github.com/docker/docker/pull/22561))
+- Fix an issue causing `docker ps` to hang on CentOS when using devicemapper ([#22168](https://github.com/docker/docker/pull/22168), [#23067](https://github.com/docker/docker/pull/23067))
+- Fix a bug preventing `docker exec` into a container when using devicemapper ([#22168](https://github.com/docker/docker/pull/22168), [#23067](https://github.com/docker/docker/pull/23067))
+
+## 1.11.1 (2016-04-26)
+
+### Distribution
+
+- Fix schema2 manifest media type to be of type `application/vnd.docker.container.image.v1+json` ([#21949](https://github.com/docker/docker/pull/21949))
+
+### Documentation
+
++ Add missing API documentation for changes introduced with 1.11.0 ([#22048](https://github.com/docker/docker/pull/22048))
+
+### Builder
+
+* Append labels passed to `docker build` as arguments as an implicit `LABEL` command at the end of the processed `Dockerfile` ([#22184](https://github.com/docker/docker/pull/22184))
+
+### Networking
+
+- Fix a panic that would occur when forwarding DNS queries ([#22261](https://github.com/docker/docker/pull/22261))
+- Fix an issue where OS threads could end up within an incorrect network namespace when using user-defined networks ([#22261](https://github.com/docker/docker/pull/22261))
+
+### Runtime
+
+- Fix a bug preventing the labels configuration from being reloaded via the config file ([#22299](https://github.com/docker/docker/pull/22299))
+- Fix a regression where containers mounting `/var/run` would prevent other containers from being removed ([#22256](https://github.com/docker/docker/pull/22256))
+- Fix an issue where it would be impossible to update both the `memory-swap` and `memory` values together ([#22255](https://github.com/docker/docker/pull/22255))
+- Fix a regression from 1.11.0 where the `/auth` endpoint would not initialize `serveraddress` if it is not provided ([#22254](https://github.com/docker/docker/pull/22254))
+- Add missing cleanup of container temporary files when cancelling a scheduled restart ([#22237](https://github.com/docker/docker/pull/22237))
+- Remove scary error message when no restart policy is specified ([#21993](https://github.com/docker/docker/pull/21993))
+- Fix a panic that would occur when plugins were activated via the JSON spec ([#22191](https://github.com/docker/docker/pull/22191))
+- Fix restart backoff logic to correctly reset the delay if the container ran for at least 10 seconds ([#22125](https://github.com/docker/docker/pull/22125))
+- Remove error message when a container restart gets cancelled ([#22123](https://github.com/docker/docker/pull/22123))
+- Fix an issue where `docker` would not correctly clean up after `docker exec` ([#22121](https://github.com/docker/docker/pull/22121))
+- Fix a panic that could occur when serving concurrent `docker stats` commands ([#22120](https://github.com/docker/docker/pull/22120))
+- Revert deprecation of non-existent host directories auto-creation ([#22065](https://github.com/docker/docker/pull/22065))
+- Hide misleading rpc error on daemon shutdown ([#22058](https://github.com/docker/docker/pull/22058))
+
+## 1.11.0 (2016-04-13)
+
+**IMPORTANT**: With Docker 1.11, a Linux docker installation is now made of 4 binaries (`docker`, [`docker-containerd`](https://github.com/docker/containerd), [`docker-containerd-shim`](https://github.com/docker/containerd) and [`docker-runc`](https://github.com/opencontainers/runc)). If you have scripts relying on docker being a single static binary, please make sure to update them. Interaction with the daemon otherwise stays the same, and the usage of the other binaries should be transparent.
+A Windows docker installation remains a single binary, `docker.exe`.
+
+### Builder
+
+- Fix a bug where Docker would not use the correct uid/gid when processing the `WORKDIR` command ([#21033](https://github.com/docker/docker/pull/21033))
+- Fix a bug where copy operations with userns would not use the proper uid/gid ([#20782](https://github.com/docker/docker/pull/20782), [#21162](https://github.com/docker/docker/pull/21162))
+
+### Client
+
+* Usage of the `:` separator for security options has been deprecated. `=` should be used instead ([#21232](https://github.com/docker/docker/pull/21232))
++ The client user agent is now passed to the registry on `pull`, `build`, `push`, `login` and `search` operations ([#21306](https://github.com/docker/docker/pull/21306), [#21373](https://github.com/docker/docker/pull/21373))
+* Allow setting the Domainname and Hostname separately through the API ([#20200](https://github.com/docker/docker/pull/20200))
+* Docker info will now warn users if it cannot detect the kernel version or the operating system ([#21128](https://github.com/docker/docker/pull/21128))
+- Fix an issue where `docker stats --no-stream` output could be all 0s ([#20803](https://github.com/docker/docker/pull/20803))
+- Fix a bug where some newly started containers would not appear in a running `docker stats` command ([#20792](https://github.com/docker/docker/pull/20792))
+* Post-processing is no longer enabled for linux-cgo terminals ([#20587](https://github.com/docker/docker/pull/20587))
+- Values to `--hostname` are now refused if they do not comply with [RFC1123](https://tools.ietf.org/html/rfc1123) ([#20566](https://github.com/docker/docker/pull/20566))
++ Docker learned how to use a SOCKS proxy ([#20366](https://github.com/docker/docker/pull/20366), [#18373](https://github.com/docker/docker/pull/18373))
++ Docker now supports external credential stores ([#20107](https://github.com/docker/docker/pull/20107))
+* `docker ps` now supports displaying the list of volumes mounted inside a container ([#20017](https://github.com/docker/docker/pull/20017))
+* `docker info` now also reports Docker's root directory location ([#19986](https://github.com/docker/docker/pull/19986))
+- Docker now prohibits logging in with an empty username (spaces are trimmed) ([#19806](https://github.com/docker/docker/pull/19806))
+* Docker events attributes are now sorted by key ([#19761](https://github.com/docker/docker/pull/19761))
+* `docker ps` no longer shows exported ports for stopped containers ([#19483](https://github.com/docker/docker/pull/19483))
+- Docker now cleans up after itself if a save/export command fails ([#17849](https://github.com/docker/docker/pull/17849))
+* Docker load learned how to display a progress bar ([#17329](https://github.com/docker/docker/pull/17329), [#20078](https://github.com/docker/docker/pull/20078))
+
+### Distribution
+
+- Fix a panic that occurred when pulling an image with 0 layers ([#21222](https://github.com/docker/docker/pull/21222))
+- Fix a panic that could occur on error while pushing to a registry with a misconfigured token service ([#21212](https://github.com/docker/docker/pull/21212))
++ All first-level delegation roles are now signed when doing a trusted push ([#21046](https://github.com/docker/docker/pull/21046))
++ OAuth support for registries was added ([#20970](https://github.com/docker/docker/pull/20970))
+* `docker login` now handles tokens using the implementation found in [docker/distribution](https://github.com/docker/distribution) ([#20832](https://github.com/docker/docker/pull/20832))
+* `docker login` will no longer prompt for an email ([#20565](https://github.com/docker/docker/pull/20565))
+* Docker will now fall back to registry V1 if no basic auth credentials are available ([#20241](https://github.com/docker/docker/pull/20241))
+* Docker will now try to resume layer download where it left off after a network error/timeout ([#19840](https://github.com/docker/docker/pull/19840))
+- Fix generated manifest mediaType when pushing cross-repository ([#19509](https://github.com/docker/docker/pull/19509))
+- Fix docker requesting additional push credentials when pulling an image if Content Trust is enabled ([#20382](https://github.com/docker/docker/pull/20382))
+
+### Logging
+
+- Fix a race in the journald log driver ([#21311](https://github.com/docker/docker/pull/21311))
+* Docker syslog driver now uses the RFC-5424 format when emitting logs ([#20121](https://github.com/docker/docker/pull/20121))
+* Docker GELF log driver now allows specifying the compression algorithm and level via the `gelf-compression-type` and `gelf-compression-level` options ([#19831](https://github.com/docker/docker/pull/19831))
+* Docker daemon learned to output uncolorized logs via the `--raw-logs` option ([#19794](https://github.com/docker/docker/pull/19794))
++ Docker, on Windows platform, now includes an ETW (Event Tracing for Windows) logging driver named `etwlogs` ([#19689](https://github.com/docker/docker/pull/19689))
+* Journald log driver learned how to handle tags ([#19564](https://github.com/docker/docker/pull/19564))
++ The fluentd log driver learned the following options: `fluentd-address`, `fluentd-buffer-limit`, `fluentd-retry-wait`, `fluentd-max-retries` and `fluentd-async-connect` (see the sketch after this list) ([#19439](https://github.com/docker/docker/pull/19439))
++ Docker learned to send logs to Google Cloud via the new `gcplogs` logging driver. ([#18766](https://github.com/docker/docker/pull/18766))
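+
+A hypothetical invocation of the new fluentd options; the address and retry values below are placeholders:
+
+```bash
+docker run --rm \
+  --log-driver=fluentd \
+  --log-opt fluentd-address=fluentd.example.com:24224 \
+  --log-opt fluentd-retry-wait=1s \
+  --log-opt fluentd-max-retries=30 \
+  busybox echo hello
+```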
+
+### Misc
+
++ When saving linked images together with `docker save`, a subsequent `docker load` will correctly restore their parent/child relationship ([#21385](https://github.com/docker/docker/pull/21385))
++ Support for building the Docker cli for OpenBSD was added ([#21325](https://github.com/docker/docker/pull/21325))
++ Labels can now be applied at network, volume and image creation ([#21270](https://github.com/docker/docker/pull/21270))
+* The `dockremap` user is now created as a system user ([#21266](https://github.com/docker/docker/pull/21266))
+- Fix a few response body leaks ([#21258](https://github.com/docker/docker/pull/21258))
+- Docker, when run as a service with systemd, will now properly manage the cgroups of its processes ([#20633](https://github.com/docker/docker/pull/20633))
+* `docker info` now reports the value of cgroup KernelMemory or emits a warning if it is not supported ([#20863](https://github.com/docker/docker/pull/20863))
+* `docker info` now also reports the cgroup driver in use ([#20388](https://github.com/docker/docker/pull/20388))
+* Docker completion is now available on PowerShell ([#19894](https://github.com/docker/docker/pull/19894))
+* `dockerinit` is no more ([#19490](https://github.com/docker/docker/pull/19490), [#19851](https://github.com/docker/docker/pull/19851))
++ Support for building Docker on arm64 was added ([#19013](https://github.com/docker/docker/pull/19013))
++ Experimental support for building docker.exe in a native Windows Docker installation ([#18348](https://github.com/docker/docker/pull/18348))
+
+### Networking
+
+- Fix panic if a node is forcibly removed from the cluster ([#21671](https://github.com/docker/docker/pull/21671))
+- Fix "error creating vxlan interface" when starting a container in a Swarm cluster ([#21671](https://github.com/docker/docker/pull/21671))
+* `docker network inspect` will now report all endpoints whether they have an active container or not ([#21160](https://github.com/docker/docker/pull/21160))
++ Experimental support for the MacVlan and IPVlan network drivers has been added ([#21122](https://github.com/docker/docker/pull/21122))
+* Output of `docker network ls` is now sorted by network name ([#20383](https://github.com/docker/docker/pull/20383))
+- Fix a bug where Docker would allow a network to be created with the reserved `default` name ([#19431](https://github.com/docker/docker/pull/19431))
+* `docker network inspect` returns whether a network is internal or not ([#19357](https://github.com/docker/docker/pull/19357))
++ Control IPv6 via explicit option when creating a network (`docker network create --ipv6`).
+  This shows up as a new `EnableIPv6` field in `docker network inspect` (see the sketch after this list) ([#17513](https://github.com/docker/docker/pull/17513))
+* Support for AAAA Records (aka IPv6 Service Discovery) in embedded DNS Server ([#21396](https://github.com/docker/docker/pull/21396))
+- Fix to not forward docker domain IPv6 queries to external servers ([#21396](https://github.com/docker/docker/pull/21396))
+* Multiple A/AAAA records from embedded DNS Server for DNS Round robin ([#21019](https://github.com/docker/docker/pull/21019))
+- Fix endpoint count inconsistency after an ungraceful daemon restart ([#21261](https://github.com/docker/docker/pull/21261))
+- Move the ownership of exposed ports and port-mapping options from Endpoint to Sandbox ([#21019](https://github.com/docker/docker/pull/21019))
+- Fixed a bug which prevented docker reload when the host is configured with `ipv6.disable=1` ([#21019](https://github.com/docker/docker/pull/21019))
+- Added a built-in nil IPAM driver ([#21019](https://github.com/docker/docker/pull/21019))
+- Fixed a bug in the iptables.Exists() logic ([#21019](https://github.com/docker/docker/pull/21019))
+- Fixed a Veth interface leak when using overlay network ([#21019](https://github.com/docker/docker/pull/21019))
+- Fixed a bug which prevented docker reload after a network delete during shutdown ([#20214](https://github.com/docker/docker/pull/20214))
+- Make sure iptables chains are recreated on firewalld reload ([#20419](https://github.com/docker/docker/pull/20419))
+- Allow passing a global datastore during config reload ([#20419](https://github.com/docker/docker/pull/20419))
+- For anonymous containers, use the alias name for IP-to-name mapping (i.e. the DNS PTR record) ([#21019](https://github.com/docker/docker/pull/21019))
+- Fix a panic when deleting an entry from the /etc/hosts file ([#21019](https://github.com/docker/docker/pull/21019))
+- Source the forwarded DNS queries from the container net namespace ([#21019](https://github.com/docker/docker/pull/21019))
+- Fix to retain the network internal mode config for bridge networks on daemon reload ([#21780](https://github.com/docker/docker/pull/21780))
+- Fix to retain IPAM driver option configs on daemon reload ([#21914](https://github.com/docker/docker/pull/21914))
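+
+A hypothetical check of the new field; the subnet value is a placeholder:
+
+```bash
+docker network create --ipv6 --subnet 2001:db8::/64 mynet6
+docker network inspect --format '{{.EnableIPv6}}' mynet6   # prints "true"
+```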
+
+### Plugins
+
+- Fix a file descriptor leak that would occur every time plugins were enumerated ([#20686](https://github.com/docker/docker/pull/20686))
+- Fix an issue where the Authz plugin would corrupt the payload body when faced with a large amount of data ([#20602](https://github.com/docker/docker/pull/20602))
+
+### Runtime
+
+- Fix a panic that could occur when cleaning up after a container started with invalid parameters ([#21716](https://github.com/docker/docker/pull/21716))
+- Fix a race with event timers stopping early ([#21692](https://github.com/docker/docker/pull/21692))
+- Fix race conditions in the layer store, potentially corrupting the map and crashing the process ([#21677](https://github.com/docker/docker/pull/21677))
+- Un-deprecate auto-creation of host directories for mounts ([#21666](https://github.com/docker/docker/pull/21666)). This feature was marked deprecated in Docker 1.9, but removing it was decided to be too much of a backward-incompatible change, so the feature is kept.
++ It is now possible for containers to share the NET and IPC namespaces when `userns` is enabled ([#21383](https://github.com/docker/docker/pull/21383))
++ `docker inspect <image>` will now expose the rootfs layers ([#21370](https://github.com/docker/docker/pull/21370))
++ Docker Windows gained a minimal `top` implementation ([#21354](https://github.com/docker/docker/pull/21354))
+* Docker learned to report the faulty executable when a container cannot be started due to its condition ([#21345](https://github.com/docker/docker/pull/21345))
+* Docker with device mapper will now refuse to run if `udev sync` is not available ([#21097](https://github.com/docker/docker/pull/21097))
+- Fix a bug where Docker would not validate the config file upon configuration reload ([#21089](https://github.com/docker/docker/pull/21089))
+- Fix a hang that would happen on attach if the initial start failed ([#21048](https://github.com/docker/docker/pull/21048))
+- Fix an issue where registry service options in the daemon configuration file were not properly taken into account ([#21045](https://github.com/docker/docker/pull/21045))
+- Fix a race between the exec and resize operations ([#21022](https://github.com/docker/docker/pull/21022))
+- Fix an issue where nanoseconds were not correctly taken into account when filtering Docker events ([#21013](https://github.com/docker/docker/pull/21013))
+- Fix the handling of a Docker command when passed a 64-byte ID ([#21002](https://github.com/docker/docker/pull/21002))
+* Docker will now return a `204` (i.e. `http.StatusNoContent`) code when it successfully deletes a network ([#20977](https://github.com/docker/docker/pull/20977))
+- Fix a bug where the daemon would wait indefinitely in case the process it was about to kill had already exited on its own ([#20967](https://github.com/docker/docker/pull/20967))
+* The devmapper driver learned the `dm.min_free_space` option. If the mapped device's free space reaches the passed value, new device creation will be prohibited. ([#20786](https://github.com/docker/docker/pull/20786))
++ Docker can now prevent processes in containers from gaining new privileges via the `--security-opt=no-new-privileges` flag (see the sketch after this list) ([#20727](https://github.com/docker/docker/pull/20727))
+- Starting a container with the `--device` option will now correctly resolve symlinks ([#20684](https://github.com/docker/docker/pull/20684))
++ Docker now relies on [`containerd`](https://github.com/docker/containerd) and [`runc`](https://github.com/opencontainers/runc) to spawn containers. ([#20662](https://github.com/docker/docker/pull/20662))
+- Fix docker configuration reloading to only alter values present in the given config file ([#20604](https://github.com/docker/docker/pull/20604))
++ Docker now allows setting a container hostname via the `--hostname` flag when `--net=host` ([#20177](https://github.com/docker/docker/pull/20177))
++ Docker now allows executing privileged containers while running with `--userns-remap` if both `--privileged` and the new `--userns=host` flag are specified ([#20111](https://github.com/docker/docker/pull/20111))
+- Fix Docker not correctly cleaning up old containers upon restarting after a crash ([#19679](https://github.com/docker/docker/pull/19679))
+* Docker will now error out if it doesn't recognize a configuration key within the config file ([#19517](https://github.com/docker/docker/pull/19517))
+- Fix container loading, on daemon startup, when containers depend on a plugin running within a container ([#19500](https://github.com/docker/docker/pull/19500))
+* `docker update` learned how to change a container's restart policy ([#19116](https://github.com/docker/docker/pull/19116))
+* `docker inspect` now also returns a new `State` field containing the container state in a human-readable way (i.e. one of `created`, `restarting`, `running`, `paused`, `exited` or `dead`) ([#18966](https://github.com/docker/docker/pull/18966))
++ Docker learned to limit the number of active pids (i.e. processes) within the container via the `--pids-limit` flag (see the sketch after this list). NOTE: This requires `CGROUP_PIDS=y` to be in the kernel configuration. ([#18697](https://github.com/docker/docker/pull/18697))
+- `docker load` now has a `--quiet` option to suppress the load output ([#20078](https://github.com/docker/docker/pull/20078))
+- Fix a bug in neighbor discovery for IPv6 peers ([#20842](https://github.com/docker/docker/pull/20842))
+- Fix a panic during cleanup if a container was started with invalid options ([#21802](https://github.com/docker/docker/pull/21802))
+- Fix a situation where a container cannot be stopped if the terminal is closed ([#21840](https://github.com/docker/docker/pull/21840))
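+
+A hypothetical run combining the new `--pids-limit` and `--security-opt=no-new-privileges` flags; the limit value is illustrative:
+
+```bash
+docker run --rm \
+  --pids-limit 100 \
+  --security-opt=no-new-privileges \
+  busybox sh -c 'grep NoNewPrivs /proc/self/status'
+```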
+
+### Security
+
+* Objects with the `pcp_pmcd_t` selinux type were given management access to `/var/lib/docker(/.*)?` ([#21370](https://github.com/docker/docker/pull/21370))
+* `restart_syscall`, `copy_file_range`, `mlock2` joined the list of allowed calls in the default seccomp profile ([#21117](https://github.com/docker/docker/pull/21117), [#21262](https://github.com/docker/docker/pull/21262))
+* `send`, `recv` and `x32` were added to the list of allowed syscalls and arch in the default seccomp profile ([#19432](https://github.com/docker/docker/pull/19432))
+* Docker Content Trust now requests the server to perform snapshot signing ([#21046](https://github.com/docker/docker/pull/21046))
+* Support for using YubiKeys for Content Trust signing has been moved out of experimental ([#21591](https://github.com/docker/docker/pull/21591))
+
+### Volumes
+
+* Output of `docker volume ls` is now sorted by volume name ([#20389](https://github.com/docker/docker/pull/20389))
+* Local volumes can now accept options similar to the unix `mount` tool ([#20262](https://github.com/docker/docker/pull/20262))
+- Fix an issue where a one-letter directory name could not be used as the source for volumes ([#21106](https://github.com/docker/docker/pull/21106))
++ `docker run -v` now accepts a new flag `nocopy`. This tells the runtime not to copy the container path content into the volume (which is the default behavior) ([#21223](https://github.com/docker/docker/pull/21223))
+
+## 1.10.3 (2016-03-10)
+
+### Runtime
+
+- Fix Docker client exiting with an "Unrecognized input header" error [#20706](https://github.com/docker/docker/pull/20706)
+- Fix Docker exiting if Exec is started with both `AttachStdin` and `Detach` [#20647](https://github.com/docker/docker/pull/20647)
+
+### Distribution
+
+- Fix a crash when pushing multiple images sharing the same layers to the same repository in parallel [#20831](https://github.com/docker/docker/pull/20831)
+- Fix a panic when pushing images to a registry which uses a misconfigured token service [#21030](https://github.com/docker/docker/pull/21030)
+
+### Plugin system
+
+- Fix issue preventing volume plugins from starting when SELinux is enabled [#20834](https://github.com/docker/docker/pull/20834)
+- Prevent Docker from exiting if a volume plugin returns a null response for Get requests [#20682](https://github.com/docker/docker/pull/20682)
+- Fix plugin system leaking file descriptors if a plugin has an error [#20680](https://github.com/docker/docker/pull/20680)
+
+### Security
+
+- Fix linux32 emulation failing during docker build [#20672](https://github.com/docker/docker/pull/20672)
+  It was due to the `personality` syscall being blocked by the default seccomp profile.
+- Fix Oracle XE 10g failing to start in a container [#20981](https://github.com/docker/docker/pull/20981)
+  It was due to the `ipc` syscall being blocked by the default seccomp profile.
+- Fix user namespaces not working on Linux From Scratch [#20685](https://github.com/docker/docker/pull/20685)
+- Fix issue preventing the daemon from starting if userns is enabled and the `subuid` or `subgid` files contain comments [#20725](https://github.com/docker/docker/pull/20725)
+
+## 1.10.2 (2016-02-22)
+
+### Runtime
+
+- Prevent systemd from deleting containers' cgroups when its configuration is reloaded [#20518](https://github.com/docker/docker/pull/20518)
+- Fix SELinux issues by disregarding `--read-only` when mounting `/dev/mqueue` [#20333](https://github.com/docker/docker/pull/20333)
+- Fix chown permissions used during `docker cp` when userns is used [#20446](https://github.com/docker/docker/pull/20446)
+- Fix configuration loading issue with all booleans defaulting to `true` [#20471](https://github.com/docker/docker/pull/20471)
+- Fix occasional panic with `docker logs -f` [#20522](https://github.com/docker/docker/pull/20522)
+
+### Distribution
+
+- Keep layer reference if deletion failed to avoid a badly inconsistent state [#20513](https://github.com/docker/docker/pull/20513)
+- Gracefully handle a corner case when canceling migration [#20372](https://github.com/docker/docker/pull/20372)
+- Fix docker import on compressed data [#20367](https://github.com/docker/docker/pull/20367)
+- Fix tar-split file corruption during migration that later caused docker push and docker save to fail [#20458](https://github.com/docker/docker/pull/20458)
+
+### Networking
+
+- Fix daemon crash if embedded DNS is sent garbage [#20510](https://github.com/docker/docker/pull/20510)
+
+### Volumes
+
+- Fix issue with multiple volume references with the same name [#20381](https://github.com/docker/docker/pull/20381)
+
+### Security
+
+- Fix potential cache corruption and delegation conflict issues [#20523](https://github.com/docker/docker/pull/20523)
+
+## 1.10.1 (2016-02-11)
+
+### Runtime
+
+* Do not stop daemon on migration hard failure [#20156](https://github.com/docker/docker/pull/20156)
+- Fix various issues with migration to content-addressable images [#20058](https://github.com/docker/docker/pull/20058)
+- Fix ZFS permission bug with user namespaces [#20045](https://github.com/docker/docker/pull/20045)
+- Do not leak /dev/mqueue from the host to all containers; keep it container-specific [#19876](https://github.com/docker/docker/pull/19876) [#20133](https://github.com/docker/docker/pull/20133)
+- Fix `docker ps --filter before=...` to not show stopped containers without providing the `-a` flag [#20135](https://github.com/docker/docker/pull/20135)
+
+### Security
+
+- Fix issue preventing docker events from working properly with the authorization plugin [#20002](https://github.com/docker/docker/pull/20002)
+
+### Distribution
+
+* Add additional verifications and prevent uploading invalid data to registries [#20164](https://github.com/docker/docker/pull/20164)
+- Fix regression preventing uppercase characters in image reference hostname [#20175](https://github.com/docker/docker/pull/20175)
+
+### Networking
+
+- Fix embedded DNS for user-defined networks in the presence of firewalld [#20060](https://github.com/docker/docker/pull/20060)
+- Fix issue where removing a network during shutdown left Docker inoperable [#20181](https://github.com/docker/docker/issues/20181) [#20235](https://github.com/docker/docker/issues/20235)
+- Embedded DNS is now able to return compressed results [#20181](https://github.com/docker/docker/issues/20181)
+- Fix port-mapping issue with `userland-proxy=false` [#20181](https://github.com/docker/docker/issues/20181)
+
+### Logging
+
+- Fix bug where the tcp+tls protocol would be rejected [#20109](https://github.com/docker/docker/pull/20109)
+
+### Volumes
+
+- Fix issue whereby older volume drivers would not receive volume options [#19983](https://github.com/docker/docker/pull/19983)
+
+### Misc
+
+- Remove TasksMax from the Docker systemd service [#20167](https://github.com/docker/docker/pull/20167)
+
+## 1.10.0 (2016-02-04)
+
+**IMPORTANT**: Docker 1.10 uses a new content-addressable storage for images and layers.
+A migration is performed the first time docker is run, and can take a significant amount of time depending on the number of images present.
+Refer to this page on the wiki for more information: https://github.com/docker/docker/wiki/Engine-v1.10.0-content-addressability-migration
+We also released a cool migration utility that enables you to perform the migration before updating to reduce downtime.
+The Engine 1.10 migrator can be found on Docker Hub: https://hub.docker.com/r/docker/v1.10-migrator/
+
+### Runtime
+
++ New `docker update` command that allows updating resource constraints on running containers [#15078](https://github.com/docker/docker/pull/15078)
++ Add `--tmpfs` flag to `docker run` to create a tmpfs mount in a container [#13587](https://github.com/docker/docker/pull/13587)
++ Add `--format` flag to the `docker images` command [#17692](https://github.com/docker/docker/pull/17692)
++ Allow daemon configuration to be set in a file and hot-reloaded with the `SIGHUP` signal [#18587](https://github.com/docker/docker/pull/18587)
++ Updated docker events to include more meta-data and event types [#18888](https://github.com/docker/docker/pull/18888)
+  This change is backward compatible in the API, but not on the CLI.
++ Add `--blkio-weight-device` flag to `docker run` [#13959](https://github.com/docker/docker/pull/13959)
++ Add `--device-read-bps` and `--device-write-bps` flags to `docker run` [#14466](https://github.com/docker/docker/pull/14466)
++ Add `--device-read-iops` and `--device-write-iops` flags to `docker run` [#15879](https://github.com/docker/docker/pull/15879)
++ Add `--oom-score-adj` flag to `docker run` [#16277](https://github.com/docker/docker/pull/16277)
++ Add `--detach-keys` flag to `attach`, `run`, `start` and `exec` commands to override the default key sequence that detaches from a container (see the sketch after this list) [#15666](https://github.com/docker/docker/pull/15666)
++ Add `--shm-size` flag to `run`, `create` and `build` to set the size of `/dev/shm` [#16168](https://github.com/docker/docker/pull/16168)
++ Show the number of running, stopped, and paused containers in `docker info` [#19249](https://github.com/docker/docker/pull/19249)
++ Show the `OSType` and `Architecture` in `docker info` [#17478](https://github.com/docker/docker/pull/17478)
++ Add `--cgroup-parent` flag on `daemon` to set the cgroup parent for all containers [#19062](https://github.com/docker/docker/pull/19062)
++ Add `-L` flag to docker cp to follow symlinks [#16613](https://github.com/docker/docker/pull/16613)
++ New `status=dead` filter for `docker ps` [#17908](https://github.com/docker/docker/pull/17908)
+* Change `docker run` exit codes to distinguish between runtime and application errors [#14012](https://github.com/docker/docker/pull/14012)
+* Enhance `docker events --since` and `--until` to support nanoseconds and timezones [#17495](https://github.com/docker/docker/pull/17495)
+* Add `--all`/`-a` flag to `stats` to include both running and stopped containers [#16742](https://github.com/docker/docker/pull/16742)
+* Change the default cgroup-driver to `cgroupfs` [#17704](https://github.com/docker/docker/pull/17704)
+* Emit a "tag" event when tagging an image with `build -t` [#17115](https://github.com/docker/docker/pull/17115)
+* Best effort for linked containers' start order when starting the daemon [#18208](https://github.com/docker/docker/pull/18208)
+* Add ability to add multiple tags on `build` [#15780](https://github.com/docker/docker/pull/15780)
+* Permit `OPTIONS` requests against any URL, thus fixing an issue with CORS [#19569](https://github.com/docker/docker/pull/19569)
+- Fix the `--quiet` flag on `docker build` to actually be quiet [#17428](https://github.com/docker/docker/pull/17428)
+- Fix `docker images --filter dangling=false` to now show all non-dangling images [#19326](https://github.com/docker/docker/pull/19326)
+- Fix race condition causing autorestart to turn off on restart [#17629](https://github.com/docker/docker/pull/17629)
+- Recognize GPFS filesystems [#19216](https://github.com/docker/docker/pull/19216)
+- Fix obscure bug preventing containers from starting [#19751](https://github.com/docker/docker/pull/19751)
+- Forbid `exec` during container restart [#19722](https://github.com/docker/docker/pull/19722)
+- devicemapper: Increasing `--storage-opt dm.basesize` will now increase the base device size on daemon restart [#19123](https://github.com/docker/docker/pull/19123)
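+
+A minimal sketch of the new `--detach-keys` override; the key combination shown is arbitrary:
+
+```bash
+# Detach with ctrl-a,ctrl-b instead of the default ctrl-p,ctrl-q
+docker run -it --detach-keys ctrl-a,ctrl-b busybox sh
+```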
+
+### Security
+
++ Add `--userns-remap` flag to `daemon` to support user namespaces (previously in experimental) [#19187](https://github.com/docker/docker/pull/19187)
++ Add support for custom seccomp profiles in `--security-opt` (see the sketch after this list) [#17989](https://github.com/docker/docker/pull/17989)
++ Add default seccomp profile [#18780](https://github.com/docker/docker/pull/18780)
++ Add `--authorization-plugin` flag to `daemon` to customize ACLs [#15365](https://github.com/docker/docker/pull/15365)
++ Docker Content Trust now supports the ability to read and write user delegations [#18887](https://github.com/docker/docker/pull/18887)
+  This is an optional, opt-in feature that requires the explicit use of the Notary command-line utility in order to be enabled.
+  Enabling delegation support in a specific repository will break the ability of Docker 1.9 and 1.8 to pull from that repository, if content trust is enabled.
+* Allow SELinux to run in a container when using the BTRFS storage driver [#16452](https://github.com/docker/docker/pull/16452)
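+
+Hypothetical invocations of the new security flags; the profile path and remap target are placeholders:
+
+```bash
+# Apply a custom seccomp profile to a single container
+docker run --rm --security-opt seccomp=/etc/docker/my-profile.json busybox ls
+
+# Start the daemon with user namespace remapping
+docker daemon --userns-remap=default
+```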
+
+### Distribution
+
+* Use content-addressable storage for images and layers [#17924](https://github.com/docker/docker/pull/17924)
+  Note that a migration is performed the first time docker is run; it can take a significant amount of time depending on the number of images and containers present.
+  Images no longer depend on the parent chain but contain a list of layer references.
+  `docker load`/`docker save` tarballs now also contain content-addressable image configurations.
+  For more information: https://github.com/docker/docker/wiki/Engine-v1.10.0-content-addressability-migration
+* Add support for the new [manifest format ("schema2")](https://github.com/docker/distribution/blob/master/docs/spec/manifest-v2-2.md) [#18785](https://github.com/docker/docker/pull/18785)
+* Lots of improvements for push and pull: performance++, retries on failed downloads, cancelling on client disconnect [#18353](https://github.com/docker/docker/pull/18353), [#18418](https://github.com/docker/docker/pull/18418), [#19109](https://github.com/docker/docker/pull/19109)
+* Limit v1 protocol fallbacks [#18590](https://github.com/docker/docker/pull/18590)
+- Fix issue where docker could hang indefinitely waiting for a nonexistent process to pull an image [#19743](https://github.com/docker/docker/pull/19743)
+
+### Networking
+
++ Use DNS-based discovery instead of `/etc/hosts` [#19198](https://github.com/docker/docker/pull/19198)
++ Support for network-scoped alias using `--net-alias` on `run` and `--alias` on `network connect` [#19242](https://github.com/docker/docker/pull/19242)
++ Add `--ip` and `--ip6` on `run` and `network connect` to support custom IP addresses for a container in a network [#19001](https://github.com/docker/docker/pull/19001)
++ Add `--ipam-opt` to `network create` for passing custom IPAM options [#17316](https://github.com/docker/docker/pull/17316)
++ Add `--internal` flag to `network create` to restrict external access to and from the network [#19276](https://github.com/docker/docker/pull/19276)
++ Add `kv.path` option to `--cluster-store-opt` [#19167](https://github.com/docker/docker/pull/19167)
++ Add `discovery.heartbeat` and `discovery.ttl` options to `--cluster-store-opt` to configure discovery TTL and heartbeat timer [#18204](https://github.com/docker/docker/pull/18204)
++ Add `--format` flag to `network inspect` [#17481](https://github.com/docker/docker/pull/17481)
++ Add `--link` to `network connect` to provide a container-local alias [#19229](https://github.com/docker/docker/pull/19229)
++ Support for Capability exchange with remote IPAM plugins [#18775](https://github.com/docker/docker/pull/18775)
++ Add `--force` to `network disconnect` to force the container to be disconnected from the network [#19317](https://github.com/docker/docker/pull/19317)
+* Support for multi-host networking using the built-in overlay driver for all engine-supported kernels: 3.10+ [#18775](https://github.com/docker/docker/pull/18775)
+* `--link` is now supported on `docker run` for containers in user-defined networks [#19229](https://github.com/docker/docker/pull/19229)
+* Enhance `docker network rm` to allow removing multiple networks [#17489](https://github.com/docker/docker/pull/17489)
+* Include container names in `network inspect` [#17615](https://github.com/docker/docker/pull/17615)
+* Include auto-generated subnets for user-defined networks in `network inspect` [#17316](https://github.com/docker/docker/pull/17316)
+* Add `--filter` flag to `network ls` to hide predefined networks [#17782](https://github.com/docker/docker/pull/17782)
+* Add support for network connect/disconnect to stopped containers [#18906](https://github.com/docker/docker/pull/18906)
+* Add network ID to container inspect [#19323](https://github.com/docker/docker/pull/19323)
+- Fix MTU issue where Docker would not start with two or more default routes [#18108](https://github.com/docker/docker/pull/18108)
+- Fix duplicate IP address for containers [#18106](https://github.com/docker/docker/pull/18106)
+- Fix issue sometimes preventing docker from creating the bridge network [#19338](https://github.com/docker/docker/pull/19338)
+- Do not substitute 127.0.0.1 name server when using `--net=host` [#19573](https://github.com/docker/docker/pull/19573)
+
+### Logging
+
++ New logging driver for Splunk [#16488](https://github.com/docker/docker/pull/16488)
++ Add support for syslog over TCP+TLS [#18998](https://github.com/docker/docker/pull/18998)
+* Enhance `docker logs --since` and `--until` to support nanoseconds and time [#17495](https://github.com/docker/docker/pull/17495)
+* Enhance AWS logs to auto-detect region [#16640](https://github.com/docker/docker/pull/16640)
+
+### Volumes
+
++ Add support to set the mount propagation mode for a volume [#17034](https://github.com/docker/docker/pull/17034)
+* Add `ls` and `inspect` endpoints to the volume plugin API [#16534](https://github.com/docker/docker/pull/16534)
+  Existing plugins need to make use of these new APIs to satisfy users' expectations.
+  For that, please use the new MIME type `application/vnd.docker.plugins.v1.2+json` [#19549](https://github.com/docker/docker/pull/19549)
+- Fix data not being copied to named volumes [#19175](https://github.com/docker/docker/pull/19175)
+- Fix issues preventing volume drivers from being containerized [#19500](https://github.com/docker/docker/pull/19500)
+- Fix `docker volume ls --dangling=false` to now show all non-dangling volumes [#19671](https://github.com/docker/docker/pull/19671)
+- Do not remove named volumes on container removal [#19568](https://github.com/docker/docker/pull/19568)
+- Allow external volume drivers to host anonymous volumes [#19190](https://github.com/docker/docker/pull/19190)
+
+### Builder
+
++ Add support for `**` in `.dockerignore` to wildcard multiple levels of directories [#17090](https://github.com/docker/docker/pull/17090)
+- Fix handling of UTF-8 characters in Dockerfiles [#17055](https://github.com/docker/docker/pull/17055)
+- Fix permissions problem when reading from STDIN [#19283](https://github.com/docker/docker/pull/19283)
+
+### Client
+
++ Add support for overriding the API version to use via a `DOCKER_API_VERSION` environment variable (see the sketch after this list) [#15964](https://github.com/docker/docker/pull/15964)
+- Fix a bug preventing Windows clients from logging in to Docker Hub [#19891](https://github.com/docker/docker/pull/19891)
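+
+A hypothetical use of the override; the version value is illustrative:
+
+```bash
+# Pin the client to an older API version for one invocation
+DOCKER_API_VERSION=1.21 docker version
+```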
+
+### Misc
+
+* systemd: Set TasksMax in addition to LimitNPROC in the systemd service file [#19391](https://github.com/docker/docker/pull/19391)
+
+### Deprecations
+
+* Remove LXC support. The LXC driver was deprecated in Docker 1.8, and has now been removed [#17700](https://github.com/docker/docker/pull/17700)
+* Remove `--exec-driver` daemon flag, because it is no longer in use [#17700](https://github.com/docker/docker/pull/17700)
+* Remove old deprecated single-dashed long CLI flags (such as `-rm`; use `--rm` instead) [#17724](https://github.com/docker/docker/pull/17724)
+* Deprecate HostConfig at API container start [#17799](https://github.com/docker/docker/pull/17799)
+* Deprecate docker packages for newly EOL'd Linux distributions: Fedora 21 and Ubuntu 15.04 (Vivid) [#18794](https://github.com/docker/docker/pull/18794), [#18809](https://github.com/docker/docker/pull/18809)
+* Deprecate the `-f` flag for docker tag [#18350](https://github.com/docker/docker/pull/18350)
+
+## 1.9.1 (2015-11-21)
+
+### Runtime
+
+- Do not prevent the daemon from booting if images could not be restored (#17695)
+- Force IPC mount to unmount on daemon shutdown/init (#17539)
+- Turn IPC unmount errors into warnings (#17554)
+- Fix `docker stats` performance regression (#17638)
+- Clarify cryptic error message upon `docker logs` if `--log-driver=none` (#17767)
+- Fix rare panics (#17639, #17634, #17703)
+- Fix opaque (opq) whiteout problems for files with a dot prefix (#17819)
+- devicemapper: try defaulting to xfs instead of ext4 for performance reasons (#17903, #17918)
+- devicemapper: fix displayed fs in docker info (#17974)
+- selinux: only relabel if the user requested it with the `z` option (#17450, #17834)
+- Do not make network calls when normalizing names (#18014)
+
+### Client
+
+- Fix `docker login` on Windows (#17738)
+- Fix bug with `docker inspect` output when not connected to the daemon (#17715)
+- Fix `docker inspect -f {{.HostConfig.Dns}} somecontainer` (#17680)
+
+### Builder
+
+- Fix regression with symlink behavior in ADD/COPY (#17710)
+
+### Networking
+
+- Allow passing a network ID as an argument for `--net` (#17558)
+- Fix connect to host and prevent disconnect from host for the `host` network (#17476)
+- Fix `--fixed-cidr` issue when the gateway IP falls in the IP range and the IP range is not the first block in the network (#17853)
+- Restore deterministic `IPv6` generation from `MAC` address on the default `bridge` network (#17890)
+- Allow port-mapping only for endpoints created on docker run (#17858)
+- Fixed an endpoint delete issue with a possible stale sandbox (#18102)
+
+### Distribution
+
+- Correct parent chain in v2 push when v1Compatibility files on the disk are inconsistent (#18047)
+
+## 1.9.0 (2015-11-03)
+
+### Runtime
+
++ `docker stats` now returns block IO metrics (#15005)
++ `docker stats` now details network stats per interface (#15786)
++ Add `ancestor=<image>` filter to `docker ps --filter` flag to filter containers based on their ancestor images (#14570)
++ Add `label=<key>` filter to `docker ps --filter` to filter containers based on label (#16530)
++ Add `--kernel-memory` flag to `docker run` (#14006)
++ Add `--message` flag to `docker import` allowing an optional message to be specified (#15711)
++ Add `--privileged` flag to `docker exec` (#14113)
++ Add `--stop-signal` flag to `docker run` allowing the container process stop signal to be replaced (#15307)
++ Add a new `unless-stopped` restart policy (#15348)
++ Inspecting an image now returns tags (#13185)
++ Add container size information to `docker inspect` (#15796)
++ Add `RepoTags` and `RepoDigests` fields to `/images/{name:.*}/json` (#17275)
+- Remove the deprecated `/container/ps` endpoint from the API (#15972)
+- Send and document correct HTTP codes for `/exec/<id>/start` (#16250)
+- Share shm and mqueue between containers sharing the IPC namespace (#15862)
+- Event stream now shows OOM status when `--oom-kill-disable` is set (#16235)
+- Ensure special network files (/etc/hosts etc.) are read-only if bind-mounted with the `ro` option (#14965)
+- Improve `rmi` performance (#16890)
+- Do not update /etc/hosts for the default bridge network, except for links (#17325)
+- Fix conflict with duplicate container names (#17389)
+- Fix an issue with incorrect template execution in `docker inspect` (#17284)
+- DEPRECATE the `-c` short flag variant for `--cpu-shares` in docker run (#16271)
+
+### Client
+
++ Allow `docker import` to import from local files (#11907)
+
+### Builder
+
++ Add a `STOPSIGNAL` Dockerfile instruction allowing a different stop signal to be set for the container process (#15307)
++ Add an `ARG` Dockerfile instruction and a `--build-arg` flag to `docker build` that allow build-time environment variables to be added (#15182)
+- Improve cache miss performance (#16890)
+
+### Storage
+
+- devicemapper: Implement deferred deletion capability (#16381)
+
+### Networking
+
++ `docker network` exits experimental and is part of the standard release (#16645)
++ New network top-level concept, with associated subcommands and API (#16645)
+  WARNING: the API is different from the experimental API
++ Support for multiple isolated/micro-segmented networks (#16645)
++ Built-in multihost networking using a VXLAN-based overlay driver (#14071)
++ Support for third-party network plugins (#13424)
++ Ability to dynamically connect containers to multiple networks (#16645)
++ Support for user-defined IP address management via pluggable IPAM drivers (#16910)
++ Add daemon flags `--cluster-store` and `--cluster-advertise` for built-in node discovery (#16229)
++ Add `--cluster-store-opt` for setting up TLS settings (#16644)
++ Add `--dns-opt` to the daemon (#16031)
+- DEPRECATE the following container `NetworkSettings` fields in API v1.21: `EndpointID`, `Gateway`, `GlobalIPv6Address`, `GlobalIPv6PrefixLen`, `IPAddress`, `IPPrefixLen`, `IPv6Gateway` and `MacAddress`.
+  Those are now specific to the `bridge` network. Use `NetworkSettings.Networks` to inspect the networking settings of a container per network.
+
+### Volumes
+
++ New top-level `volume` subcommand and API (#14242)
+- Move API volume driver settings to host-specific config (#15798)
+- Print an error message if the volume name is not unique (#16009)
+- Ensure volumes created from Dockerfiles always use the local volume driver (#15507)
+- DEPRECATE auto-creating missing host paths for bind mounts (#16349)
+
+### Logging
+
++ Add `awslogs` logging driver for Amazon CloudWatch (#15495)
++ Add generic `tag` log option to allow customizing container/image information passed to the driver, e.g. to show container names (see the sketch below) (#15384)
+- Implement the `docker logs` endpoint for the journald driver (#13707)
+- DEPRECATE driver-specific log tags (e.g. `syslog-tag`, etc.) (#15384)
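+
+A hypothetical use of the generic `tag` option with the syslog driver; the template value is illustrative:
+
+```bash
+# Include the container name in each syslog line
+docker run --rm --log-driver=syslog --log-opt tag='{{.Name}}' busybox echo hi
+```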
+
+### Distribution
+
++ `docker search` now works with partial names (#16509)
+- Push optimization: avoid buffering to file (#15493)
+- The daemon will display progress for images that were already being pulled by another client (#15489)
+- Only permissions required for the current action being performed are requested (#)
++ Renaming trust keys (and respective environment variables) from `offline` to `root` and `tagging` to `repository` (#16894)
+- DEPRECATE trust key environment variables `DOCKER_CONTENT_TRUST_OFFLINE_PASSPHRASE` and `DOCKER_CONTENT_TRUST_TAGGING_PASSPHRASE` (#16894)
+
+### Security
+
++ Add SELinux profiles to the rpm package (#15832)
+- Fix various issues with AppArmor profiles provided in the deb package (#14609)
+- Add AppArmor policy that prevents writing to /proc (#15571)
+
+## 1.8.3 (2015-10-12)
+
+### Distribution
+
+- Fix layer IDs leading to local graph poisoning (CVE-2014-8178)
+- Fix manifest validation and parsing logic errors allowing pull-by-digest validation bypass (CVE-2014-8179)
++ Add `--disable-legacy-registry` to prevent a daemon from using a v1 registry
+
+## 1.8.2 (2015-09-10)
+
+### Distribution
+
+- Fixes rare edge case of handling GNU LongLink and LongName entries.
+- Fix ^C on docker pull.
+- Fix docker pull issues on client disconnection.
+- Fix issue that caused the daemon to panic when loggers weren't configured properly.
+- Fix goroutine leak pulling images from registry V2.
+
+### Runtime
+
+- Fix a bug mounting cgroups for docker daemons running inside docker containers.
+- Initialize log configuration properly.
+
+### Client
+
+- Handle the `-q` flag in `docker ps` properly when there is a default format.
+
+### Networking
+
+- Fix several corner cases with netlink.
+
+### Contrib
+
+- Fix several issues with bash completion.
+
+## 1.8.1 (2015-08-12)
+
+### Distribution
+
+* Fix a bug where pushing multiple tags would result in invalid images
+
+## 1.8.0 (2015-08-11)
+
+### Distribution
+
++ Trusted pull, push and build, disabled by default
+* Make tar layers deterministic between registries
+* Don't allow deleting the image of running containers
+* Check if a tag name to load is a valid digest
+* Allow one-character repository names
+* Add a more accurate error description for invalid tag names
+* Make build cache ignore mtime
+
+### CLI
+
++ Add support for DOCKER_CONFIG/--config to specify the config file dir
++ Add --type flag for the docker inspect command
++ Add formatting options to `docker ps` with `--format`
++ Replace `docker -d` with new subcommand `docker daemon`
+* Zsh completion updates and improvements
+* Add some missing events to bash completion
+* Support daemon URLs with base paths in `docker -H`
+* Validate status= filter to docker ps
+* Display when a container is in --net=host in docker ps
+* Extend docker inspect to export image metadata related to the graph driver
+* Restore --default-gateway{,-v6} daemon options
+* Add missing unpublished ports in docker ps
+* Allow duration strings in `docker events` as --since/--until
+* Expose more mounts information in `docker inspect`
+
+### Runtime
+
++ Add new Fluentd logging driver
++ Allow `docker import` to load from local files
++ Add logging driver for GELF via UDP
++ Allow copying files from the host to containers with `docker cp`
++ Promote volume drivers from experimental to master
++ Add rollover options to the json-file log driver, and the --log-driver-opts flag
++ Add memory swappiness tuning options
+* Remove cgroup read-only flag when privileged
+* Make /proc, /sys, & /dev read-only for read-only containers
+* Add cgroup bind mount by default
+* Overlay: Export metadata for container and image in `docker inspect`
+* Devicemapper: external device activation
+* Devicemapper: Compare uuid of base device on startup
+* Remove RC4 from the list of registry cipher suites
+* Add syslog-facility option
+* LXC execdriver compatibility with recent LXC versions
+* Mark the LXC execdriver as deprecated (to be removed with the migration to runc)
+
+### Plugins
+
+* Separate plugin sockets and specs locations
+* Allow TLS connections to plugins
+
+### Bug fixes
+
+- Add missing 'Names' field to /containers/json API output
+- Make `docker rmi` of dangling images safe while pulling
+- Devicemapper: Change default basesize to 100G
+- Go Scheduler issue with sync.Mutex and gcc
+- Fix issue where the Search API endpoint would panic due to empty AuthConfig
+- Set image canonical names correctly
+- Check dockerinit only if the lxc driver is used
+- Fix ulimit usage of nproc
+- Always attach STDIN if -i,--interactive is specified
+- Show error messages when saving container state fails
+- Fixed the incorrect assumption that --bridge=none disables the network
+- Check for invalid port specifications in host configuration
+- Fix endpoint leave failure for --net=host mode
+- Fix goroutine leak in the stats API if the container is not running
+- Check for the apparmor file before reading it
+- Fix DOCKER_TLS_VERIFY being ignored
+- Set umask to the default on startup
+- Correct the message when pausing or unpausing a non-running container
+- Adjust disallowed CpuShares in container creation
+- ZFS: correctly apply selinux context
+- Display an empty string instead of `<nil>` when the IP opt is nil
+- `docker kill` returns an error when the container is not running
+- Fix COPY/ADD quoted/json form
+- Fix goroutine leak on logs -f with no output
+- Remove panic in nat package on invalid hostport
+- Fix container linking in Fedora 22
+- Fix error caused by using default gateways outside of the allocated range
+- Format times in inspect command with a template as RFC3339Nano
+- Make the registry client accept 2xx and 3xx http status responses as successful
+- Fix race issue that caused the daemon to crash when certain layer downloads failed in a specific order.
+- Fix error when the docker ps format was not valid.
+- Remove redundant IP forward check.
+- Fix issue trying to push images to repository mirrors.
+- Fix error cleaning up network endpoints when there is an initialization issue.
+
+## 1.7.1 (2015-07-14)
+
+#### Runtime
+
+- Fix default user spawning exec process with `docker exec`
+- Make `--bridge=none` not configure the network bridge
+- Publish networking stats properly
+- Fix implicit devicemapper selection with static binaries
+- Fix socket connections that hung intermittently
+- Fix bridge interface creation on CentOS/RHEL 6.6
+- Fix local DNS lookups added to resolv.conf
+- Fix copy command mounting volumes
+- Fix read/write privileges in volumes mounted with --volumes-from
+
+#### Remote API
+
+- Fix unmarshalling of Command and Entrypoint
+- Set limit for minimum client version supported
+- Validate port specification
+- Return proper errors when attach/reattach fail
+
+#### Distribution
+
+- Fix pulling private images
+- Fix fallback between registry V2 and V1
+
+## 1.7.0 (2015-06-16)
+
+#### Runtime
++ Experimental feature: support for out-of-process volume plugins
+* The userland proxy can be disabled in favor of hairpin NAT using the daemon’s `--userland-proxy=false` flag
+* The `exec` command supports the `-u|--user` flag to specify the new process owner
++ Default gateway for containers can be specified daemon-wide using the `--default-gateway` and `--default-gateway-v6` flags
++ The CPU CFS (Completely Fair Scheduler) quota can be set in `docker run` using `--cpu-quota`
++ Container block IO can be controlled in `docker run` using `--blkio-weight`
++ ZFS support
++ The `docker logs` command supports a `--since` argument
++ UTS namespace can be shared with the host with `docker run --uts=host`
+
+#### Quality
+* Networking stack was entirely rewritten as part of the libnetwork effort
+* Engine internals refactoring
+* Volumes code was entirely rewritten to support the plugins effort
++ Sending SIGUSR1 to a daemon will dump all goroutine stacks without exiting
+
+#### Build
++ Support ${variable:-value} and ${variable:+value} syntax for environment variables
++ Support resource management flags `--cgroup-parent`, `--cpu-period`, `--cpu-quota`, `--cpuset-cpus`, `--cpuset-mems`
++ git context changes with branches and directories
+* The .dockerignore file supports exclusion rules
+
+#### Distribution
++ Client support for v2 mirroring support for the official registry
+
+#### Bugfixes
+* Firewalld is now supported and will automatically be used when available
+* Mounting --device recursively
+
+## 1.6.2 (2015-05-13)
+
+#### Runtime
+- Revert change prohibiting mounting into /sys
+
+## 1.6.1 (2015-05-07)
+
+#### Security
+- Fix read/write /proc paths (CVE-2015-3630)
+- Prohibit VOLUME /proc and VOLUME / (CVE-2015-3631)
+- Fix opening of file-descriptor 1 (CVE-2015-3627)
+- Fix symlink traversal on container respawn allowing local privilege escalation (CVE-2015-3629)
+- Prohibit mount of /sys
+
+#### Runtime
+- Update AppArmor policy to not allow mounts
+
+## 1.6.0 (2015-04-07)
+
+#### Builder
++ Building images from an image ID
++ Build containers with resource constraints, e.g. `docker build --cpu-shares=100 --memory=1024m...`
++ `commit --change` to apply specified Dockerfile instructions while committing the image
++ `import --change` to apply specified Dockerfile instructions while importing the image
++ Builds no longer continue in the background when canceled with CTRL-C
+
+#### Client
++ Windows Support
+
+#### Runtime
++ Container and image Labels
++ `--cgroup-parent` for specifying a parent cgroup to place container cgroup within
++ Logging drivers, `json-file`, `syslog`, or `none`
++ Pulling images by ID
++ `--ulimit` to set the ulimit on a container
++ `--default-ulimit` option on the daemon which applies to all created containers (and is overwritten by `--ulimit` on run)
+
+## 1.5.0 (2015-02-10)
+
+#### Builder
++ Dockerfile to use for a given `docker build` can be specified with the `-f` flag
+* Dockerfile and .dockerignore files can themselves be excluded as part of the .dockerignore file, thus preventing modifications to these files from invalidating the ADD or COPY instruction cache
+* ADD and COPY instructions accept relative paths
+* Dockerfile `FROM scratch` instruction is now interpreted as a no-base specifier
+* Improve performance when exposing a large number of ports
+
+#### Hack
++ Allow client-side only integration tests for Windows
+* Include docker-py integration tests against the Docker daemon as part of our test suites
+
+#### Packaging
++ Support for the new version of the registry HTTP API
+* Speed up `docker push` for images with a majority of already existing layers
+- Fixed contacting a private registry through a proxy
+
+#### Remote API
++ A new endpoint will stream live container resource metrics and can be accessed with the `docker stats` command
++ Containers can be renamed using the new `rename` endpoint and the associated `docker rename` command
+* Container `inspect` endpoint shows the IDs of `exec` commands running in this container
+* Container `inspect` endpoint shows the number of times Docker auto-restarted the container
+* New types of event can be streamed by the `events` endpoint: `OOM` (container died of out-of-memory), `exec_create`, and `exec_start`
+- Fixed returned string fields which hold numeric characters incorrectly omitting surrounding double quotes
+
+#### Runtime
++ Docker daemon has full IPv6 support
++ The `docker run` command can take the `--pid=host` flag to use the host PID namespace, which makes it possible for example to debug host processes using containerized debugging tools
++ The `docker run` command can take the `--read-only` flag to make the container’s root filesystem mounted as readonly, which can be used in combination with volumes to force a container’s processes to only write to locations that will be persisted
++ Container total memory usage can be limited for `docker run` using the `--memory-swap` flag
+* Major stability improvements for the devicemapper storage driver
+* Better integration with host system: containers will reflect changes to the host's `/etc/resolv.conf` file when restarted
+* Better integration with host system: per-container iptable rules are moved to the DOCKER chain
+- Fixed containers exiting on out of memory returning an invalid exit code
+
+#### Other
+* The HTTP_PROXY, HTTPS_PROXY, and NO_PROXY environment variables are properly taken into account by the client when connecting to the Docker daemon
+
+## 1.4.1 (2014-12-15)
+
+#### Runtime
+- Fix issue with volumes-from and bind mounts not being honored after create
honored after create + +## 1.4.0 (2014-12-11) + +#### Notable Features since 1.3.0 ++ Set key=value labels to the daemon (displayed in `docker info`), applied with + new `-label` daemon flag ++ Add support for `ENV` in Dockerfile of the form: + `ENV name=value name2=value2...` ++ New Overlayfs Storage Driver ++ `docker info` now returns an `ID` and `Name` field ++ Filter events by event name, container, or image ++ `docker cp` now supports copying from container volumes +- Fixed `docker tag`, so it honors `--force` when overriding a tag for existing + image. + +## 1.3.3 (2014-12-11) + +#### Security +- Fix path traversal vulnerability in processing of absolute symbolic links (CVE-2014-9356) +- Fix decompression of xz image archives, preventing privilege escalation (CVE-2014-9357) +- Validate image IDs (CVE-2014-9358) + +#### Runtime +- Fix an issue when image archives are being read slowly + +#### Client +- Fix a regression related to stdin redirection +- Fix a regression with `docker cp` when destination is the current directory + +## 1.3.2 (2014-11-20) + +#### Security +- Fix tar breakout vulnerability +* Extractions are now sandboxed chroot +- Security options are no longer committed to images + +#### Runtime +- Fix deadlock in `docker ps -f exited=1` +- Fix a bug when `--volumes-from` references a container that failed to start + +#### Registry ++ `--insecure-registry` now accepts CIDR notation such as 10.1.0.0/16 +* Private registries whose IPs fall in the 127.0.0.0/8 range do no need the `--insecure-registry` flag +- Skip the experimental registry v2 API when mirroring is enabled + +## 1.3.1 (2014-10-28) + +#### Security +* Prevent fallback to SSL protocols < TLS 1.0 for client, daemon and registry ++ Secure HTTPS connection to registries with certificate verification and without HTTP fallback unless `--insecure-registry` is specified + +#### Runtime +- Fix issue where volumes would not be shared + +#### Client +- Fix issue with `--iptables=false` not automatically setting `--ip-masq=false` +- Fix docker run output to non-TTY stdout + +#### Builder +- Fix escaping `$` for environment variables +- Fix issue with lowercase `onbuild` Dockerfile instruction +- Restrict environment variable expansion to `ENV`, `ADD`, `COPY`, `WORKDIR`, `EXPOSE`, `VOLUME` and `USER` + +## 1.3.0 (2014-10-14) + +#### Notable features since 1.2.0 ++ Docker `exec` allows you to run additional processes inside existing containers ++ Docker `create` gives you the ability to create a container via the CLI without executing a process ++ `--security-opts` options to allow user to customize container labels and apparmor profiles ++ Docker `ps` filters +- Wildcard support to COPY/ADD ++ Move production URLs to get.docker.com from get.docker.io ++ Allocate IP address on the bridge inside a valid CIDR ++ Use drone.io for PR and CI testing ++ Ability to setup an official registry mirror ++ Ability to save multiple images with docker `save` + +## 1.2.0 (2014-08-20) + +#### Runtime ++ Make /etc/hosts /etc/resolv.conf and /etc/hostname editable at runtime ++ Auto-restart containers using policies ++ Use /var/lib/docker/tmp for large temporary files ++ `--cap-add` and `--cap-drop` to tweak what linux capability you want ++ `--device` to use devices in containers + +#### Client ++ `docker search` on private registries ++ Add `exited` filter to `docker ps --filter` +* `docker rm -f` now kills instead of stop ++ Support for IPv6 addresses in `--dns` flag + +#### Proxy ++ Proxy instances in separate processes +* Small bug fix 
on UDP proxy + +## 1.1.2 (2014-07-23) + +#### Runtime ++ Fix port allocation for existing containers ++ Fix containers restart on daemon restart + +#### Packaging ++ Fix /etc/init.d/docker issue on Debian + +## 1.1.1 (2014-07-09) + +#### Builder +* Fix issue with ADD + +## 1.1.0 (2014-07-03) + +#### Notable features since 1.0.1 ++ Add `.dockerignore` support ++ Pause containers during `docker commit` ++ Add `--tail` to `docker logs` + +#### Builder ++ Allow a tar file as context for `docker build` +* Fix issue with white-spaces and multi-lines in `Dockerfiles` + +#### Runtime +* Overall performance improvements +* Allow `/` as source of `docker run -v` +* Fix port allocation +* Fix bug in `docker save` +* Add links information to `docker inspect` + +#### Client +* Improve command line parsing for `docker commit` + +#### Remote API +* Improve status code for the `start` and `stop` endpoints + +## 1.0.1 (2014-06-19) + +#### Notable features since 1.0.0 +* Enhance security for the LXC driver + +#### Builder +* Fix `ONBUILD` instruction passed to grandchildren + +#### Runtime +* Fix events subscription +* Fix /etc/hostname file with host networking +* Allow `-h` and `--net=none` +* Fix issue with hotplug devices in `--privileged` + +#### Client +* Fix artifacts with events +* Fix a panic with empty flags +* Fix `docker cp` on Mac OS X + +#### Miscellaneous +* Fix compilation on Mac OS X +* Fix several races + +## 1.0.0 (2014-06-09) + +#### Notable features since 0.12.0 +* Production support + +## 0.12.0 (2014-06-05) + +#### Notable features since 0.11.0 +* 40+ various improvements to stability, performance and usability +* New `COPY` Dockerfile instruction to allow copying a local file from the context into the container without ever extracting if the file is a tar file +* Inherit file permissions from the host on `ADD` +* New `pause` and `unpause` commands to allow pausing and unpausing of containers using cgroup freezer +* The `images` command has a `-f`/`--filter` option to filter the list of images +* Add `--force-rm` to clean up after a failed build +* Standardize JSON keys in Remote API to CamelCase +* Pull from a docker run now assumes `latest` tag if not specified +* Enhance security on Linux capabilities and device nodes + +## 0.11.1 (2014-05-07) + +#### Registry +- Fix push and pull to private registry + +## 0.11.0 (2014-05-07) + +#### Notable features since 0.10.0 + +* SELinux support for mount and process labels +* Linked containers can be accessed by hostname +* Use the net `--net` flag to allow advanced network configuration such as host networking so that containers can use the host's network interfaces +* Add a ping endpoint to the Remote API to do healthchecks of your docker daemon +* Logs can now be returned with an optional timestamp +* Docker now works with registries that support SHA-512 +* Multiple registry endpoints are supported to allow registry mirrors + +## 0.10.0 (2014-04-08) + +#### Builder +- Fix printing multiple messages on a single line. Fixes broken output during builds. +- Follow symlinks inside container's root for ADD build instructions. +- Fix EXPOSE caching. + +#### Documentation +- Add the new options of `docker ps` to the documentation. +- Add the options of `docker restart` to the documentation. +- Update daemon docs and help messages for --iptables and --ip-forward. +- Updated apt-cacher-ng docs example. +- Remove duplicate description of --mtu from docs. +- Add missing -t and -v for `docker images` to the docs. +- Add fixes to the cli docs. 
+- Update libcontainer docs. +- Update images in docs to remove references to AUFS and LXC. +- Update the nodejs_web_app in the docs to use the new epel RPM address. +- Fix external link on security of containers. +- Update remote API docs. +- Add image size to history docs. +- Be explicit about binding to all interfaces in redis example. +- Document DisableNetwork flag in the 1.10 remote api. +- Document that `--lxc-conf` is lxc only. +- Add chef usage documentation. +- Add example for an image with multiple for `docker load`. +- Explain what `docker run -a` does in the docs. + +#### Contrib +- Add variable for DOCKER_LOGFILE to sysvinit and use append instead of overwrite in opening the logfile. +- Fix init script cgroup mounting workarounds to be more similar to cgroupfs-mount and thus work properly. +- Remove inotifywait hack from the upstart host-integration example because it's not necessary any more. +- Add check-config script to contrib. +- Fix fish shell completion. + +#### Hack +* Clean up "go test" output from "make test" to be much more readable/scannable. +* Exclude more "definitely not unit tested Go source code" directories from hack/make/test. ++ Generate md5 and sha256 hashes when building, and upload them via hack/release.sh. +- Include contributed completions in Ubuntu PPA. ++ Add cli integration tests. +* Add tweaks to the hack scripts to make them simpler. + +#### Remote API ++ Add TLS auth support for API. +* Move git clone from daemon to client. +- Fix content-type detection in docker cp. +* Split API into 2 go packages. + +#### Runtime +* Support hairpin NAT without going through Docker server. +- devicemapper: succeed immediately when removing non-existent devices. +- devicemapper: improve handling of devicemapper devices (add per device lock, increase sleep time and unlock while sleeping). +- devicemapper: increase timeout in waitClose to 10 seconds. +- devicemapper: ensure we shut down thin pool cleanly. +- devicemapper: pass info, rather than hash to activateDeviceIfNeeded, deactivateDevice, setInitialized, deleteDevice. +- devicemapper: avoid AB-BA deadlock. +- devicemapper: make shutdown better/faster. +- improve alpha sorting in mflag. +- Remove manual http cookie management because the cookiejar is being used. +- Use BSD raw mode on Darwin. Fixes nano, tmux and others. +- Add FreeBSD support for the client. +- Merge auth package into registry. +- Add deprecation warning for -t on `docker pull`. +- Remove goroutine leak on error. +- Update parseLxcInfo to comply with new lxc1.0 format. +- Fix attach exit on darwin. +- Improve deprecation message. +- Retry to retrieve the layer metadata up to 5 times for `docker pull`. +- Only unshare the mount namespace for execin. +- Merge existing config when committing. +- Disable daemon startup timeout. +- Fix issue #4681: add loopback interface when networking is disabled. +- Add failing test case for issue #4681. +- Send SIGTERM to child, instead of SIGKILL. +- Show the driver and the kernel version in `docker info` even when not in debug mode. +- Always symlink /dev/ptmx for libcontainer. This fixes console related problems. +- Fix issue caused by the absence of /etc/apparmor.d. +- Don't leave empty cidFile behind when failing to create the container. +- Mount cgroups automatically if they're not mounted already. +- Use mock for search tests. +- Update to double-dash everywhere. +- Move .dockerenv parsing to lxc driver. +- Move all bind-mounts in the container inside the namespace. 
+- Don't use separate bind mount for container.
+- Always symlink /dev/ptmx for libcontainer.
+- Don't kill by pid for other drivers.
+- Add initial logging to libcontainer.
+* Sort by port in `docker ps`.
+- Move networking drivers into runtime top level package.
++ Add --no-prune to `docker rmi`.
++ Add time since exit in `docker ps`.
+- graphdriver: add build tags.
+- Prevent allocation of previously allocated ports & improve port allocation.
+* Add support for --since/--before in `docker ps`.
+- Clean up container stop.
++ Add support for configurable DNS search domains.
+- Add support for relative WORKDIR instructions.
+- Add --output flag for docker save.
+- Remove duplication of DNS entries in config merging.
+- Add cpuset.cpus to cgroups and native driver options.
+- Remove docker-ci.
+- Promote btrfs. btrfs is no longer considered experimental.
+- Add --input flag to `docker load`.
+- Return error when existing bridge doesn't match IP address.
+- Strip comments before parsing line continuations to avoid interpreting instructions as comments.
+- Fix TestOnlyLoopbackExistsWhenUsingDisableNetworkOption to ignore "DOWN" interfaces.
+- Add systemd implementation of cgroups and make containers show up as systemd units.
+- Fix commit and import when no repository is specified.
+- Remount /var/lib/docker as --private to fix scaling issue.
+- Use the environment's proxy when pinging the remote registry.
+- Reduce the error level of harmless errors.
+* Allow --volumes-from to be individual files.
+- Fix expanding buffer in StdCopy.
+- Set error regardless of attach or stdin. This fixes #3364.
+- Add support for --env-file to load environment variables from files.
+- Symlink /etc/mtab and /proc/mounts.
+- Allow pushing a single tag.
+- Shut down containers cleanly at shutdown and wait forever for the containers to shut down. This makes container shutdown on daemon shutdown work properly via SIGTERM.
+- Don't throw an error when starting an already running container.
+- Fix dynamic port allocation limit.
+- Remove setupDev from libcontainer.
+- Add API version to `docker version`.
+- Return correct exit code when receiving signal and make SIGQUIT quit without cleanup.
+- Fix --volumes-from mount failure.
+- Allow non-privileged containers to create device nodes.
+- Skip login tests because of external dependency on a hosted service.
+- Deprecate `docker images --tree` and `docker images --viz`.
+- Deprecate `docker insert`.
+- Include base abstraction for apparmor. This fixes some apparmor-related problems on Ubuntu 14.04.
+- Add specific error message when hitting 401 over HTTP on push.
+- Fix absolute volume check.
+- Remove volumes-from from the config.
+- Move DNS options to hostconfig.
+- Update the apparmor profile for libcontainer.
+- Add deprecation notice for `docker commit -run`.
+
+## 0.9.1 (2014-03-24)
+
+#### Builder
+- Fix printing multiple messages on a single line. Fixes broken output during builds.
+
+#### Documentation
+- Fix external link on security of containers.
+
+#### Contrib
+- Fix init script cgroup mounting workarounds to be more similar to cgroupfs-mount and thus work properly.
+- Add variable for DOCKER_LOGFILE to sysvinit and use append instead of overwrite in opening the logfile.
+
+#### Hack
+- Generate md5 and sha256 hashes when building, and upload them via hack/release.sh.
+
+#### Remote API
+- Fix content-type detection in `docker cp`.
+
+#### Runtime
+- Use BSD raw mode on Darwin. Fixes nano, tmux and others.
+- Only unshare the mount namespace for execin. +- Retry to retrieve the layer metadata up to 5 times for `docker pull`. +- Merge existing config when committing. +- Fix panic in monitor. +- Disable daemon startup timeout. +- Fix issue #4681: add loopback interface when networking is disabled. +- Add failing test case for issue #4681. +- Send SIGTERM to child, instead of SIGKILL. +- Show the driver and the kernel version in `docker info` even when not in debug mode. +- Always symlink /dev/ptmx for libcontainer. This fixes console related problems. +- Fix issue caused by the absence of /etc/apparmor.d. +- Don't leave empty cidFile behind when failing to create the container. +- Improve deprecation message. +- Fix attach exit on darwin. +- devicemapper: improve handling of devicemapper devices (add per device lock, increase sleep time, unlock while sleeping). +- devicemapper: succeed immediately when removing non-existent devices. +- devicemapper: increase timeout in waitClose to 10 seconds. +- Remove goroutine leak on error. +- Update parseLxcInfo to comply with new lxc1.0 format. + +## 0.9.0 (2014-03-10) + +#### Builder +- Avoid extra mount/unmount during build. This fixes mount/unmount related errors during build. +- Add error to docker build --rm. This adds missing error handling. +- Forbid chained onbuild, `onbuild from` and `onbuild maintainer` triggers. +- Make `--rm` the default for `docker build`. + +#### Documentation +- Download the docker client binary for Mac over https. +- Update the titles of the install instructions & descriptions. +* Add instructions for upgrading boot2docker. +* Add port forwarding example in OS X install docs. +- Attempt to disentangle repository and registry. +- Update docs to explain more about `docker ps`. +- Update sshd example to use a Dockerfile. +- Rework some examples, including the Python examples. +- Update docs to include instructions for a container's lifecycle. +- Update docs documentation to discuss the docs branch. +- Don't skip cert check for an example & use HTTPS. +- Bring back the memory and swap accounting section which was lost when the kernel page was removed. +- Explain DNS warnings and how to fix them on systems running and using a local nameserver. + +#### Contrib +- Add Tanglu support for mkimage-debootstrap. +- Add SteamOS support for mkimage-debootstrap. + +#### Hack +- Get package coverage when running integration tests. +- Remove the Vagrantfile. This is being replaced with boot2docker. +- Fix tests on systems where aufs isn't available. +- Update packaging instructions and remove the dependency on lxc. + +#### Remote API +* Move code specific to the API to the api package. +- Fix header content type for the API. Makes all endpoints use proper content type. +- Fix registry auth & remove ping calls from CmdPush and CmdPull. +- Add newlines to the JSON stream functions. + +#### Runtime +* Do not ping the registry from the CLI. All requests to registries flow through the daemon. +- Check for nil information return in the lxc driver. This fixes panics with older lxc versions. +- Devicemapper: cleanups and fix for unmount. Fixes two problems which were causing unmount to fail intermittently. +- Devicemapper: remove directory when removing device. Directories don't get left behind when removing the device. +* Devicemapper: enable skip_block_zeroing. Improves performance by not zeroing blocks. +- Devicemapper: fix shutdown warnings. Fixes shutdown warnings concerning pool device removal. +- Ensure docker cp stream is closed properly. 
Fixes problems with files not being copied by `docker cp`. +- Stop making `tcp://` default to `127.0.0.1:4243` and remove the default port for tcp. +- Fix `--run` in `docker commit`. This makes `docker commit --run` work again. +- Fix custom bridge related options. This makes custom bridges work again. ++ Mount-bind the PTY as container console. This allows tmux/screen to run. ++ Add the pure Go libcontainer library to make it possible to run containers using only features of the Linux kernel. ++ Add native exec driver which uses libcontainer and make it the default exec driver. +- Add support for handling extended attributes in archives. +* Set the container MTU to be the same as the host MTU. ++ Add simple sha256 checksums for layers to speed up `docker push`. +* Improve kernel version parsing. +* Allow flag grouping (`docker run -it`). +- Remove chroot exec driver. +- Fix divide by zero to fix panic. +- Rewrite `docker rmi`. +- Fix docker info with lxc 1.0.0. +- Fix fedora tty with apparmor. +* Don't always append env vars, replace defaults with vars from config. +* Fix a goroutine leak. +* Switch to Go 1.2.1. +- Fix unique constraint error checks. +* Handle symlinks for Docker's data directory and for TMPDIR. +- Add deprecation warnings for flags (-flag is deprecated in favor of --flag) +- Add apparmor profile for the native execution driver. +* Move system specific code from archive to pkg/system. +- Fix duplicate signal for `docker run -i -t` (issue #3336). +- Return correct process pid for lxc. +- Add a -G option to specify the group which unix sockets belong to. ++ Add `-f` flag to `docker rm` to force removal of running containers. ++ Kill ghost containers and restart all ghost containers when the docker daemon restarts. ++ Add `DOCKER_RAMDISK` environment variable to make Docker work when the root is on a ramdisk. + +## 0.8.1 (2014-02-18) + +#### Builder + +- Avoid extra mount/unmount during build. This removes an unneeded mount/unmount operation which was causing problems with devicemapper +- Fix regression with ADD of tar files. This stops Docker from decompressing tarballs added via ADD from the local file system +- Add error to `docker build --rm`. This adds a missing error check to ensure failures to remove containers are detected and reported + +#### Documentation + +* Update issue filing instructions +* Warn against the use of symlinks for Docker's storage folder +* Replace the Firefox example with an IceWeasel example +* Rewrite the PostgreSQL example using a Dockerfile and add more details to it +* Improve the OS X documentation + +#### Remote API + +- Fix broken images API for version less than 1.7 +- Use the right encoding for all API endpoints which return JSON +- Move remote api client to api/ +- Queue calls to the API using generic socket wait + +#### Runtime + +- Fix the use of custom settings for bridges and custom bridges +- Refactor the devicemapper code to avoid many mount/unmount race conditions and failures +- Remove two panics which could make Docker crash in some situations +- Don't ping registry from the CLI client +- Enable skip_block_zeroing for devicemapper. This stops devicemapper from always zeroing entire blocks +- Fix --run in `docker commit`. This makes docker commit store `--run` in the image configuration +- Remove directory when removing devicemapper device. This cleans up leftover mount directories +- Drop NET_ADMIN capability for non-privileged containers. 
Unprivileged containers can't change their network configuration
+- Ensure `docker cp` stream is closed properly
+- Avoid extra mount/unmount during container registration. This removes an unneeded mount/unmount operation which was causing problems with devicemapper
+- Stop allowing tcp:// as a default tcp bind address which binds to 127.0.0.1:4243 and remove the default port
++ Mount-bind the PTY as container console. This allows tmux and screen to run in a container
+- Clean up archive closing. This fixes and improves archive handling
+- Fix engine tests on systems where temp directories are symlinked
+- Add test methods for save and load
+- Avoid temporarily unmounting the container when restarting it. This fixes a race for devicemapper during restart
+- Support submodules when building from a GitHub repository
+- Quote volume path to allow spaces
+- Fix remote tar ADD behavior. This fixes a regression which was causing Docker to extract tarballs
+
+## 0.8.0 (2014-02-04)
+
+#### Notable features since 0.7.0
+
+* Images and containers can be removed much faster
+* Building an image from source with docker build is now much faster
+* The Docker daemon starts and stops much faster
+* The memory footprint of many common operations has been reduced, by streaming files instead of buffering them in memory, fixing memory leaks, and fixing various suboptimal memory allocations
+* Several race conditions were fixed, making Docker more stable and less likely to crash under very high concurrency load, and reducing the memory footprint of many common operations
+* All packaging operations are now built on the Go language’s standard tar implementation, which is bundled with Docker itself. This makes packaging more portable across host distributions, and solves several issues caused by quirks and incompatibilities between different distributions of tar
+* Docker can now create, remove and modify larger numbers of containers and images gracefully thanks to more aggressive releasing of system resources. For example the storage driver API now allows Docker to do reference counting on mounts created by the drivers
+* With the ongoing changes to the networking and execution subsystems of Docker, testing these areas has been a focus of the refactoring. By moving these subsystems into separate packages we can test, analyze, and monitor coverage and quality of these packages
+* Many components have been separated into smaller sub-packages, each with a dedicated test suite. As a result the code is better-tested, more readable and easier to change
+
+* The ADD instruction now supports caching, which avoids unnecessarily re-uploading the same source content again and again when it hasn’t changed
+* The new ONBUILD instruction adds to your image a “trigger” instruction to be executed at a later time, when the image is used as the base for another build
+* Docker now ships with an experimental storage driver which uses the BTRFS filesystem for copy-on-write
+* Docker is officially supported on Mac OS X
+* The Docker daemon supports systemd socket activation
+
+## 0.7.6 (2014-01-14)
+
+#### Builder
+
+* Do not follow symlink outside of build context
+
+#### Runtime
+
+- Remount bind mounts when `ro` is specified
+* Use https for fetching the docker version
+
+#### Other
+
+* Inline the test.docker.io fingerprint
+* Add ca-certificates to packaging documentation
+
+## 0.7.5 (2014-01-09)
+
+#### Builder
+
+* Disable compression for build. 
More space usage but a much faster upload +- Fix ADD caching for certain paths +- Do not compress archive from git build + +#### Documentation + +- Fix error in GROUP add example +* Make sure the GPG fingerprint is inline in the documentation +* Give more specific advice on setting up signing of commits for DCO + +#### Runtime + +- Fix misspelled container names +- Do not add hostname when networking is disabled +* Return most recent image from the cache by date +- Return all errors from docker wait +* Add Content-Type Header "application/json" to GET /version and /info responses + +#### Other + +* Update DCO to version 1.1 ++ Update Makefile to use "docker:GIT_BRANCH" as the generated image name +* Update Travis to check for new 1.1 DCO version + +## 0.7.4 (2014-01-07) + +#### Builder + +- Fix ADD caching issue with . prefixed path +- Fix docker build on devicemapper by reverting sparse file tar option +- Fix issue with file caching and prevent wrong cache hit +* Use same error handling while unmarshalling CMD and ENTRYPOINT + +#### Documentation + +* Simplify and streamline Amazon Quickstart +* Install instructions use unprefixed Fedora image +* Update instructions for mtu flag for Docker on GCE ++ Add Ubuntu Saucy to installation +- Fix for wrong version warning on master instead of latest + +#### Runtime + +- Only get the image's rootfs when we need to calculate the image size +- Correctly handle unmapping UDP ports +* Make CopyFileWithTar use a pipe instead of a buffer to save memory on docker build +- Fix login message to say pull instead of push +- Fix "docker load" help by removing "SOURCE" prompt and mentioning STDIN +* Make blank -H option default to the same as no -H was sent +* Extract cgroups utilities to own submodule + +#### Other + ++ Add Travis CI configuration to validate DCO and gofmt requirements ++ Add Developer Certificate of Origin Text +* Upgrade VBox Guest Additions +* Check standalone header when pinging a registry server + +## 0.7.3 (2014-01-02) + +#### Builder + ++ Update ADD to use the image cache, based on a hash of the added content +* Add error message for empty Dockerfile + +#### Documentation + +- Fix outdated link to the "Introduction" on www.docker.io ++ Update the docs to get wider when the screen does +- Add information about needing to install LXC when using raw binaries +* Update Fedora documentation to disentangle the docker and docker.io conflict +* Add a note about using the new `-mtu` flag in several GCE zones ++ Add FrugalWare installation instructions ++ Add a more complete example of `docker run` +- Fix API documentation for creating and starting Privileged containers +- Add missing "name" parameter documentation on "/containers/create" +* Add a mention of `lxc-checkconfig` as a way to check for some of the necessary kernel configuration +- Update the 1.8 API documentation with some additions that were added to the docs for 1.7 + +#### Hack + +- Add missing libdevmapper dependency to the packagers documentation +* Update minimum Go requirement to a hard line at Go 1.2+ +* Many minor improvements to the Vagrantfile ++ Add ability to customize dockerinit search locations when compiling (to be used very sparingly only by packagers of platforms who require a nonstandard location) ++ Add coverprofile generation reporting +- Add `-a` to our Go build flags, removing the need for recompiling the stdlib manually +* Update Dockerfile to be more canonical and have less spurious warnings during build +- Fix some miscellaneous `docker pull` progress bar 
display issues
+* Migrate more miscellaneous packages under the "pkg" folder
+* Update TextMate highlighting to automatically be enabled for files named "Dockerfile"
+* Reorganize syntax highlighting files under a common "contrib/syntax" directory
+* Update install.sh script (https://get.docker.io/) to not fail if busybox fails to download or run at the end of the Ubuntu/Debian installation
+* Add support for container names in bash completion
+
+#### Packaging
+
++ Add an official Docker client binary for Darwin (Mac OS X)
+* Remove empty "Vendor" string and add "License" on deb package
++ Add a stubbed version of "/etc/default/docker" in the deb package
+
+#### Runtime
+
+* Update layer application to extract tars in place, avoiding file churn while handling whiteouts
+- Fix permissiveness of mtime comparisons in tar handling (since GNU tar and Go tar do not yet support sub-second mtime precision)
+* Reimplement `docker top` in pure Go to work more consistently, and even inside Docker-in-Docker (thus removing the shell injection vulnerability present in some versions of `lxc-ps`)
++ Update `-H unix://` to work similarly to `-H tcp://` by inserting the default values for missing portions
+- Fix more edge cases regarding dockerinit and deleted or replaced docker or dockerinit files
+* Update container name validation to include '.'
+- Fix use of a symlink or non-absolute path as the argument to `-g` to work as expected
+* Update to handle external mounts outside of LXC, fixing many small mounting quirks and making future execution backends and other features simpler
+* Update to use proper box-drawing characters everywhere in `docker images -tree`
+* Move MTU setting from LXC configuration to directly use netlink
+* Add `-S` option to external tar invocation for more efficient sparse file handling
++ Add arch/os info to User-Agent string, especially for registry requests
++ Add `-mtu` option to Docker daemon for configuring MTU
+- Fix `docker build` to exit with a non-zero exit code on error
++ Add `DOCKER_HOST` environment variable to configure the client `-H` flag without specifying it manually for every invocation
+
+## 0.7.2 (2013-12-16)
+
+#### Runtime
+
++ Validate container names on creation with standard regex
+* Increase maximum image depth to 127 from 42
+* Continue to move API endpoints to the job API
++ Add -bip flag to allow specification of dynamic bridge IP via CIDR
+- Allow bridge creation when IPv6 is not enabled on certain systems
+* Set hostname and IP address from within dockerinit
+* Drop capabilities from within dockerinit
+- Fix volumes on host when symlink is present in the image
+- Prevent deletion of image if ANY container is depending on it even if the container is not running
+* Update docker push to use new progress display
+* Use os.Lstat to allow mounting unix sockets when inspecting volumes
+- Adjust handling of inactive user login
+- Add missing defines in devicemapper for older kernels
+- Allow untag operations with no container validation
+- Add auth config to docker build
+
+#### Documentation
+
+* Add more information about Docker logging
++ Add RHEL documentation
+* Add a direct example for changing the CMD that is run in a container
+* Update Arch installation documentation
++ Add section on Trusted Builds
++ Add Network documentation page
+
+#### Other
+
++ Add new cover bundle for providing code coverage reporting
+* Separate integration tests in bundles
+* Make Tianon the hack maintainer
+* Update mkimage-debootstrap with more tweaks for keeping 
images small +* Use https to get the install script +* Remove vendored dotcloud/tar now that Go 1.2 has been released + +## 0.7.1 (2013-12-05) + +#### Documentation + ++ Add @SvenDowideit as documentation maintainer ++ Add links example ++ Add documentation regarding ambassador pattern ++ Add Google Cloud Platform docs ++ Add dockerfile best practices +* Update doc for RHEL +* Update doc for registry +* Update Postgres examples +* Update doc for Ubuntu install +* Improve remote api doc + +#### Runtime + ++ Add hostconfig to docker inspect ++ Implement `docker log -f` to stream logs ++ Add env variable to disable kernel version warning ++ Add -format to `docker inspect` ++ Support bind-mount for files +- Fix bridge creation on RHEL +- Fix image size calculation +- Make sure iptables are called even if the bridge already exists +- Fix issue with stderr only attach +- Remove init layer when destroying a container +- Fix same port binding on different interfaces +- `docker build` now returns the correct exit code +- Fix `docker port` to display correct port +- `docker build` now check that the dockerfile exists client side +- `docker attach` now returns the correct exit code +- Remove the name entry when the container does not exist + +#### Registry + +* Improve progress bars, add ETA for downloads +* Simultaneous pulls now waits for the first to finish instead of failing +- Tag only the top-layer image when pushing to registry +- Fix issue with offline image transfer +- Fix issue preventing using ':' in password for registry + +#### Other + ++ Add pprof handler for debug ++ Create a Makefile +* Use stdlib tar that now includes fix +* Improve make.sh test script +* Handle SIGQUIT on the daemon +* Disable verbose during tests +* Upgrade to go1.2 for official build +* Improve unit tests +* The test suite now runs all tests even if one fails +* Refactor C in Go (Devmapper) +- Fix OS X compilation + +## 0.7.0 (2013-11-25) + +#### Notable features since 0.6.0 + +* Storage drivers: choose from aufs, device-mapper, or vfs. +* Standard Linux support: docker now runs on unmodified Linux kernels and all major distributions. +* Links: compose complex software stacks by connecting containers to each other. +* Container naming: organize your containers by giving them memorable names. +* Advanced port redirects: specify port redirects per interface, or keep sensitive ports private. +* Offline transfer: push and pull images to the filesystem without losing information. +* Quality: numerous bugfixes and small usability improvements. Significant increase in test coverage. + +## 0.6.7 (2013-11-21) + +#### Runtime + +* Improve stability, fixes some race conditions +* Skip the volumes mounted when deleting the volumes of container. 
+* Fix layer size computation: handle hard links correctly +* Use the work Path for docker cp CONTAINER:PATH +* Fix tmp dir never cleanup +* Speedup docker ps +* More informative error message on name collisions +* Fix nameserver regex +* Always return long id's +* Fix container restart race condition +* Keep published ports on docker stop;docker start +* Fix container networking on Fedora +* Correctly express "any address" to iptables +* Fix network setup when reconnecting to ghost container +* Prevent deletion if image is used by a running container +* Lock around read operations in graph + +#### RemoteAPI + +* Return full ID on docker rmi + +#### Client + ++ Add -tree option to images ++ Offline image transfer +* Exit with status 2 on usage error and display usage on stderr +* Do not forward SIGCHLD to container +* Use string timestamp for docker events -since + +#### Other + +* Update to go 1.2rc5 ++ Add /etc/default/docker support to upstart + +## 0.6.6 (2013-11-06) + +#### Runtime + +* Ensure container name on register +* Fix regression in /etc/hosts ++ Add lock around write operations in graph +* Check if port is valid +* Fix restart runtime error with ghost container networking ++ Add some more colors and animals to increase the pool of generated names +* Fix issues in docker inspect ++ Escape apparmor confinement ++ Set environment variables using a file. +* Prevent docker insert to erase something ++ Prevent DNS server conflicts in CreateBridgeIface ++ Validate bind mounts on the server side ++ Use parent image config in docker build +* Fix regression in /etc/hosts + +#### Client + ++ Add -P flag to publish all exposed ports ++ Add -notrunc and -q flags to docker history +* Fix docker commit, tag and import usage ++ Add stars, trusted builds and library flags in docker search +* Fix docker logs with tty + +#### RemoteAPI + +* Make /events API send headers immediately +* Do not split last column docker top ++ Add size to history + +#### Other + ++ Contrib: Desktop integration. Firefox usecase. 
++ Dockerfile: bump to go1.2rc3 + +## 0.6.5 (2013-10-29) + +#### Runtime + ++ Containers can now be named ++ Containers can now be linked together for service discovery ++ 'run -a', 'start -a' and 'attach' can forward signals to the container for better integration with process supervisors ++ Automatically start crashed containers after a reboot ++ Expose IP, port, and proto as separate environment vars for container links +* Allow ports to be published to specific ips +* Prohibit inter-container communication by default +- Ignore ErrClosedPipe for stdin in Container.Attach +- Remove unused field kernelVersion +* Fix issue when mounting subdirectories of /mnt in container +- Fix untag during removal of images +* Check return value of syscall.Chdir when changing working directory inside dockerinit + +#### Client + +- Only pass stdin to hijack when needed to avoid closed pipe errors +* Use less reflection in command-line method invocation +- Monitor the tty size after starting the container, not prior +- Remove useless os.Exit() calls after log.Fatal + +#### Hack + ++ Add initial init scripts library and a safer Ubuntu packaging script that works for Debian +* Add -p option to invoke debootstrap with http_proxy +- Update install.sh with $sh_c to get sudo/su for modprobe +* Update all the mkimage scripts to use --numeric-owner as a tar argument +* Update hack/release.sh process to automatically invoke hack/make.sh and bail on build and test issues + +#### Other + +* Documentation: Fix the flags for nc in example +* Testing: Remove warnings and prevent mount issues +- Testing: Change logic for tty resize to avoid warning in tests +- Builder: Fix race condition in docker build with verbose output +- Registry: Fix content-type for PushImageJSONIndex method +* Contrib: Improve helper tools to generate debian and Arch linux server images + +## 0.6.4 (2013-10-16) + +#### Runtime + +- Add cleanup of container when Start() fails +* Add better comments to utils/stdcopy.go +* Add utils.Errorf for error logging ++ Add -rm to docker run for removing a container on exit +- Remove error messages which are not actually errors +- Fix `docker rm` with volumes +- Fix some error cases where an HTTP body might not be closed +- Fix panic with wrong dockercfg file +- Fix the attach behavior with -i +* Record termination time in state. 
+- Use empty string so TempDir uses the OS's temp dir automatically +- Make sure to close the network allocators ++ Autorestart containers by default +* Bump vendor kr/pty to commit 3b1f6487b `(syscall.O_NOCTTY)` +* lxc: Allow set_file_cap capability in container +- Move run -rm to the cli only +* Split stdout stderr +* Always create a new session for the container + +#### Testing + +- Add aggregated docker-ci email report +- Add cleanup to remove leftover containers +* Add nightly release to docker-ci +* Add more tests around auth.ResolveAuthConfig +- Remove a few errors in tests +- Catch errClosing error when TCP and UDP proxies are terminated +* Only run certain tests with TESTFLAGS='-run TestName' make.sh +* Prevent docker-ci to test closing PRs +* Replace panic by log.Fatal in tests +- Increase TestRunDetach timeout + +#### Documentation + +* Add initial draft of the Docker infrastructure doc +* Add devenvironment link to CONTRIBUTING.md +* Add `apt-get install curl` to Ubuntu docs +* Add explanation for export restrictions +* Add .dockercfg doc +* Remove Gentoo install notes about #1422 workaround +* Fix help text for -v option +* Fix Ping endpoint documentation +- Fix parameter names in docs for ADD command +- Fix ironic typo in changelog +* Various command fixes in postgres example +* Document how to edit and release docs +- Minor updates to `postgresql_service.rst` +* Clarify LGTM process to contributors +- Corrected error in the package name +* Document what `vagrant up` is actually doing ++ improve doc search results +* Cleanup whitespace in API 1.5 docs +* use angle brackets in MAINTAINER example email +* Update archlinux.rst ++ Changes to a new style for the docs. Includes version switcher. +* Formatting, add information about multiline json +* Improve registry and index REST API documentation +- Replace deprecated upgrading reference to docker-latest.tgz, which hasn't been updated since 0.5.3 +* Update Gentoo installation documentation now that we're in the portage tree proper +* Cleanup and reorganize docs and tooling for contributors and maintainers +- Minor spelling correction of protocoll -> protocol + +#### Contrib + +* Add vim syntax highlighting for Dockerfiles from @honza +* Add mkimage-arch.sh +* Reorganize contributed completion scripts to add zsh completion + +#### Hack + +* Add vagrant user to the docker group +* Add proper bash completion for "docker push" +* Add xz utils as a runtime dep +* Add cleanup/refactor portion of #2010 for hack and Dockerfile updates ++ Add contrib/mkimage-centos.sh back (from #1621), and associated documentation link +* Add several of the small make.sh fixes from #1920, and make the output more consistent and contributor-friendly ++ Add @tianon to hack/MAINTAINERS +* Improve network performance for VirtualBox +* Revamp install.sh to be usable by more people, and to use official install methods whenever possible (apt repo, portage tree, etc.) +- Fix contrib/mkimage-debian.sh apt caching prevention ++ Add Dockerfile.tmLanguage to contrib +* Configured FPM to make /etc/init/docker.conf a config file +* Enable SSH Agent forwarding in Vagrant VM +* Several small tweaks/fixes for contrib/mkimage-debian.sh + +#### Other + +- Builder: Abort build if mergeConfig returns an error and fix duplicate error message +- Packaging: Remove deprecated packaging directory +- Registry: Use correct auth config when logging in. 
+- Registry: Fix the error message so it is the same as the regex + +## 0.6.3 (2013-09-23) + +#### Packaging + +* Add 'docker' group on install for ubuntu package +* Update tar vendor dependency +* Download apt key over HTTPS + +#### Runtime + +- Only copy and change permissions on non-bindmount volumes +* Allow multiple volumes-from +- Fix HTTP imports from STDIN + +#### Documentation + +* Update section on extracting the docker binary after build +* Update development environment docs for new build process +* Remove 'base' image from documentation + +#### Other + +- Client: Fix detach issue +- Registry: Update regular expression to match index + +## 0.6.2 (2013-09-17) + +#### Runtime + ++ Add domainname support ++ Implement image filtering with path.Match +* Remove unnecessary warnings +* Remove os/user dependency +* Only mount the hostname file when the config exists +* Handle signals within the `docker login` command +- UID and GID are now also applied to volumes +- `docker start` set error code upon error +- `docker run` set the same error code as the process started + +#### Builder + ++ Add -rm option in order to remove intermediate containers +* Allow multiline for the RUN instruction + +#### Registry + +* Implement login with private registry +- Fix push issues + +#### Other + ++ Hack: Vendor all dependencies +* Remote API: Bump to v1.5 +* Packaging: Break down hack/make.sh into small scripts, one per 'bundle': test, binary, ubuntu etc. +* Documentation: General improvements + +## 0.6.1 (2013-08-23) + +#### Registry + +* Pass "meta" headers in API calls to the registry + +#### Packaging + +- Use correct upstart script with new build tool +- Use libffi-dev, don`t build it from sources +- Remove duplicate mercurial install command + +## 0.6.0 (2013-08-22) + +#### Runtime + ++ Add lxc-conf flag to allow custom lxc options ++ Add an option to set the working directory +* Add Image name to LogEvent tests ++ Add -privileged flag and relevant tests, docs, and examples +* Add websocket support to /container//attach/ws +* Add warning when net.ipv4.ip_forwarding = 0 +* Add hostname to environment +* Add last stable version in `docker version` +- Fix race conditions in parallel pull +- Fix Graph ByParent() to generate list of child images per parent image. +- Fix typo: fmt.Sprint -> fmt.Sprintf +- Fix small \n error un docker build +* Fix to "Inject dockerinit at /.dockerinit" +* Fix #910. print user name to docker info output +* Use Go 1.1.2 for dockerbuilder +* Use ranged for loop on channels +- Use utils.ParseRepositoryTag instead of strings.Split(name, ":") in server.ImageDelete +- Improve CMD, ENTRYPOINT, and attach docs. +- Improve connect message with socket error +- Load authConfig only when needed and fix useless WARNING +- Show tag used when image is missing +* Apply volumes-from before creating volumes +- Make docker run handle SIGINT/SIGTERM +- Prevent crash when .dockercfg not readable +- Install script should be fetched over https, not http. +* API, issue 1471: Use groups for socket permissions +- Correctly detect IPv4 forwarding +* Mount /dev/shm as a tmpfs +- Switch from http to https for get.docker.io +* Let userland proxy handle container-bound traffic +* Update the Docker CLI to specify a value for the "Host" header. 
+- Change network range to avoid conflict with EC2 DNS +- Reduce connect and read timeout when pinging the registry +* Parallel pull +- Handle ip route showing mask-less IP addresses +* Allow ENTRYPOINT without CMD +- Always consider localhost as a domain name when parsing the FQN repos name +* Refactor checksum + +#### Documentation + +* Add MongoDB image example +* Add instructions for creating and using the docker group +* Add sudo to examples and installation to documentation +* Add ufw doc +* Add a reference to ps -a +* Add information about Docker`s high level tools over LXC. +* Fix typo in docs for docker run -dns +* Fix a typo in the ubuntu installation guide +* Fix to docs regarding adding docker groups +* Update default -H docs +* Update readme with dependencies for building +* Update amazon.rst to explain that Vagrant is not necessary for running Docker on ec2 +* PostgreSQL service example in documentation +* Suggest installing linux-headers by default. +* Change the twitter handle +* Clarify Amazon EC2 installation +* 'Base' image is deprecated and should no longer be referenced in the docs. +* Move note about officially supported kernel +- Solved the logo being squished in Safari + +#### Builder + ++ Add USER instruction do Dockerfile ++ Add workdir support for the Buildfile +* Add no cache for docker build +- Fix docker build and docker events output +- Only count known instructions as build steps +- Make sure ENV instruction within build perform a commit each time +- Forbid certain paths within docker build ADD +- Repository name (and optionally a tag) in build usage +- Make sure ADD will create everything in 0755 + +#### Remote API + +* Sort Images by most recent creation date. +* Reworking opaque requests in registry module +* Add image name in /events +* Use mime pkg to parse Content-Type +* 650 http utils and user agent field + +#### Hack + ++ Bash Completion: Limit commands to containers of a relevant state +* Add docker dependencies coverage testing into docker-ci + +#### Packaging + ++ Docker-brew 0.5.2 support and memory footprint reduction +* Add new docker dependencies into docker-ci +- Revert "docker.upstart: avoid spawning a `sh` process" ++ Docker-brew and Docker standard library ++ Release docker with docker +* Fix the upstart script generated by get.docker.io +* Enabled the docs to generate manpages. +* Revert Bind daemon to 0.0.0.0 in Vagrant. 
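+
+As a quick illustration of the `-privileged` and `lxc-conf` flags introduced in this release (a hypothetical sketch; `myimage` and the lxc option value are placeholders, and the single-dash flag spellings match this era of Docker, not current releases):
+
+```bash
+# run a container with extended privileges via the new -privileged flag
+docker run -privileged myimage
+# pass a custom option through to the lxc driver via the new lxc-conf flag
+docker run -lxc-conf="lxc.cgroup.cpuset.cpus=0,1" myimage
+```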
+ +#### Register + +* Improve auth push +* Registry unit tests + mock registry + +#### Tests + +* Improve TestKillDifferentUser to prevent timeout on buildbot +- Fix typo in TestBindMounts (runContainer called without image) +* Improve TestGetContainersTop so it does not rely on sleep +* Relax the lo interface test to allow iface index != 1 +* Add registry functional test to docker-ci +* Add some tests in server and utils + +#### Other + +* Contrib: bash completion script +* Client: Add docker cp command and copy api endpoint to copy container files/folders to the host +* Don`t read from stdout when only attached to stdin + +## 0.5.3 (2013-08-13) + +#### Runtime + +* Use docker group for socket permissions +- Spawn shell within upstart script +- Handle ip route showing mask-less IP addresses +- Add hostname to environment + +#### Builder + +- Make sure ENV instruction within build perform a commit each time + +## 0.5.2 (2013-08-08) + +* Builder: Forbid certain paths within docker build ADD +- Runtime: Change network range to avoid conflict with EC2 DNS +* API: Change daemon to listen on unix socket by default + +## 0.5.1 (2013-07-30) + +#### Runtime + ++ Add `ps` args to `docker top` ++ Add support for container ID files (pidfile like) ++ Add container=lxc in default env ++ Support networkless containers with `docker run -n` and `docker -d -b=none` +* Stdout/stderr logs are now stored in the same file as JSON +* Allocate a /16 IP range by default, with fallback to /24. Try 12 ranges instead of 3. +* Change .dockercfg format to json and support multiple auth remote +- Do not override volumes from config +- Fix issue with EXPOSE override + +#### API + ++ Docker client now sets useragent (RFC 2616) ++ Add /events endpoint + +#### Builder + ++ ADD command now understands URLs ++ CmdAdd and CmdEnv now respect Dockerfile-set ENV variables +- Create directories with 755 instead of 700 within ADD instruction + +#### Hack + +* Simplify unit tests with helpers +* Improve docker.upstart event +* Add coverage testing into docker-ci + +## 0.5.0 (2013-07-17) + +#### Runtime + ++ List all processes running inside a container with 'docker top' ++ Host directories can be mounted as volumes with 'docker run -v' ++ Containers can expose public UDP ports (eg, '-p 123/udp') ++ Optionally specify an exact public port (eg. '-p 80:4500') +* 'docker login' supports additional options +- Don't save a container`s hostname when committing an image. + +#### Registry + ++ New image naming scheme inspired by Go packaging convention allows arbitrary combinations of registries +- Fix issues when uploading images to a private registry + +#### Builder + ++ ENTRYPOINT instruction sets a default binary entry point to a container ++ VOLUME instruction marks a part of the container as persistent data +* 'docker build' displays the full output of a build by default + +## 0.4.8 (2013-07-01) + ++ Builder: New build operation ENTRYPOINT adds an executable entry point to the container. - Runtime: Fix a bug which caused 'docker run -d' to no longer print the container ID. 
+- Tests: Fix issues in the test suite + +## 0.4.7 (2013-06-28) + +#### Remote API + +* The progress bar updates faster when downloading and uploading large files +- Fix a bug in the optional unix socket transport + +#### Runtime + +* Improve detection of kernel version ++ Host directories can be mounted as volumes with 'docker run -b' +- fix an issue when only attaching to stdin +* Use 'tar --numeric-owner' to avoid uid mismatch across multiple hosts + +#### Hack + +* Improve test suite and dev environment +* Remove dependency on unit tests on 'os/user' + +#### Other + +* Registry: easier push/pull to a custom registry ++ Documentation: add terminology section + +## 0.4.6 (2013-06-22) + +- Runtime: fix a bug which caused creation of empty images (and volumes) to crash. + +## 0.4.5 (2013-06-21) + ++ Builder: 'docker build git://URL' fetches and builds a remote git repository +* Runtime: 'docker ps -s' optionally prints container size +* Tests: improved and simplified +- Runtime: fix a regression introduced in 0.4.3 which caused the logs command to fail. +- Builder: fix a regression when using ADD with single regular file. + +## 0.4.4 (2013-06-19) + +- Builder: fix a regression introduced in 0.4.3 which caused builds to fail on new clients. + +## 0.4.3 (2013-06-19) + +#### Builder + ++ ADD of a local file will detect tar archives and unpack them +* ADD improvements: use tar for copy + automatically unpack local archives +* ADD uses tar/untar for copies instead of calling 'cp -ar' +* Fix the behavior of ADD to be (mostly) reverse-compatible, predictable and well-documented. +- Fix a bug which caused builds to fail if ADD was the first command +* Nicer output for 'docker build' + +#### Runtime + +* Remove bsdtar dependency +* Add unix socket and multiple -H support +* Prevent rm of running containers +* Use go1.1 cookiejar +- Fix issue detaching from running TTY container +- Forbid parallel push/pull for a single image/repo. Fixes #311 +- Fix race condition within Run command when attaching. + +#### Client + +* HumanReadable ProgressBar sizes in pull +* Fix docker version`s git commit output + +#### API + +* Send all tags on History API call +* Add tag lookup to history command. 
Fixes #882
+
+#### Documentation
+
+- Fix missing command in IRC bouncer example
+
+## 0.4.2 (2013-06-17)
+
+- Packaging: Bumped version to work around an Ubuntu bug
+
+## 0.4.1 (2013-06-17)
+
+#### Remote API
+
++ Add flag to enable cross domain requests
++ Add image and container sizes in docker ps and docker images
+
+#### Runtime
+
++ Configure DNS host-wide with 'docker -d -dns'
++ Detect faulty DNS configuration and replace it with a public default
++ Allow 'docker run <name>:<id>'
++ You can now specify a public port (ex: -p 80:4500)
+* Improve image removal to garbage-collect unreferenced parents
+
+#### Client
+
+* Allow multiple params in inspect
+* Print the container ID before the hijack in `docker run`
+
+#### Registry
+
+* Add regexp check on repo's name
+* Move auth to the client
+- Remove login check on pull
+
+#### Other
+
+* Vagrantfile: Add the REST API port to the Vagrantfile's port_forward
+* Upgrade to Go 1.1
+- Builder: don't ignore last line in Dockerfile when it doesn't end with \n
+
+## 0.4.0 (2013-06-03)
+
+#### Builder
+
++ Introducing Builder
++ 'docker build' builds a container, layer by layer, from a source repository containing a Dockerfile
+
+#### Remote API
+
++ Introducing Remote API
++ Control Docker programmatically using a simple HTTP/JSON API
+
+#### Runtime
+
+* Various reliability and usability improvements
+
+## 0.3.4 (2013-05-30)
+
+#### Builder
+
++ 'docker build' builds a container, layer by layer, from a source repository containing a Dockerfile
++ 'docker build -t FOO' applies the tag FOO to the newly built container.
+
+#### Runtime
+
++ Interactive TTYs correctly handle window resize
+* Fix how configuration is merged between layers
+
+#### Remote API
+
++ Split stdout and stderr on 'docker run'
++ Optionally listen on a different IP and port (use at your own risk)
+
+#### Documentation
+
+* Improve install instructions.
+
+## 0.3.3 (2013-05-23)
+
+- Registry: Fix push regression
+- Various bugfixes
+
+## 0.3.2 (2013-05-09)
+
+#### Registry
+
+* Improve the checksum process
+* Use the size to have a good progress bar while pushing
+* Use the actual archive if it exists in order to speed up the push
+- Fix error 400 on push
+
+#### Runtime
+
+* Store the actual archive on commit
+
+## 0.3.1 (2013-05-08)
+
+#### Builder
+
++ Implement the autorun capability within docker builder
++ Add caching to docker builder
++ Add support for docker builder with native API as top level command
++ Implement ENV within docker builder
+- Check that the command exists prior to create, and add unit tests for the case
+* Use any whitespace instead of tabs
+
+#### Runtime
+
++ Add Go version to debug info
+* Kernel version - don't show the dash if flavor is empty
+
+#### Registry
+
++ Add docker search top level command in order to search a repository
+- Fix pull for official images with specific tag
+- Fix issue when logging in with a different user and trying to push
+* Improve checksum - async calculation
+
+#### Images
+
++ Output graph of images to dot (graphviz)
+- Fix ByParent function
+
+#### Documentation
+
++ New introduction and high-level overview
++ Add the documentation for docker builder
+- CSS fix for docker documentation to make REST API docs look better.
+- Fix CouchDB example page header mistake
+- Fix README formatting
+* Update www.docker.io website.
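+
+The graphviz output added in the Images section above can be piped straight into `dot` (a hypothetical sketch; `-viz` is the single-dash spelling used in this era, later deprecated per the 0.10.0 notes):
+
+```bash
+# export the image graph as dot and render it to a PNG with graphviz
+docker images -viz | dot -Tpng -o images.png
+```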
+
+#### Other
+
++ Website: new high-level overview
+- Makefile: Swap "go get" for "go get -d", especially to compile on go1.1rc
+* Packaging: Ubuntu packaging; issue #510: Use golang-stable PPA package to build docker
+
+## 0.3.0 (2013-05-06)
+
+#### Runtime
+
+- Fix the command existence check
+- strings.Split may return an empty string on no match
+- Fix an index out of range crash if cgroup memory is not
+
+#### Documentation
+
+* Various improvements
++ New example: sharing data between 2 CouchDB databases
+
+#### Other
+
+* Vagrant: Use only one deb line in /etc/apt
++ Registry: Implement the new registry
+
+## 0.2.2 (2013-05-03)
+
++ Support for data volumes ('docker run -v=PATH')
++ Share data volumes between containers ('docker run -volumes-from')
++ Improve documentation
+* Upgrade to Go 1.0.3
+* Various upgrades to the dev environment for contributors
+
+## 0.2.1 (2013-05-01)
+
++ 'docker commit -run' bundles a layer with default runtime options: command, ports etc.
+* Improve install process on Vagrant
++ New Dockerfile operation: "maintainer"
++ New Dockerfile operation: "expose"
++ New Dockerfile operation: "cmd"
++ Contrib script to build a Debian base layer
++ 'docker -d -r': restart crashed containers at daemon startup
+* Runtime: improve test coverage
+
+## 0.2.0 (2013-04-23)
+
+- Runtime: ghost containers can be killed and waited for
+* Documentation: update install instructions
+- Packaging: fix Vagrantfile
+- Development: automate releasing binaries and Ubuntu packages
++ Add a changelog
+- Various bugfixes
+
+## 0.1.8 (2013-04-22)
+
+- Dynamically detect cgroup capabilities
+- Issue stability warning on kernels <3.8
+- 'docker push' buffers on disk instead of memory
+- Fix 'docker diff' for removed files
+- Fix 'docker stop' for ghost containers
+- Fix handling of pidfile
+- Various bugfixes and stability improvements
+
+## 0.1.7 (2013-04-18)
+
+- Container ports are available on localhost
+- 'docker ps' shows allocated TCP ports
+- Contributors can run 'make hack' to start a continuous integration VM
+- Streamline Ubuntu packaging & uploading
+- Various bugfixes and stability improvements
+
+## 0.1.6 (2013-04-17)
+
+- Record the author of an image with 'docker commit -author'
+
+## 0.1.5 (2013-04-17)
+
+- Disable standalone mode
+- Use a custom DNS resolver with 'docker -d -dns'
+- Detect ghost containers
+- Improve diagnosis of missing system capabilities
+- Allow disabling memory limits at compile time
+- Add Debian packaging
+- Documentation: installing on Arch Linux
+- Documentation: running Redis on docker
+- Fix lxc 0.9 compatibility
+- Automatically load aufs module
+- Various bugfixes and stability improvements
+
+## 0.1.4 (2013-04-09)
+
+- Full support for TTY emulation
+- Detach from a TTY session with the escape sequence `C-p C-q`
+- Various bugfixes and stability improvements
+- Minor UI improvements
+- Automatically create our own bridge interface 'docker0'
+
+## 0.1.3 (2013-04-04)
+
+- Choose TCP frontend port with '-p :PORT'
+- Layer format is versioned
+- Major reliability improvements to the process manager
+- Various bugfixes and stability improvements
+
+## 0.1.2 (2013-04-03)
+
+- Set container hostname with 'docker run -h'
+- Selective attach at run with 'docker run -a [stdin[,stdout[,stderr]]]'
+- Various bugfixes and stability improvements
+- UI polish
+- Progress bar on push/pull
+- Use XZ compression by default
+- Make IP allocator lazy
+
+## 0.1.1 (2013-03-31)
+
+- Display shorthand IDs for convenience
+- Stabilize process management
+- Layers 
can include a commit message +- Simplified 'docker attach' +- Fix support for re-attaching +- Various bugfixes and stability improvements +- Auto-download at run +- Auto-login on push +- Beefed up documentation + +## 0.1.0 (2013-03-23) + +Initial public release + +- Implement registry in order to push/pull images +- TCP port allocation +- Fix termcaps on Linux +- Add documentation +- Add Vagrant support with Vagrantfile +- Add unit tests +- Add repository/tags to ease image management +- Improve the layer implementation diff --git a/components/engine/CONTRIBUTING.md b/components/engine/CONTRIBUTING.md new file mode 100644 index 0000000000..abf6c45a3c --- /dev/null +++ b/components/engine/CONTRIBUTING.md @@ -0,0 +1,455 @@ +# Contributing to Docker + +Want to hack on Docker? Awesome! We have a contributor's guide that explains +[setting up a Docker development environment and the contribution +process](https://docs.docker.com/opensource/project/who-written-for/). + +[![Contributors guide](docs/static_files/contributors.png)](https://docs.docker.com/opensource/project/who-written-for/) + +This page contains information about reporting issues as well as some tips and +guidelines useful to experienced open source contributors. Finally, make sure +you read our [community guidelines](#docker-community-guidelines) before you +start participating. + +## Topics + +* [Reporting Security Issues](#reporting-security-issues) +* [Design and Cleanup Proposals](#design-and-cleanup-proposals) +* [Reporting Issues](#reporting-other-issues) +* [Quick Contribution Tips and Guidelines](#quick-contribution-tips-and-guidelines) +* [Community Guidelines](#docker-community-guidelines) + +## Reporting security issues + +The Docker maintainers take security seriously. If you discover a security +issue, please bring it to their attention right away! + +Please **DO NOT** file a public issue; instead, send your report privately to +[security@docker.com](mailto:security@docker.com). + +Security reports are greatly appreciated and we will publicly thank you for them. +We also like to send gifts—if you're into Docker schwag, make sure to let +us know. We currently do not offer a paid security bounty program, but are not +ruling it out in the future. + + +## Reporting other issues + +A great way to contribute to the project is to send a detailed report when you +encounter an issue. We always appreciate a well-written, thorough bug report, +and will thank you for it! + +Check that [our issue database](https://github.com/docker/docker/issues) +doesn't already include that problem or suggestion before submitting an issue. +If you find a match, you can use the "subscribe" button to get notified of +updates. Do *not* leave random "+1" or "I have this too" comments, as they +only clutter the discussion, and don't help resolve it. However, if you +have ways to reproduce the issue or have additional information that may help +resolve the issue, please leave a comment. + +When reporting issues, always include: + +* The output of `docker version`. +* The output of `docker info`. + +Also include the steps required to reproduce the problem if possible and +applicable. This information will help us review and fix your issue faster. +When sending lengthy log files, consider posting them as a gist (https://gist.github.com). +Don't forget to remove sensitive data from your log files before posting (you can +replace those parts with "REDACTED").
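As a convenience, here is one way you might gather that output before filing a report; the file name is purely illustrative, and you should review the file for sensitive data before posting it:

```bash
# Capture the diagnostics every bug report should include.
{
  echo '=== docker version ==='
  docker version
  echo
  echo '=== docker info ==='
  docker info
} > docker-report.txt 2>&1

# Inspect (and redact) the output before posting it as a gist.
less docker-report.txt
```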
+ +## Quick contribution tips and guidelines + +This section gives the experienced contributor some tips and guidelines. + +### Pull requests are always welcome + +Not sure if that typo is worth a pull request? Found a bug and know how to fix +it? Do it! We will appreciate it. Any significant improvement should be +documented as [a GitHub issue](https://github.com/docker/docker/issues) before +anybody starts working on it. + +We are always thrilled to receive pull requests. We do our best to process them +quickly. If your pull request is not accepted on the first try, +don't get discouraged! Our contributor's guide explains [the review process we +use for simple changes](https://docs.docker.com/opensource/workflow/make-a-contribution/). + +### Design and cleanup proposals + +You can propose new designs for existing Docker features. You can also design +entirely new features. We really appreciate contributors who want to refactor or +otherwise clean up our project. For information on making these types of +contributions, see [the advanced contribution +section](https://docs.docker.com/opensource/workflow/advanced-contributing/) in +the contributors guide. + +We try hard to keep Docker lean and focused. Docker can't do everything for +everybody. This means that we might decide against incorporating a new feature. +However, there might be a way to implement that feature *on top of* Docker. + +### Talking to other Docker users and contributors
| Channel | Description |
|---------|-------------|
| Forums | A public forum for users to discuss questions and explore current design patterns and best practices about Docker and related projects in the Docker Ecosystem. To participate, just log in with your Docker Hub account on https://forums.docker.com. |
| Internet Relay Chat (IRC) | IRC is a direct line to our most knowledgeable Docker users; we have both the #docker and #docker-dev channels on irc.freenode.net. IRC is a rich chat protocol, but it can overwhelm new users. You can search our chat archives, and our IRC quickstart guide offers an easy way to get started. |
| Google Group | The docker-dev group is for contributors and other people contributing to the Docker project. You can join without a Google account by sending an email to docker-dev+subscribe@googlegroups.com. After receiving the join-request message, simply reply to it to confirm your subscription. |
| Twitter | You can follow Docker's Twitter feed to get updates on our products. You can also tweet us questions or just share blogs or stories. |
| Stack Overflow | Stack Overflow has thousands of Docker questions listed. We regularly monitor Docker questions, and so do many other knowledgeable Docker users. |
### Conventions + +Fork the repository and make changes on your fork in a feature branch: + +- If it's a bug fix branch, name it XXXX-something where XXXX is the number of + the issue. +- If it's a feature branch, create an enhancement issue to announce + your intentions, and name it XXXX-something where XXXX is the number of the + issue. + +Submit unit tests for your changes. Go has a great test framework built in; use +it! Take a look at existing tests for inspiration. [Run the full test +suite](https://docs.docker.com/opensource/project/test-and-docs/) on your branch before +submitting a pull request. + +Update the documentation when creating or modifying features. Test your +documentation changes for clarity, concision, and correctness, as well as a +clean documentation build. See our contributors guide for [our style +guide](https://docs.docker.com/opensource/doc-style) and instructions on [building +the documentation](https://docs.docker.com/opensource/project/test-and-docs/#build-and-test-the-documentation). + +Write clean code. Universally formatted code promotes ease of writing, reading, +and maintenance. Always run `gofmt -s -w file.go` on each changed file before +committing your changes. Most editors have plug-ins that do this automatically. + +Pull request descriptions should be as clear as possible and include a reference +to all the issues that they address. + +### Successful Changes + +Before submitting large or high-impact changes, make the effort to coordinate +with the maintainers of the project. This +prevents you from doing extra work that may or may not be merged. + +Large PRs that are just submitted without any prior communication are unlikely +to be successful. + +While pull requests are the methodology for submitting changes to code, changes +are much more likely to be accepted if they are accompanied by additional +engineering work. While we don't define this explicitly, most of these goals +are accomplished through communication of the design goals and subsequent +solutions. Often, it helps to first state the problem before presenting +solutions. + +Typically, the best way to accomplish this is to submit an issue +stating the problem. This issue can include a problem statement and a +checklist with requirements. If solutions are proposed, alternatives should be +listed and eliminated. Even if the criterion for eliminating a solution seems +frivolous, say so. + +Larger changes typically work best with design documents. These are focused on +providing context to the design at the time the feature was conceived and can +inform future documentation contributions. + +### Commit Messages + +Commit messages must start with a capitalized and short summary (max. 50 chars) +written in the imperative, followed by an optional, more detailed explanatory +text which is separated from the summary by an empty line. + +Commit messages should follow best practices, including explaining the context +of the problem and how it was solved, including any caveats or follow-up changes +required. They should tell the story of the change and give readers an +understanding of what led to it. + +If you're lost about what this even means, please see [How to Write a Git +Commit Message](http://chris.beams.io/posts/git-commit/) for a start. + +In practice, the best approach to maintaining a nice commit message is to +leverage `git add -p` and `git commit --amend` to formulate a solid +changeset. This allows one to piece together a change, as information becomes +available.
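As a rough sketch of that flow (the branch name and timing are illustrative; the `-s` flag adds the sign-off described under "Sign your work" below):

```bash
# Stage only the hunks that belong in this logical change.
git add -p

# Fold the staged hunks into the work-in-progress commit,
# signing off as you go.
git commit -s --amend

# Once the changeset is solid, squash any fixups into logical
# units and update the pull request branch.
git rebase -i master
git push -f origin 1234-fix-something
```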
If you squash a series of commits, don't just submit that. Re-write the commit +message, as if the series of commits were a single stroke of brilliance. + +That said, there is no requirement to have a single commit for a PR, as long as +each commit tells the story. For example, if there is a feature that requires a +package, it might make sense to have the package in a separate commit, then have +a subsequent commit that uses it. + +Remember, you're telling part of the story with the commit message. Don't make +your chapter weird. + +### Review + +Code review comments may be added to your pull request. Discuss, then make the +suggested modifications and push additional commits to your feature branch. Post +a comment after pushing. New commits show up in the pull request automatically, +but the reviewers are notified only when you comment. + +Pull requests must be cleanly rebased on top of master without multiple branches +mixed into the PR. + +**Git tip**: If your PR no longer merges cleanly, use `git rebase master` in your +feature branch to update your pull request rather than `git merge master`. + +Before you make a pull request, squash your commits into logical units of work +using `git rebase -i` and `git push -f`. A logical unit of work is a consistent +set of patches that should be reviewed together: for example, upgrading the +version of a vendored dependency and taking advantage of its now available new +feature constitute two separate units of work. Implementing a new function and +calling it in another file constitute a single logical unit of work. The vast +majority of submissions should have a single commit, so if in doubt: squash +down to one. + +After every commit, [make sure the test suite passes](https://docs.docker.com/opensource/project/test-and-docs/). Include documentation +changes in the same pull request so that a revert would remove all traces of +the feature or fix. + +Include an issue reference like `Closes #XXXX` or `Fixes #XXXX` in commits that +close an issue. Including references automatically closes the issue on a merge. + +Please do not add yourself to the `AUTHORS` file, as it is regenerated regularly +from the Git history. + +Please see the [Coding Style](#coding-style) for further guidelines. + +### Merge approval + +Docker maintainers use LGTM (Looks Good To Me) in comments on the code review to +indicate acceptance. + +A change requires LGTMs from an absolute majority of the maintainers of each +component affected. For example, if a change affects `docs/` and `registry/`, it +needs an absolute majority from the maintainers of `docs/` AND, separately, an +absolute majority of the maintainers of `registry/`. + +For more details, see the [MAINTAINERS](MAINTAINERS) page. + +### Sign your work + +The sign-off is a simple line at the end of the explanation for the patch. Your +signature certifies that you wrote the patch or otherwise have the right to pass +it on as an open-source patch. The rules are pretty simple: if you can certify +the below (from [developercertificate.org](http://developercertificate.org/)): + +``` +Developer Certificate of Origin +Version 1.1 + +Copyright (C) 2004, 2006 The Linux Foundation and its contributors. +1 Letterman Drive +Suite D4700 +San Francisco, CA, 94129 + +Everyone is permitted to copy and distribute verbatim copies of this +license document, but changing it is not allowed.
+ +Developer's Certificate of Origin 1.1 + +By making a contribution to this project, I certify that: + +(a) The contribution was created in whole or in part by me and I + have the right to submit it under the open source license + indicated in the file; or + +(b) The contribution is based upon previous work that, to the best + of my knowledge, is covered under an appropriate open source + license and I have the right under that license to submit that + work with modifications, whether created in whole or in part + by me, under the same open source license (unless I am + permitted to submit under a different license), as indicated + in the file; or + +(c) The contribution was provided directly to me by some other + person who certified (a), (b) or (c) and I have not modified + it. + +(d) I understand and agree that this project and the contribution + are public and that a record of the contribution (including all + personal information I submit with it, including my sign-off) is + maintained indefinitely and may be redistributed consistent with + this project or the open source license(s) involved. +``` + +Then you just add a line to every git commit message: + + Signed-off-by: Joe Smith <joe.smith@email.com> + +Use your real name (sorry, no pseudonyms or anonymous contributions). + +If you set your `user.name` and `user.email` git configs, you can sign your +commit automatically with `git commit -s`. + +### How can I become a maintainer? + +The procedures for adding new maintainers are explained in the +global [MAINTAINERS](https://github.com/docker/opensource/blob/master/MAINTAINERS) +file in the [https://github.com/docker/opensource/](https://github.com/docker/opensource/) +repository. + +Don't forget: being a maintainer is a time investment. Make sure you +will have time to make yourself available. You don't have to be a +maintainer to make a difference on the project! + +## Docker community guidelines + +We want to keep the Docker community awesome, growing and collaborative. We need +your help to keep it that way. To help with this we've come up with some general +guidelines for the community as a whole: + +* Be nice: Be courteous, respectful and polite to fellow community members: + no regional, racial, gender, or other abuse will be tolerated. We like + nice people way better than mean ones! + +* Encourage diversity and participation: Make everyone in our community feel + welcome, regardless of their background and the extent of their + contributions, and do everything possible to encourage participation in + our community. + +* Keep it legal: Basically, don't get us in trouble. Share only content that + you own, do not share private or sensitive information, and don't break + the law. + +* Stay on topic: Make sure that you are posting to the correct channel and + avoid off-topic discussions. Remember that when you update an issue or respond + to an email, you are potentially sending to a large number of people. Please + consider this before you update. Also remember that nobody likes spam. + +* Don't send email to the maintainers: There's no need to send email to the + maintainers to ask them to investigate an issue or to take a look at a + pull request. Instead of sending an email, GitHub mentions should be + used to ping maintainers to review a pull request, a proposal or an + issue. + +### Guideline violations — 3 strikes method + +The point of this section is not to find opportunities to punish people, but we +do need a fair way to deal with people who are making our community suck. + +1. 
First occurrence: We'll give you a friendly, but public reminder that the + behavior is inappropriate according to our guidelines. + +2. Second occurrence: We will send you a private message with a warning that + any additional violations will result in removal from the community. + +3. Third occurrence: Depending on the violation, we may need to delete or ban + your account. + +**Notes:** + +* Obvious spammers are banned on first occurrence. If we don't do this, we'll + have spam all over the place. + +* Violations are forgiven after 6 months of good behavior, and we won't hold a + grudge. + +* People who commit minor infractions will get some education, rather than + being hammered by the 3 strikes process. + +* The rules apply equally to everyone in the community, no matter how much + you've contributed. + +* Extreme violations of a threatening, abusive, destructive or illegal nature + will be addressed immediately and are not subject to 3 strikes or forgiveness. + +* Contact abuse@docker.com to report abuse or appeal violations. In the case of + appeals, we know that mistakes happen, and we'll work with you to come up with a + fair solution if there has been a misunderstanding. + +## Coding Style + +Unless explicitly stated, we follow all coding guidelines from the Go +community. While some of these standards may seem arbitrary, they somehow seem +to result in a solid, consistent codebase. + +It is possible that the code base does not currently comply with these +guidelines. We are not looking for a massive PR that fixes this, since that +goes against the spirit of the guidelines. All new contributions should make a +best effort to clean up and make the code base better than they found it. +Obviously, apply your best judgement. Remember, the goal here is to make the +code base easier for humans to navigate and understand. Always keep that in +mind when nudging others to comply. + +The rules: + +1. All code should be formatted with `gofmt -s`. +2. All code should pass the default levels of + [`golint`](https://github.com/golang/lint). +3. All code should follow the guidelines covered in [Effective + Go](http://golang.org/doc/effective_go.html) and [Go Code Review + Comments](https://github.com/golang/go/wiki/CodeReviewComments). +4. Comment the code. Tell us the why, the history and the context. +5. Document _all_ declarations and methods, even private ones. Declare + expectations, caveats and anything else that may be important. If a type + gets exported, having the comments already there will ensure it's ready. +6. Variable name length should be proportional to its context and no longer. + `noCommaALongVariableNameLikeThisIsNotMoreClearWhenASimpleCommentWouldDo`. + In practice, short methods will have short variable names and globals will + have longer names. +7. No underscores in package names. If you need a compound name, step back, + and re-examine why you need a compound name. If you still think you need a + compound name, lose the underscore. +8. No utils or helpers packages. If a function is not general enough to + warrant its own package, it has not been written generally enough to be a + part of a util package. Just leave it unexported and well-documented. +9. All tests should run with `go test` and outside tooling should not be + required. No, we don't need another unit testing framework. Assertion + packages are acceptable if they provide _real_ incremental value. +10. Even though we call these "rules" above, they are actually just + guidelines. Since you've read all the rules, you now know that.
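As a convenience, here is a sketch of how the mechanical checks behind rules 1, 2, and 9 might be run locally before pushing:

```bash
# Rule 1: fail if any file is not formatted with `gofmt -s`.
test -z "$(gofmt -s -l . | tee /dev/stderr)"

# Rule 2: run golint at its default levels.
golint ./...

# Rule 9: plain `go test` should be all the tooling you need.
go test ./...
```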
If you are having trouble getting into the mood of idiomatic Go, we recommend +reading through [Effective Go](https://golang.org/doc/effective_go.html). The +[Go Blog](https://blog.golang.org) is also a great resource. Drinking the +kool-aid is a lot easier than going thirsty. diff --git a/components/engine/Dockerfile b/components/engine/Dockerfile new file mode 100644 index 0000000000..4bfd0b0580 --- /dev/null +++ b/components/engine/Dockerfile @@ -0,0 +1,248 @@ +# This file describes the standard way to build Docker, using docker +# +# Usage: +# +# # Assemble the full dev environment. This is slow the first time. +# docker build -t docker . +# +# # Mount your source in an interactive container for quick testing: +# docker run -v `pwd`:/go/src/github.com/docker/docker --privileged -i -t docker bash +# +# # Run the test suite: +# docker run -e DOCKER_GITCOMMIT=foo --privileged docker hack/make.sh test-unit test-integration-cli test-docker-py +# +# # Publish a release: +# docker run --privileged \ +# -e AWS_S3_BUCKET=baz \ +# -e AWS_ACCESS_KEY=foo \ +# -e AWS_SECRET_KEY=bar \ +# -e GPG_PASSPHRASE=gloubiboulga \ +# docker hack/release.sh +# +# Note: AppArmor used to mess with privileged mode, but this is no longer +# the case. Therefore, you don't have to disable it anymore. +# + +FROM debian:jessie + +# allow replacing httpredir or deb mirror +ARG APT_MIRROR=deb.debian.org +RUN sed -ri "s/(httpredir|deb).debian.org/$APT_MIRROR/g" /etc/apt/sources.list + +# Packaged dependencies +RUN apt-get update && apt-get install -y \ + apparmor \ + apt-utils \ + aufs-tools \ + automake \ + bash-completion \ + binutils-mingw-w64 \ + bsdmainutils \ + btrfs-tools \ + build-essential \ + clang \ + cmake \ + createrepo \ + curl \ + dpkg-sig \ + gcc-mingw-w64 \ + git \ + iptables \ + jq \ + less \ + libapparmor-dev \ + libcap-dev \ + libltdl-dev \ + libnl-3-dev \ + libprotobuf-c0-dev \ + libprotobuf-dev \ + libsystemd-journal-dev \ + libtool \ + mercurial \ + net-tools \ + pkg-config \ + protobuf-compiler \ + protobuf-c-compiler \ + python-dev \ + python-mock \ + python-pip \ + python-websocket \ + tar \ + vim \ + vim-common \ + xfsprogs \ + zip \ + --no-install-recommends \ + && pip install awscli==1.10.15 +# Get lvm2 source for compiling statically +ENV LVM2_VERSION 2.02.103 +RUN mkdir -p /usr/local/lvm2 \ + && curl -fsSL "https://mirrors.kernel.org/sourceware/lvm2/LVM2.${LVM2_VERSION}.tgz" \ + | tar -xzC /usr/local/lvm2 --strip-components=1 +# See https://git.fedorahosted.org/cgit/lvm2.git/refs/tags for release tags + +# Compile and install lvm2 +RUN cd /usr/local/lvm2 \ + && ./configure \ + --build="$(gcc -print-multiarch)" \ + --enable-static_link \ + && make device-mapper \ + && make install_device-mapper +# See https://git.fedorahosted.org/cgit/lvm2.git/tree/INSTALL + +# Configure the container for OSX cross compilation +ENV OSX_SDK MacOSX10.11.sdk +ENV OSX_CROSS_COMMIT a9317c18a3a457ca0a657f08cc4d0d43c6cf8953 +RUN set -x \ + && export OSXCROSS_PATH="/osxcross" \ + && git clone https://github.com/tpoechtrager/osxcross.git $OSXCROSS_PATH \ + && ( cd $OSXCROSS_PATH && git checkout -q $OSX_CROSS_COMMIT) \ + && curl -sSL https://s3.dockerproject.org/darwin/v2/${OSX_SDK}.tar.xz -o "${OSXCROSS_PATH}/tarballs/${OSX_SDK}.tar.xz" \ + && UNATTENDED=yes OSX_VERSION_MIN=10.6 ${OSXCROSS_PATH}/build.sh +ENV PATH /osxcross/target/bin:$PATH + +# Install seccomp: the version shipped upstream is too old +ENV SECCOMP_VERSION 2.3.2 +RUN set -x \ + && export 
SECCOMP_PATH="$(mktemp -d)" \ + && curl -fsSL "https://github.com/seccomp/libseccomp/releases/download/v${SECCOMP_VERSION}/libseccomp-${SECCOMP_VERSION}.tar.gz" \ + | tar -xzC "$SECCOMP_PATH" --strip-components=1 \ + && ( \ + cd "$SECCOMP_PATH" \ + && ./configure --prefix=/usr/local \ + && make \ + && make install \ + && ldconfig \ + ) \ + && rm -rf "$SECCOMP_PATH" + +# Install Go +# IMPORTANT: If the version of Go is updated, the Windows to Linux CI machines +# will need updating, to avoid errors. Ping #docker-maintainers on IRC +# with a heads-up. +ENV GO_VERSION 1.8.1 +RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" \ + | tar -xzC /usr/local + +ENV PATH /go/bin:/usr/local/go/bin:$PATH +ENV GOPATH /go + +# Compile Go for cross compilation +ENV DOCKER_CROSSPLATFORMS \ + linux/386 linux/arm \ + darwin/amd64 \ + freebsd/amd64 freebsd/386 freebsd/arm \ + windows/amd64 windows/386 \ + solaris/amd64 + +# Dependency for golint +ENV GO_TOOLS_COMMIT 823804e1ae08dbb14eb807afc7db9993bc9e3cc3 +RUN git clone https://github.com/golang/tools.git /go/src/golang.org/x/tools \ + && (cd /go/src/golang.org/x/tools && git checkout -q $GO_TOOLS_COMMIT) + +# Grab Go's lint tool +ENV GO_LINT_COMMIT 32a87160691b3c96046c0c678fe57c5bef761456 +RUN git clone https://github.com/golang/lint.git /go/src/github.com/golang/lint \ + && (cd /go/src/github.com/golang/lint && git checkout -q $GO_LINT_COMMIT) \ + && go install -v github.com/golang/lint/golint + +# Install CRIU for checkpoint/restore support +ENV CRIU_VERSION 2.12.1 +# Install dependancy packages specific to criu +RUN apt-get install libnet-dev -y && \ + mkdir -p /usr/src/criu \ + && curl -sSL https://github.com/xemul/criu/archive/v${CRIU_VERSION}.tar.gz | tar -v -C /usr/src/criu/ -xz --strip-components=1 \ + && cd /usr/src/criu \ + && make \ + && make install-criu + +# Install two versions of the registry. The first is an older version that +# only supports schema1 manifests. The second is a newer version that supports +# both. This allows integration-cli tests to cover push/pull with both schema1 +# and schema2 manifests. 
+ENV REGISTRY_COMMIT_SCHEMA1 ec87e9b6971d831f0eff752ddb54fb64693e51cd +ENV REGISTRY_COMMIT 47a064d4195a9b56133891bbb13620c3ac83a827 +RUN set -x \ + && export GOPATH="$(mktemp -d)" \ + && git clone https://github.com/docker/distribution.git "$GOPATH/src/github.com/docker/distribution" \ + && (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT") \ + && GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \ + go build -o /usr/local/bin/registry-v2 github.com/docker/distribution/cmd/registry \ + && (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT_SCHEMA1") \ + && GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \ + go build -o /usr/local/bin/registry-v2-schema1 github.com/docker/distribution/cmd/registry \ + && rm -rf "$GOPATH" + +# Install notary and notary-server +ENV NOTARY_VERSION v0.5.0 +RUN set -x \ + && export GOPATH="$(mktemp -d)" \ + && git clone https://github.com/docker/notary.git "$GOPATH/src/github.com/docker/notary" \ + && (cd "$GOPATH/src/github.com/docker/notary" && git checkout -q "$NOTARY_VERSION") \ + && GOPATH="$GOPATH/src/github.com/docker/notary/vendor:$GOPATH" \ + go build -o /usr/local/bin/notary-server github.com/docker/notary/cmd/notary-server \ + && GOPATH="$GOPATH/src/github.com/docker/notary/vendor:$GOPATH" \ + go build -o /usr/local/bin/notary github.com/docker/notary/cmd/notary \ + && rm -rf "$GOPATH" + +# Get the "docker-py" source so we can run their integration tests +ENV DOCKER_PY_COMMIT 4a08d04aef0595322e1b5ac7c52f28a931da85a5 +# To run the integration tests, docker-pycreds is required: +# before the integration tests run, conftest.py is loaded, +# which loads auth.py, which in turn imports the +# docker-pycreds module. +RUN git clone https://github.com/docker/docker-py.git /docker-py \ + && cd /docker-py \ + && git checkout -q $DOCKER_PY_COMMIT \ + && pip install docker-pycreds==0.2.1 \ + && pip install -r test-requirements.txt + +# Install yamllint for validating swagger.yaml +RUN pip install yamllint==1.5.0 + +# Install go-swagger for validating swagger.yaml +ENV GO_SWAGGER_COMMIT c28258affb0b6251755d92489ef685af8d4ff3eb +RUN git clone https://github.com/go-swagger/go-swagger.git /go/src/github.com/go-swagger/go-swagger \ + && (cd /go/src/github.com/go-swagger/go-swagger && git checkout -q $GO_SWAGGER_COMMIT) \ + && go install -v github.com/go-swagger/go-swagger/cmd/swagger + +# Set user.email so crosbymichael's in-container merge commits go smoothly +RUN git config --global user.email 'docker-dummy@example.com' + +# Add an unprivileged user to be used for tests which need it +RUN groupadd -r docker +RUN useradd --create-home --gid docker unprivilegeduser + +VOLUME /var/lib/docker +WORKDIR /go/src/github.com/docker/docker +ENV DOCKER_BUILDTAGS apparmor pkcs11 seccomp selinux + +# Let us use a .bashrc file +RUN ln -sfv $PWD/.bashrc ~/.bashrc +# Add integration helpers to bashrc +RUN echo "source $PWD/hack/make/.integration-test-helpers" >> /etc/bash.bashrc + +# Register Docker's bash completion. 
+RUN ln -sv $PWD/contrib/completion/bash/docker /etc/bash_completion.d/docker + +# Get useful and necessary Hub images so we can "docker load" locally instead of pulling +COPY contrib/download-frozen-image-v2.sh /go/src/github.com/docker/docker/contrib/ +RUN ./contrib/download-frozen-image-v2.sh /docker-frozen-images \ + buildpack-deps:jessie@sha256:85b379ec16065e4fe4127eb1c5fb1bcc03c559bd36dbb2e22ff496de55925fa6 \ + busybox:latest@sha256:32f093055929dbc23dec4d03e09dfe971f5973a9ca5cf059cbfb644c206aa83f \ + debian:jessie@sha256:72f784399fd2719b4cb4e16ef8e369a39dc67f53d978cd3e2e7bf4e502c7b793 \ + hello-world:latest@sha256:c5515758d4c5e1e838e9cd307f6c6a0d620b5e07e6f927b07d05f6d12a1ac8d7 +# See also ensureFrozenImagesLinux() in "integration-cli/fixtures_linux_daemon_test.go" (which needs to be updated when adding images to this list) + +# Install tomlv, vndr, runc, containerd, tini, docker-proxy dockercli +# Please edit hack/dockerfile/install-binaries.sh to update them. +COPY hack/dockerfile/binaries-commits /tmp/binaries-commits +COPY hack/dockerfile/install-binaries.sh /tmp/install-binaries.sh +RUN /tmp/install-binaries.sh tomlv vndr runc containerd tini proxy bindata dockercli +ENV PATH=/usr/local/cli:$PATH + +# Wrap all commands in the "docker-in-docker" script to allow nested containers +ENTRYPOINT ["hack/dind"] + +# Upload docker source +COPY . /go/src/github.com/docker/docker diff --git a/components/engine/Dockerfile.aarch64 b/components/engine/Dockerfile.aarch64 new file mode 100644 index 0000000000..ad1e5188be --- /dev/null +++ b/components/engine/Dockerfile.aarch64 @@ -0,0 +1,202 @@ +# This file describes the standard way to build Docker on aarch64, using docker +# +# Usage: +# +# # Assemble the full dev environment. This is slow the first time. +# docker build -t docker -f Dockerfile.aarch64 . +# +# # Mount your source in an interactive container for quick testing: +# docker run -v `pwd`:/go/src/github.com/docker/docker --privileged -i -t docker bash +# +# # Run the test suite: +# docker run --privileged docker hack/make.sh test-unit test-integration-cli test-docker-py +# +# Note: AppArmor used to mess with privileged mode, but this is no longer +# the case. Therefore, you don't have to disable it anymore. 
+# + +FROM aarch64/ubuntu:xenial + +# Packaged dependencies +RUN apt-get update && apt-get install -y \ + apparmor \ + aufs-tools \ + automake \ + bash-completion \ + btrfs-tools \ + build-essential \ + cmake \ + createrepo \ + curl \ + dpkg-sig \ + g++ \ + gcc \ + git \ + iptables \ + jq \ + libapparmor-dev \ + libc6-dev \ + libcap-dev \ + libltdl-dev \ + libsystemd-dev \ + libyaml-dev \ + mercurial \ + net-tools \ + parallel \ + pkg-config \ + python-dev \ + python-mock \ + python-pip \ + python-setuptools \ + python-websocket \ + golang-go \ + iproute2 \ + iputils-ping \ + vim-common \ + --no-install-recommends + +# Get lvm2 source for compiling statically +ENV LVM2_VERSION 2.02.103 +RUN mkdir -p /usr/local/lvm2 \ + && curl -fsSL "https://mirrors.kernel.org/sourceware/lvm2/LVM2.${LVM2_VERSION}.tgz" \ + | tar -xzC /usr/local/lvm2 --strip-components=1 +# See https://git.fedorahosted.org/cgit/lvm2.git/refs/tags for release tags + +# Fix platform enablement in lvm2 to support aarch64 properly +RUN set -e \ + && for f in config.guess config.sub; do \ + curl -fsSL -o "/usr/local/lvm2/autoconf/$f" "http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=$f;hb=HEAD"; \ + done +# "arch.c:78:2: error: #error the arch code needs to know about your machine type" + +# Compile and install lvm2 +RUN cd /usr/local/lvm2 \ + && ./configure \ + --build="$(gcc -print-multiarch)" \ + --enable-static_link \ + && make device-mapper \ + && make install_device-mapper +# See https://git.fedorahosted.org/cgit/lvm2.git/tree/INSTALL + +# Install seccomp: the version shipped upstream is too old +ENV SECCOMP_VERSION 2.3.2 +RUN set -x \ + && export SECCOMP_PATH="$(mktemp -d)" \ + && curl -fsSL "https://github.com/seccomp/libseccomp/releases/download/v${SECCOMP_VERSION}/libseccomp-${SECCOMP_VERSION}.tar.gz" \ + | tar -xzC "$SECCOMP_PATH" --strip-components=1 \ + && ( \ + cd "$SECCOMP_PATH" \ + && ./configure --prefix=/usr/local \ + && make \ + && make install \ + && ldconfig \ + ) \ + && rm -rf "$SECCOMP_PATH" + +# Install Go +# We don't have official binary golang 1.7.5 tarballs for ARM64, either for Go or +# for the bootstrap, so we use golang-go (1.6) as a bootstrap to build Go from source. +# We don't use the official ARMv6 released binaries as a GOROOT_BOOTSTRAP, because +# not all ARM64 platforms support 32-bit mode. 32-bit mode is optional for ARMv8. +ENV GO_VERSION 1.8.1 +RUN mkdir /usr/src/go && curl -fsSL https://golang.org/dl/go${GO_VERSION}.src.tar.gz | tar -v -C /usr/src/go -xz --strip-components=1 \ + && cd /usr/src/go/src \ + && GOOS=linux GOARCH=arm64 GOROOT_BOOTSTRAP="$(go env GOROOT)" ./make.bash + +ENV PATH /go/bin:/usr/src/go/bin:$PATH +ENV GOPATH /go + +# Dependency for golint +ENV GO_TOOLS_COMMIT 823804e1ae08dbb14eb807afc7db9993bc9e3cc3 +RUN git clone https://github.com/golang/tools.git /go/src/golang.org/x/tools \ + && (cd /go/src/golang.org/x/tools && git checkout -q $GO_TOOLS_COMMIT) + +# Grab Go's lint tool +ENV GO_LINT_COMMIT 32a87160691b3c96046c0c678fe57c5bef761456 +RUN git clone https://github.com/golang/lint.git /go/src/github.com/golang/lint \ + && (cd /go/src/github.com/golang/lint && git checkout -q $GO_LINT_COMMIT) \ + && go install -v github.com/golang/lint/golint + +# Only install one version of the registry, because the old version that supports +# schema1 manifests does not work on ARM64; integration-cli tests for schema1 +# manifests should therefore be skipped on ARM64. 
+ENV REGISTRY_COMMIT 47a064d4195a9b56133891bbb13620c3ac83a827 +RUN set -x \ + && export GOPATH="$(mktemp -d)" \ + && git clone https://github.com/docker/distribution.git "$GOPATH/src/github.com/docker/distribution" \ + && (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT") \ + && GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \ + go build -o /usr/local/bin/registry-v2 github.com/docker/distribution/cmd/registry \ + && rm -rf "$GOPATH" + +# Install notary and notary-server +ENV NOTARY_VERSION v0.5.0 +RUN set -x \ + && export GOPATH="$(mktemp -d)" \ + && git clone https://github.com/docker/notary.git "$GOPATH/src/github.com/docker/notary" \ + && (cd "$GOPATH/src/github.com/docker/notary" && git checkout -q "$NOTARY_VERSION") \ + && GOPATH="$GOPATH/src/github.com/docker/notary/vendor:$GOPATH" \ + go build -o /usr/local/bin/notary-server github.com/docker/notary/cmd/notary-server \ + && GOPATH="$GOPATH/src/github.com/docker/notary/vendor:$GOPATH" \ + go build -o /usr/local/bin/notary github.com/docker/notary/cmd/notary \ + && rm -rf "$GOPATH" + +# Get the "docker-py" source so we can run their integration tests +ENV DOCKER_PY_COMMIT 4a08d04aef0595322e1b5ac7c52f28a931da85a5 +# Before the integration tests run, conftest.py is loaded, +# which loads auth.py, which in turn imports the +# docker-pycreds module. +RUN git clone https://github.com/docker/docker-py.git /docker-py \ + && cd /docker-py \ + && git checkout -q $DOCKER_PY_COMMIT \ + && pip install wheel \ + && pip install docker-pycreds==0.2.1 \ + && pip install -r test-requirements.txt + +# Install yamllint for validating swagger.yaml +RUN pip install yamllint==1.5.0 + +# Install go-swagger for validating swagger.yaml +ENV GO_SWAGGER_COMMIT c28258affb0b6251755d92489ef685af8d4ff3eb +RUN git clone https://github.com/go-swagger/go-swagger.git /go/src/github.com/go-swagger/go-swagger \ + && (cd /go/src/github.com/go-swagger/go-swagger && git checkout -q $GO_SWAGGER_COMMIT) \ + && go install -v github.com/go-swagger/go-swagger/cmd/swagger + +# Set user.email so crosbymichael's in-container merge commits go smoothly +RUN git config --global user.email 'docker-dummy@example.com' + +# Add an unprivileged user to be used for tests which need it +RUN groupadd -r docker +RUN useradd --create-home --gid docker unprivilegeduser + +VOLUME /var/lib/docker +WORKDIR /go/src/github.com/docker/docker +ENV DOCKER_BUILDTAGS apparmor pkcs11 seccomp selinux + +# Let us use a .bashrc file +RUN ln -sfv $PWD/.bashrc ~/.bashrc + +# Register Docker's bash completion. 
+RUN ln -sv $PWD/contrib/completion/bash/docker /etc/bash_completion.d/docker + +# Get useful and necessary Hub images so we can "docker load" locally instead of pulling +COPY contrib/download-frozen-image-v2.sh /go/src/github.com/docker/docker/contrib/ +RUN ./contrib/download-frozen-image-v2.sh /docker-frozen-images \ + aarch64/buildpack-deps:jessie@sha256:107f4a96837ed89c493fc205cd28508ed0b6b680b4bf3e514e9f0fa0f6667b77 \ + aarch64/busybox:latest@sha256:5a06b8b2fdf22dd1f4085c6c3efd23ee99af01b2d668d286bc4be6d8baa10efb \ + aarch64/debian:jessie@sha256:e6f90b568631705bd5cb27490977378ba762792b38d47c91c4da7a539f63079a \ + aarch64/hello-world:latest@sha256:bd1722550b97668b23ede297abf824d4855f4d9f600dab7b4db1a963dae7ec9e +# See also ensureFrozenImagesLinux() in "integration-cli/fixtures_linux_daemon_test.go" (which needs to be updated when adding images to this list) + +# Install tomlv, vndr, runc, containerd, tini, docker-proxy +# Please edit hack/dockerfile/install-binaries.sh to update them. +COPY hack/dockerfile/binaries-commits /tmp/binaries-commits +COPY hack/dockerfile/install-binaries.sh /tmp/install-binaries.sh +RUN /tmp/install-binaries.sh tomlv vndr runc containerd tini proxy dockercli +ENV PATH=/usr/local/cli:$PATH + +# Wrap all commands in the "docker-in-docker" script to allow nested containers +ENTRYPOINT ["hack/dind"] + +# Upload docker source +COPY . /go/src/github.com/docker/docker diff --git a/components/engine/Dockerfile.armhf b/components/engine/Dockerfile.armhf new file mode 100644 index 0000000000..6a566ba638 --- /dev/null +++ b/components/engine/Dockerfile.armhf @@ -0,0 +1,182 @@ +# This file describes the standard way to build Docker on ARMv7, using docker +# +# Usage: +# +# # Assemble the full dev environment. This is slow the first time. +# docker build -t docker -f Dockerfile.armhf . +# +# # Mount your source in an interactive container for quick testing: +# docker run -v `pwd`:/go/src/github.com/docker/docker --privileged -i -t docker bash +# +# # Run the test suite: +# docker run --privileged docker hack/make.sh test-unit test-integration-cli test-docker-py +# +# Note: AppArmor used to mess with privileged mode, but this is no longer +# the case. Therefore, you don't have to disable it anymore. 
+# + +FROM armhf/debian:jessie + +# allow replacing httpredir or deb mirror +ARG APT_MIRROR=deb.debian.org +RUN sed -ri "s/(httpredir|deb).debian.org/$APT_MIRROR/g" /etc/apt/sources.list + +# Packaged dependencies +RUN apt-get update && apt-get install -y \ + apparmor \ + aufs-tools \ + automake \ + bash-completion \ + btrfs-tools \ + build-essential \ + createrepo \ + curl \ + cmake \ + dpkg-sig \ + git \ + iptables \ + jq \ + net-tools \ + libapparmor-dev \ + libcap-dev \ + libltdl-dev \ + libsystemd-journal-dev \ + libtool \ + mercurial \ + pkg-config \ + python-dev \ + python-mock \ + python-pip \ + python-websocket \ + xfsprogs \ + tar \ + vim-common \ + --no-install-recommends \ + && pip install awscli==1.10.15 + +# Get lvm2 source for compiling statically +ENV LVM2_VERSION 2.02.103 +RUN mkdir -p /usr/local/lvm2 \ + && curl -fsSL "https://mirrors.kernel.org/sourceware/lvm2/LVM2.${LVM2_VERSION}.tgz" \ + | tar -xzC /usr/local/lvm2 --strip-components=1 +# See https://git.fedorahosted.org/cgit/lvm2.git/refs/tags for release tags + +# Compile and install lvm2 +RUN cd /usr/local/lvm2 \ + && ./configure \ + --build="$(gcc -print-multiarch)" \ + --enable-static_link \ + && make device-mapper \ + && make install_device-mapper +# See https://git.fedorahosted.org/cgit/lvm2.git/tree/INSTALL + +# Install Go +ENV GO_VERSION 1.8.1 +RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-armv6l.tar.gz" \ + | tar -xzC /usr/local +ENV PATH /go/bin:/usr/local/go/bin:$PATH +ENV GOPATH /go + +# We're building for armhf, which is ARMv7, so let's be explicit about that +ENV GOARCH arm +ENV GOARM 7 + +# Dependency for golint +ENV GO_TOOLS_COMMIT 823804e1ae08dbb14eb807afc7db9993bc9e3cc3 +RUN git clone https://github.com/golang/tools.git /go/src/golang.org/x/tools \ + && (cd /go/src/golang.org/x/tools && git checkout -q $GO_TOOLS_COMMIT) + +# Grab Go's lint tool +ENV GO_LINT_COMMIT 32a87160691b3c96046c0c678fe57c5bef761456 +RUN git clone https://github.com/golang/lint.git /go/src/github.com/golang/lint \ + && (cd /go/src/github.com/golang/lint && git checkout -q $GO_LINT_COMMIT) \ + && go install -v github.com/golang/lint/golint + +# Install seccomp: the version shipped upstream is too old +ENV SECCOMP_VERSION 2.3.2 +RUN set -x \ + && export SECCOMP_PATH="$(mktemp -d)" \ + && curl -fsSL "https://github.com/seccomp/libseccomp/releases/download/v${SECCOMP_VERSION}/libseccomp-${SECCOMP_VERSION}.tar.gz" \ + | tar -xzC "$SECCOMP_PATH" --strip-components=1 \ + && ( \ + cd "$SECCOMP_PATH" \ + && ./configure --prefix=/usr/local \ + && make \ + && make install \ + && ldconfig \ + ) \ + && rm -rf "$SECCOMP_PATH" + +# Install two versions of the registry. The first is an older version that +# only supports schema1 manifests. The second is a newer version that supports +# both. This allows integration-cli tests to cover push/pull with both schema1 +# and schema2 manifests. 
+ENV REGISTRY_COMMIT_SCHEMA1 ec87e9b6971d831f0eff752ddb54fb64693e51cd +ENV REGISTRY_COMMIT cb08de17d74bef86ce6c5abe8b240e282f5750be +RUN set -x \ + && export GOPATH="$(mktemp -d)" \ + && git clone https://github.com/docker/distribution.git "$GOPATH/src/github.com/docker/distribution" \ + && (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT") \ + && GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \ + go build -o /usr/local/bin/registry-v2 github.com/docker/distribution/cmd/registry \ + && (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT_SCHEMA1") \ + && GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \ + go build -o /usr/local/bin/registry-v2-schema1 github.com/docker/distribution/cmd/registry \ + && rm -rf "$GOPATH" + +# Install notary and notary-server +ENV NOTARY_VERSION v0.5.0 +RUN set -x \ + && export GOPATH="$(mktemp -d)" \ + && git clone https://github.com/docker/notary.git "$GOPATH/src/github.com/docker/notary" \ + && (cd "$GOPATH/src/github.com/docker/notary" && git checkout -q "$NOTARY_VERSION") \ + && GOPATH="$GOPATH/src/github.com/docker/notary/vendor:$GOPATH" \ + go build -o /usr/local/bin/notary-server github.com/docker/notary/cmd/notary-server \ + && GOPATH="$GOPATH/src/github.com/docker/notary/vendor:$GOPATH" \ + go build -o /usr/local/bin/notary github.com/docker/notary/cmd/notary \ + && rm -rf "$GOPATH" + +# Get the "docker-py" source so we can run their integration tests +ENV DOCKER_PY_COMMIT e2655f658408f9ad1f62abdef3eb6ed43c0cf324 +RUN git clone https://github.com/docker/docker-py.git /docker-py \ + && cd /docker-py \ + && git checkout -q $DOCKER_PY_COMMIT \ + && pip install -r test-requirements.txt + +# Set user.email so crosbymichael's in-container merge commits go smoothly +RUN git config --global user.email 'docker-dummy@example.com' + +# Add an unprivileged user to be used for tests which need it +RUN groupadd -r docker +RUN useradd --create-home --gid docker unprivilegeduser + +VOLUME /var/lib/docker +WORKDIR /go/src/github.com/docker/docker +ENV DOCKER_BUILDTAGS apparmor pkcs11 seccomp selinux + +# Let us use a .bashrc file +RUN ln -sfv $PWD/.bashrc ~/.bashrc + +# Register Docker's bash completion. +RUN ln -sv $PWD/contrib/completion/bash/docker /etc/bash_completion.d/docker + +# Get useful and necessary Hub images so we can "docker load" locally instead of pulling +COPY contrib/download-frozen-image-v2.sh /go/src/github.com/docker/docker/contrib/ +RUN ./contrib/download-frozen-image-v2.sh /docker-frozen-images \ + armhf/buildpack-deps:jessie@sha256:eb2dad77ef53e88d94c3c83862d315c806ea1ca49b6e74f4db362381365ce489 \ + armhf/busybox:latest@sha256:016a1e149d2acc2a3789a160dfa60ce870794eea27ad5e96f7a101970e5e1689 \ + armhf/debian:jessie@sha256:ac59fa18b28d0ef751eabb5ba4c4b5a9063f99398bae2f70495aa8ed6139b577 \ + armhf/hello-world:latest@sha256:9701edc932223a66e49dd6c894a11db8c2cf4eccd1414f1ec105a623bf16b426 +# See also ensureFrozenImagesLinux() in "integration-cli/fixtures_linux_daemon_test.go" (which needs to be updated when adding images to this list) + +# Install tomlv, vndr, runc, containerd, tini, docker-proxy +# Please edit hack/dockerfile/install-binaries.sh to update them. 
+COPY hack/dockerfile/binaries-commits /tmp/binaries-commits +COPY hack/dockerfile/install-binaries.sh /tmp/install-binaries.sh +RUN /tmp/install-binaries.sh tomlv vndr runc containerd tini proxy dockercli +ENV PATH=/usr/local/cli:$PATH + +ENTRYPOINT ["hack/dind"] + +# Upload docker source +COPY . /go/src/github.com/docker/docker diff --git a/components/engine/Dockerfile.ppc64le b/components/engine/Dockerfile.ppc64le new file mode 100644 index 0000000000..234e90a534 --- /dev/null +++ b/components/engine/Dockerfile.ppc64le @@ -0,0 +1,189 @@ +# This file describes the standard way to build Docker on ppc64le, using docker +# +# Usage: +# +# # Assemble the full dev environment. This is slow the first time. +# docker build -t docker -f Dockerfile.ppc64le . +# +# # Mount your source in an interactive container for quick testing: +# docker run -v `pwd`:/go/src/github.com/docker/docker --privileged -i -t docker bash +# +# # Run the test suite: +# docker run --privileged docker hack/make.sh test-unit test-integration-cli test-docker-py +# +# Note: AppArmor used to mess with privileged mode, but this is no longer +# the case. Therefore, you don't have to disable it anymore. +# + +FROM ppc64le/debian:jessie + +# allow replacing httpredir or deb mirror +ARG APT_MIRROR=deb.debian.org +RUN sed -ri "s/(httpredir|deb).debian.org/$APT_MIRROR/g" /etc/apt/sources.list + +# Packaged dependencies +RUN apt-get update && apt-get install -y \ + apparmor \ + apt-utils \ + aufs-tools \ + automake \ + bash-completion \ + btrfs-tools \ + build-essential \ + cmake \ + createrepo \ + curl \ + dpkg-sig \ + git \ + iptables \ + jq \ + net-tools \ + libapparmor-dev \ + libcap-dev \ + libltdl-dev \ + libsystemd-journal-dev \ + libtool \ + mercurial \ + pkg-config \ + python-dev \ + python-mock \ + python-pip \ + python-websocket \ + xfsprogs \ + tar \ + vim-common \ + --no-install-recommends + +# Get lvm2 source for compiling statically +ENV LVM2_VERSION 2.02.103 +RUN mkdir -p /usr/local/lvm2 \ + && curl -fsSL "https://mirrors.kernel.org/sourceware/lvm2/LVM2.${LVM2_VERSION}.tgz" \ + | tar -xzC /usr/local/lvm2 --strip-components=1 +# See https://git.fedorahosted.org/cgit/lvm2.git/refs/tags for release tags + +# Fix platform enablement in lvm2 to support ppc64le properly +RUN set -e \ + && for f in config.guess config.sub; do \ + curl -fsSL -o "/usr/local/lvm2/autoconf/$f" "http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=$f;hb=HEAD"; \ + done +# "arch.c:78:2: error: #error the arch code needs to know about your machine type" + +# Compile and install lvm2 +RUN cd /usr/local/lvm2 \ + && ./configure \ + --build="$(gcc -print-multiarch)" \ + --enable-static_link \ + && make device-mapper \ + && make install_device-mapper +# See https://git.fedorahosted.org/cgit/lvm2.git/tree/INSTALL + +# Install seccomp: the version shipped upstream is too old +ENV SECCOMP_VERSION 2.3.2 +RUN set -x \ + && export SECCOMP_PATH="$(mktemp -d)" \ + && curl -fsSL "https://github.com/seccomp/libseccomp/releases/download/v${SECCOMP_VERSION}/libseccomp-${SECCOMP_VERSION}.tar.gz" \ + | tar -xzC "$SECCOMP_PATH" --strip-components=1 \ + && ( \ + cd "$SECCOMP_PATH" \ + && ./configure --prefix=/usr/local \ + && make \ + && make install \ + && ldconfig \ + ) \ + && rm -rf "$SECCOMP_PATH" + + +# Install Go +# NOTE: official ppc64le go binaries weren't available until go 1.6.4 and 1.7.4 +ENV GO_VERSION 1.8.1 +RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-ppc64le.tar.gz" \ + | tar -xzC /usr/local + +ENV PATH 
/go/bin:/usr/local/go/bin:$PATH +ENV GOPATH /go + +# Dependency for golint +ENV GO_TOOLS_COMMIT 823804e1ae08dbb14eb807afc7db9993bc9e3cc3 +RUN git clone https://github.com/golang/tools.git /go/src/golang.org/x/tools \ + && (cd /go/src/golang.org/x/tools && git checkout -q $GO_TOOLS_COMMIT) + +# Grab Go's lint tool +ENV GO_LINT_COMMIT 32a87160691b3c96046c0c678fe57c5bef761456 +RUN git clone https://github.com/golang/lint.git /go/src/github.com/golang/lint \ + && (cd /go/src/github.com/golang/lint && git checkout -q $GO_LINT_COMMIT) \ + && go install -v github.com/golang/lint/golint + +# Install two versions of the registry. The first is an older version that +# only supports schema1 manifests. The second is a newer version that supports +# both. This allows integration-cli tests to cover push/pull with both schema1 +# and schema2 manifests. +ENV REGISTRY_COMMIT_SCHEMA1 ec87e9b6971d831f0eff752ddb54fb64693e51cd +ENV REGISTRY_COMMIT 47a064d4195a9b56133891bbb13620c3ac83a827 +RUN set -x \ + && export GOPATH="$(mktemp -d)" \ + && git clone https://github.com/docker/distribution.git "$GOPATH/src/github.com/docker/distribution" \ + && (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT") \ + && GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \ + go build -o /usr/local/bin/registry-v2 github.com/docker/distribution/cmd/registry \ + && (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT_SCHEMA1") \ + && GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \ + go build -o /usr/local/bin/registry-v2-schema1 github.com/docker/distribution/cmd/registry \ + && rm -rf "$GOPATH" + +# Install notary and notary-server +ENV NOTARY_VERSION v0.5.0 +RUN set -x \ + && export GOPATH="$(mktemp -d)" \ + && git clone https://github.com/docker/notary.git "$GOPATH/src/github.com/docker/notary" \ + && (cd "$GOPATH/src/github.com/docker/notary" && git checkout -q "$NOTARY_VERSION") \ + && GOPATH="$GOPATH/src/github.com/docker/notary/vendor:$GOPATH" \ + go build -o /usr/local/bin/notary-server github.com/docker/notary/cmd/notary-server \ + && GOPATH="$GOPATH/src/github.com/docker/notary/vendor:$GOPATH" \ + go build -o /usr/local/bin/notary github.com/docker/notary/cmd/notary \ + && rm -rf "$GOPATH" + +# Get the "docker-py" source so we can run their integration tests +ENV DOCKER_PY_COMMIT e2655f658408f9ad1f62abdef3eb6ed43c0cf324 +RUN git clone https://github.com/docker/docker-py.git /docker-py \ + && cd /docker-py \ + && git checkout -q $DOCKER_PY_COMMIT \ + && pip install -r test-requirements.txt + +# Set user.email so crosbymichael's in-container merge commits go smoothly +RUN git config --global user.email 'docker-dummy@example.com' + +# Add an unprivileged user to be used for tests which need it +RUN groupadd -r docker +RUN useradd --create-home --gid docker unprivilegeduser + +VOLUME /var/lib/docker +WORKDIR /go/src/github.com/docker/docker +ENV DOCKER_BUILDTAGS apparmor pkcs11 seccomp selinux + +# Let us use a .bashrc file +RUN ln -sfv $PWD/.bashrc ~/.bashrc + +# Register Docker's bash completion. 
+RUN ln -sv $PWD/contrib/completion/bash/docker /etc/bash_completion.d/docker + +# Get useful and necessary Hub images so we can "docker load" locally instead of pulling +COPY contrib/download-frozen-image-v2.sh /go/src/github.com/docker/docker/contrib/ +RUN ./contrib/download-frozen-image-v2.sh /docker-frozen-images \ + ppc64le/buildpack-deps:jessie@sha256:1a2f2d2cc8738f14b336aeffc3503b5c9dedf9e1f26c7313cb4999534ad4716f \ + ppc64le/busybox:latest@sha256:54f34c83adfab20cf0e630d879e210f07b0062cd6caaf16346a61396d50e7584 \ + ppc64le/debian:jessie@sha256:ea8c5b105e3790f075145b40e4be1e4488c9f33f55e6cc45182047b80a68f892 \ + ppc64le/hello-world:latest@sha256:7d57adf137665f748956c86089320710b66d08584db3500ed98f4bb3da637c2d +# See also ensureFrozenImagesLinux() in "integration-cli/fixtures_linux_daemon_test.go" (which needs to be updated when adding images to this list) + +# Install tomlv, vndr, runc, containerd, tini, docker-proxy +# Please edit hack/dockerfile/install-binaries.sh to update them. +COPY hack/dockerfile/binaries-commits /tmp/binaries-commits +COPY hack/dockerfile/install-binaries.sh /tmp/install-binaries.sh +RUN /tmp/install-binaries.sh tomlv vndr runc containerd tini proxy dockercli +ENV PATH=/usr/local/cli:$PATH + +# Wrap all commands in the "docker-in-docker" script to allow nested containers +ENTRYPOINT ["hack/dind"] + +# Upload docker source +COPY . /go/src/github.com/docker/docker diff --git a/components/engine/Dockerfile.s390x b/components/engine/Dockerfile.s390x new file mode 100644 index 0000000000..f191481c5f --- /dev/null +++ b/components/engine/Dockerfile.s390x @@ -0,0 +1,182 @@ +# This file describes the standard way to build Docker on s390x, using docker +# +# Usage: +# +# # Assemble the full dev environment. This is slow the first time. +# docker build -t docker -f Dockerfile.s390x . +# +# # Mount your source in an interactive container for quick testing: +# docker run -v `pwd`:/go/src/github.com/docker/docker --privileged -i -t docker bash +# +# # Run the test suite: +# docker run --privileged docker hack/make.sh test-unit test-integration-cli test-docker-py +# +# Note: AppArmor used to mess with privileged mode, but this is no longer +# the case. Therefore, you don't have to disable it anymore. 
+# + +FROM s390x/debian:jessie + +# Packaged dependencies +RUN apt-get update && apt-get install -y \ + apparmor \ + apt-utils \ + aufs-tools \ + automake \ + bash-completion \ + btrfs-tools \ + build-essential \ + cmake \ + createrepo \ + curl \ + dpkg-sig \ + git \ + iptables \ + jq \ + net-tools \ + libapparmor-dev \ + libcap-dev \ + libltdl-dev \ + libsystemd-journal-dev \ + libtool \ + mercurial \ + pkg-config \ + python-dev \ + python-mock \ + python-pip \ + python-websocket \ + xfsprogs \ + tar \ + vim-common \ + --no-install-recommends + +# Install seccomp: the version shipped upstream is too old +ENV SECCOMP_VERSION 2.3.2 +RUN set -x \ + && export SECCOMP_PATH="$(mktemp -d)" \ + && curl -fsSL "https://github.com/seccomp/libseccomp/releases/download/v${SECCOMP_VERSION}/libseccomp-${SECCOMP_VERSION}.tar.gz" \ + | tar -xzC "$SECCOMP_PATH" --strip-components=1 \ + && ( \ + cd "$SECCOMP_PATH" \ + && ./configure --prefix=/usr/local \ + && make \ + && make install \ + && ldconfig \ + ) \ + && rm -rf "$SECCOMP_PATH" + +# Get lvm2 source for compiling statically +ENV LVM2_VERSION 2.02.103 +RUN mkdir -p /usr/local/lvm2 \ + && curl -fsSL "https://mirrors.kernel.org/sourceware/lvm2/LVM2.${LVM2_VERSION}.tgz" \ + | tar -xzC /usr/local/lvm2 --strip-components=1 +# See https://git.fedorahosted.org/cgit/lvm2.git/refs/tags for release tags + +# Fix platform enablement in lvm2 to support s390x properly +RUN set -e \ + && for f in config.guess config.sub; do \ + curl -fsSL -o "/usr/local/lvm2/autoconf/$f" "http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=$f;hb=HEAD"; \ + done +# "arch.c:78:2: error: #error the arch code needs to know about your machine type" + +# Compile and install lvm2 +RUN cd /usr/local/lvm2 \ + && ./configure \ + --build="$(gcc -print-multiarch)" \ + --enable-static_link \ + && make device-mapper \ + && make install_device-mapper +# See https://git.fedorahosted.org/cgit/lvm2.git/tree/INSTALL + +ENV GO_VERSION 1.8.1 +RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-s390x.tar.gz" \ + | tar -xzC /usr/local + +ENV PATH /go/bin:/usr/local/go/bin:$PATH +ENV GOPATH /go + +# Dependency for golint +ENV GO_TOOLS_COMMIT 823804e1ae08dbb14eb807afc7db9993bc9e3cc3 +RUN git clone https://github.com/golang/tools.git /go/src/golang.org/x/tools \ + && (cd /go/src/golang.org/x/tools && git checkout -q $GO_TOOLS_COMMIT) + +# Grab Go's lint tool +ENV GO_LINT_COMMIT 32a87160691b3c96046c0c678fe57c5bef761456 +RUN git clone https://github.com/golang/lint.git /go/src/github.com/golang/lint \ + && (cd /go/src/github.com/golang/lint && git checkout -q $GO_LINT_COMMIT) \ + && go install -v github.com/golang/lint/golint + +# Install two versions of the registry. The first is an older version that +# only supports schema1 manifests. The second is a newer version that supports +# both. This allows integration-cli tests to cover push/pull with both schema1 +# and schema2 manifests. 
+ENV REGISTRY_COMMIT_SCHEMA1 ec87e9b6971d831f0eff752ddb54fb64693e51cd +ENV REGISTRY_COMMIT 47a064d4195a9b56133891bbb13620c3ac83a827 +RUN set -x \ + && export GOPATH="$(mktemp -d)" \ + && git clone https://github.com/docker/distribution.git "$GOPATH/src/github.com/docker/distribution" \ + && (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT") \ + && GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \ + go build -o /usr/local/bin/registry-v2 github.com/docker/distribution/cmd/registry \ + && (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT_SCHEMA1") \ + && GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \ + go build -o /usr/local/bin/registry-v2-schema1 github.com/docker/distribution/cmd/registry \ + && rm -rf "$GOPATH" + +# Install notary and notary-server +ENV NOTARY_VERSION v0.5.0 +RUN set -x \ + && export GOPATH="$(mktemp -d)" \ + && git clone https://github.com/docker/notary.git "$GOPATH/src/github.com/docker/notary" \ + && (cd "$GOPATH/src/github.com/docker/notary" && git checkout -q "$NOTARY_VERSION") \ + && GOPATH="$GOPATH/src/github.com/docker/notary/vendor:$GOPATH" \ + go build -o /usr/local/bin/notary-server github.com/docker/notary/cmd/notary-server \ + && GOPATH="$GOPATH/src/github.com/docker/notary/vendor:$GOPATH" \ + go build -o /usr/local/bin/notary github.com/docker/notary/cmd/notary \ + && rm -rf "$GOPATH" + +# Get the "docker-py" source so we can run their integration tests +ENV DOCKER_PY_COMMIT e2655f658408f9ad1f62abdef3eb6ed43c0cf324 +RUN git clone https://github.com/docker/docker-py.git /docker-py \ + && cd /docker-py \ + && git checkout -q $DOCKER_PY_COMMIT \ + && pip install -r test-requirements.txt + +# Set user.email so crosbymichael's in-container merge commits go smoothly +RUN git config --global user.email 'docker-dummy@example.com' + +# Add an unprivileged user to be used for tests which need it +RUN groupadd -r docker +RUN useradd --create-home --gid docker unprivilegeduser + +VOLUME /var/lib/docker +WORKDIR /go/src/github.com/docker/docker +ENV DOCKER_BUILDTAGS apparmor selinux seccomp + +# Let us use a .bashrc file +RUN ln -sfv $PWD/.bashrc ~/.bashrc + +# Register Docker's bash completion. +RUN ln -sv $PWD/contrib/completion/bash/docker /etc/bash_completion.d/docker + +# Get useful and necessary Hub images so we can "docker load" locally instead of pulling +COPY contrib/download-frozen-image-v2.sh /go/src/github.com/docker/docker/contrib/ +RUN ./contrib/download-frozen-image-v2.sh /docker-frozen-images \ + s390x/buildpack-deps:jessie@sha256:552dec28146e4d2591fc0309aebdbac9e4fb1f335d90c70a14bbf72fb8bb1be5 \ + s390x/busybox:latest@sha256:e32f40c39ca596a4317392bd32809bb188c4ae5864ea827c3219c75c50069964 \ + s390x/debian:jessie@sha256:6994e3ffa5a1dabea09d536f350b3ed2715292cb469417c42a82b70fcbff7d32 \ + s390x/hello-world:latest@sha256:602db500fee63934292260e65c0c528128ad1c1c7c6497f95bbbac7d4d5312f1 +# See also ensureFrozenImagesLinux() in "integration-cli/fixtures_linux_daemon_test.go" (which needs to be updated when adding images to this list) + +# Install tomlv, vndr, runc, containerd, tini, docker-proxy +# Please edit hack/dockerfile/install-binaries.sh to update them. 
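+# The pinned versions come from hack/dockerfile/binaries-commits, a shell-sourceable
+# file of NAME_COMMIT=<value> pairs along these lines (shas deliberately elided):
+#   RUNC_COMMIT=<sha>
+#   CONTAINERD_COMMIT=<sha>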
+COPY hack/dockerfile/binaries-commits /tmp/binaries-commits +COPY hack/dockerfile/install-binaries.sh /tmp/install-binaries.sh +RUN /tmp/install-binaries.sh tomlv vndr runc containerd tini proxy dockercli +ENV PATH=/usr/local/cli:$PATH + +# Wrap all commands in the "docker-in-docker" script to allow nested containers +ENTRYPOINT ["hack/dind"] + +# Upload docker source +COPY . /go/src/github.com/docker/docker diff --git a/components/engine/Dockerfile.simple b/components/engine/Dockerfile.simple new file mode 100644 index 0000000000..ef5557745c --- /dev/null +++ b/components/engine/Dockerfile.simple @@ -0,0 +1,72 @@ +# docker build -t docker:simple -f Dockerfile.simple . +# docker run --rm docker:simple hack/make.sh dynbinary +# docker run --rm --privileged docker:simple hack/dind hack/make.sh test-unit +# docker run --rm --privileged -v /var/lib/docker docker:simple hack/dind hack/make.sh dynbinary test-integration-cli + +# This represents the bare minimum required to build and test Docker. + +FROM debian:jessie + +# allow replacing httpredir or deb mirror +ARG APT_MIRROR=deb.debian.org +RUN sed -ri "s/(httpredir|deb).debian.org/$APT_MIRROR/g" /etc/apt/sources.list + +# Compile and runtime deps +# https://github.com/docker/docker/blob/master/project/PACKAGERS.md#build-dependencies +# https://github.com/docker/docker/blob/master/project/PACKAGERS.md#runtime-dependencies +RUN apt-get update && apt-get install -y --no-install-recommends \ + btrfs-tools \ + build-essential \ + curl \ + cmake \ + gcc \ + git \ + libapparmor-dev \ + libdevmapper-dev \ + ca-certificates \ + e2fsprogs \ + iptables \ + procps \ + xfsprogs \ + xz-utils \ + \ + aufs-tools \ + vim-common \ + && rm -rf /var/lib/apt/lists/* + +# Install seccomp: the version shipped upstream is too old +ENV SECCOMP_VERSION 2.3.2 +RUN set -x \ + && export SECCOMP_PATH="$(mktemp -d)" \ + && curl -fsSL "https://github.com/seccomp/libseccomp/releases/download/v${SECCOMP_VERSION}/libseccomp-${SECCOMP_VERSION}.tar.gz" \ + | tar -xzC "$SECCOMP_PATH" --strip-components=1 \ + && ( \ + cd "$SECCOMP_PATH" \ + && ./configure --prefix=/usr/local \ + && make \ + && make install \ + && ldconfig \ + ) \ + && rm -rf "$SECCOMP_PATH" + +# Install Go +# IMPORTANT: If the version of Go is updated, the Windows to Linux CI machines +# will need updating, to avoid errors. Ping #docker-maintainers on IRC +# with a heads-up. +ENV GO_VERSION 1.8.1 +RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" \ + | tar -xzC /usr/local +ENV PATH /go/bin:/usr/local/go/bin:$PATH +ENV GOPATH /go +ENV CGO_LDFLAGS -L/lib + +# Install runc, containerd, tini and docker-proxy +# Please edit hack/dockerfile/install-binaries.sh to update them. +COPY hack/dockerfile/binaries-commits /tmp/binaries-commits +COPY hack/dockerfile/install-binaries.sh /tmp/install-binaries.sh +RUN /tmp/install-binaries.sh runc containerd tini proxy dockercli +ENV PATH=/usr/local/cli:$PATH + +ENV AUTO_GOPATH 1 +WORKDIR /usr/src/docker +COPY . 
/usr/src/docker diff --git a/components/engine/Dockerfile.solaris b/components/engine/Dockerfile.solaris new file mode 100644 index 0000000000..bb342e5e6a --- /dev/null +++ b/components/engine/Dockerfile.solaris @@ -0,0 +1,20 @@ +# Defines an image that hosts a native Docker build environment for Solaris +# TODO: Improve stub + +FROM solaris:latest + +# compile and runtime deps +RUN pkg install --accept \ + git \ + gnu-coreutils \ + gnu-make \ + gnu-tar \ + diagnostic/top \ + golang \ + library/golang/* \ + developer/gcc-* + +ENV GOPATH /go/:/usr/lib/gocode/1.5/ +ENV DOCKER_CROSSPLATFORMS solaris/amd64 +WORKDIR /go/src/github.com/docker/docker +COPY . /go/src/github.com/docker/docker diff --git a/components/engine/Dockerfile.windows b/components/engine/Dockerfile.windows new file mode 100644 index 0000000000..0a03830dc9 --- /dev/null +++ b/components/engine/Dockerfile.windows @@ -0,0 +1,256 @@ +# escape=` + +# ----------------------------------------------------------------------------------------- +# This file describes the standard way to build Docker in a container on Windows +# Server 2016 or Windows 10. +# +# Maintainer: @jhowardmsft +# ----------------------------------------------------------------------------------------- + + +# Prerequisites: +# -------------- +# +# 1. Windows Server 2016 or Windows 10 with all Windows updates applied. The major +# build number must be at least 14393. This can be confirmed, for example, by +# running the following from an elevated PowerShell prompt - this sample output +# is from a fully up to date machine as at mid-November 2016: +# +# >> PS C:\> $(gin).WindowsBuildLabEx +# >> 14393.447.amd64fre.rs1_release_inmarket.161102-0100 +# +# 2. Git for Windows (or another git client) must be installed. https://git-scm.com/download/win. +# +# 3. The machine must be configured to run containers. For example, by following +# the quick start guidance at https://msdn.microsoft.com/en-us/virtualization/windowscontainers/quick_start/quick_start or +# https://github.com/docker/labs/blob/master/windows/windows-containers/Setup.md +# +# 4. If building in a Hyper-V VM: For Windows Server 2016 using Windows Server +# containers as the default option, it is recommended you have at least 1GB +# of memory assigned; For Windows 10 where Hyper-V Containers are employed, you +# should have at least 4GB of memory assigned. Note also, to run Hyper-V +# containers in a VM, it is necessary to configure the VM for nested virtualization. + +# ----------------------------------------------------------------------------------------- + + +# Usage: +# ----- +# +# The following steps should be run from an (elevated*) Windows PowerShell prompt. +# +# (*In a default installation of containers on Windows following the quick-start guidance at +# https://msdn.microsoft.com/en-us/virtualization/windowscontainers/quick_start/quick_start, +# the docker.exe client must run elevated to be able to connect to the daemon). +# +# 1. Clone the sources from github.com: +# +# >> git clone https://github.com/docker/docker.git C:\go\src\github.com\docker\docker +# >> Cloning into 'C:\go\src\github.com\docker\docker'... +# >> remote: Counting objects: 186216, done. +# >> remote: Compressing objects: 100% (21/21), done. +# >> remote: Total 186216 (delta 5), reused 0 (delta 0), pack-reused 186195 +# >> Receiving objects: 100% (186216/186216), 104.32 MiB | 8.18 MiB/s, done. +# >> Resolving deltas: 100% (123139/123139), done. +# >> Checking connectivity... done. 
+# >> Checking out files: 100% (3912/3912), done.
+# >> PS C:\>
+#
+#
+# 2. Change directory to the cloned docker sources:
+#
+# >> cd C:\go\src\github.com\docker\docker
+#
+#
+# 3. Build a docker image with the components required to build the docker binaries from source
+# by running one of the following:
+#
+# >> docker build -t nativebuildimage -f Dockerfile.windows .
+# >> docker build -t nativebuildimage -f Dockerfile.windows -m 2GB . (if using Hyper-V containers)
+#
+#
+# 4. Build the docker executable binaries by running one of the following:
+#
+# >> $DOCKER_GITCOMMIT=(git rev-parse --short HEAD)
+# >> docker run --name binaries -e DOCKER_GITCOMMIT=$DOCKER_GITCOMMIT nativebuildimage hack\make.ps1 -Binary
+# >> docker run --name binaries -e DOCKER_GITCOMMIT=$DOCKER_GITCOMMIT -m 2GB nativebuildimage hack\make.ps1 -Binary (if using Hyper-V containers)
+#
+#
+# 5. Copy the binaries out of the container, replacing HostPath with an appropriate destination
+# folder on the host system where you want the binaries to be located.
+#
+# >> docker cp binaries:C:\go\src\github.com\docker\docker\bundles\docker.exe C:\HostPath\docker.exe
+# >> docker cp binaries:C:\go\src\github.com\docker\docker\bundles\dockerd.exe C:\HostPath\dockerd.exe
+#
+#
+# 6. (Optional) Remove the interim container holding the built executable binaries:
+#
+# >> docker rm binaries
+#
+#
+# 7. (Optional) Remove the image used for the container in which the executable
+# binaries are built. Tip: it may be useful to keep this image around if you need to
+# build multiple times. Then you can take advantage of the builder cache to have an
+# image which has all the components required to build the binaries already installed.
+#
+# >> docker rmi nativebuildimage
+#

+# -----------------------------------------------------------------------------------------


+# The validation tests can only run directly on the host. This is because they calculate
+# information from the git repo, but the .git directory is not passed into the image as
+# it is excluded via .dockerignore. Run the following from a Windows PowerShell prompt
+# (elevation is not required): (Note: Go must be installed to run these tests)
+#
+# >> hack\make.ps1 -DCO -PkgImports -GoFormat


+# -----------------------------------------------------------------------------------------


+# To run unit tests, ensure you have created the nativebuildimage above. Then run one of
+# the following from an (elevated) Windows PowerShell prompt:
+#
+# >> docker run --rm nativebuildimage hack\make.ps1 -TestUnit
+# >> docker run --rm -m 2GB nativebuildimage hack\make.ps1 -TestUnit (if using Hyper-V containers)


+# -----------------------------------------------------------------------------------------


+# To run unit tests and binary build, ensure you have created the nativebuildimage above. Then
+# run one of the following from an (elevated) Windows PowerShell prompt:
+#
+# >> docker run nativebuildimage hack\make.ps1 -All
+# >> docker run -m 2GB nativebuildimage hack\make.ps1 -All (if using Hyper-V containers)

+# -----------------------------------------------------------------------------------------


+# Important notes:
+# ---------------
+#
+# Don't attempt to use a bind-mount to pass a local directory as the bundles target
+# directory. It does not work (golang attempts to follow a mapped folder incorrectly).
+# Instead, use docker cp as per the example.
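+#
+# For instance, a bind-mount along these lines is the pattern that fails (a
+# hypothetical invocation, shown only to illustrate what not to do):
+#
+# >> docker run -v C:\HostPath:C:\go\src\github.com\docker\docker\bundles nativebuildimage hack\make.ps1 -Binary
+#
+# The docker cp flow in step 5 above is the reliable way to retrieve the binaries.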
+#
+# go.zip is not removed from the image as it is used by the Windows CI servers
+# to ensure the host and image are running consistent versions of go.
+#
+# Nanoserver support is a work in progress. Although the image will build if the
+# FROM statement is updated, it will not work when running autogen through hack\make.ps1.
+# It is suspected that the required GCC utilities (e.g. gcc, windres, windmc) silently
+# quit due to the use of console hooks which are not available.
+#
+# The docker integration tests do not currently run in a container on Windows, predominantly
+# due to Windows not supporting privileged mode, so anything using a volume would fail.
+# They (along with the rest of the docker CI suite) can be run using
+# https://github.com/jhowardmsft/docker-w2wCIScripts/blob/master/runCI/Invoke-DockerCI.ps1.
+#
+# -----------------------------------------------------------------------------------------


+# The number of build steps below is explicitly minimised to improve performance.
+FROM microsoft/windowsservercore

+# Use PowerShell as the default shell
+SHELL ["powershell", "-Command", "$ErrorActionPreference = 'Stop'; $ProgressPreference = 'SilentlyContinue';"]

+# Environment variable notes:
+# - GO_VERSION must be consistent with 'Dockerfile' used by Linux.
+# - FROM_DOCKERFILE is used for detection of building within a container.
+ENV GO_VERSION=1.8.1 `
+    GIT_VERSION=2.11.1 `
+    GOPATH=C:\go `
+    FROM_DOCKERFILE=1

+RUN `
+  Function Test-Nano() { `
+    $EditionId = (Get-ItemProperty -Path 'HKLM:\SOFTWARE\Microsoft\Windows NT\CurrentVersion' -Name 'EditionID').EditionId; `
+    return (($EditionId -eq 'ServerStandardNano') -or ($EditionId -eq 'ServerDataCenterNano') -or ($EditionId -eq 'NanoServer')); `
+  }`
+  `
+  Function Download-File([string] $source, [string] $target) { `
+    if (Test-Nano) { `
+      $handler = New-Object System.Net.Http.HttpClientHandler; `
+      $client = New-Object System.Net.Http.HttpClient($handler); `
+      $client.Timeout = New-Object System.TimeSpan(0, 30, 0); `
+      $cancelTokenSource = [System.Threading.CancellationTokenSource]::new(); `
+      $responseMsg = $client.GetAsync([System.Uri]::new($source), $cancelTokenSource.Token); `
+      $responseMsg.Wait(); `
+      if (!$responseMsg.IsCanceled) { `
+        $response = $responseMsg.Result; `
+        if ($response.IsSuccessStatusCode) { `
+          $downloadedFileStream = [System.IO.FileStream]::new($target, [System.IO.FileMode]::Create, [System.IO.FileAccess]::Write); `
+          $copyStreamOp = $response.Content.CopyToAsync($downloadedFileStream); `
+          $copyStreamOp.Wait(); `
+          $downloadedFileStream.Close(); `
+          if ($copyStreamOp.Exception -ne $null) { throw $copyStreamOp.Exception } `
+        } `
+      } else { `
+        Throw ("Failed to download " + $source) `
+      }`
+    } else { `
+      $webClient = New-Object System.Net.WebClient; `
+      $webClient.DownloadFile($source, $target); `
+    } `
+  } `
+  `
+  setx /M PATH $('C:\git\cmd;C:\git\usr\bin;'+$Env:PATH+';C:\gcc\bin;C:\go\bin'); `
+  `
+  Write-Host INFO: Downloading git...; `
+  $location='https://www.nuget.org/api/v2/package/GitForWindows/'+$Env:GIT_VERSION; `
+  Download-File $location C:\gitsetup.zip; `
+  `
+  Write-Host INFO: Downloading go...; `
+  Download-File $('https://golang.org/dl/go'+$Env:GO_VERSION+'.windows-amd64.zip') C:\go.zip; `
+  `
+  Write-Host INFO: Downloading compiler 1 of 3...; `
+  Download-File https://raw.githubusercontent.com/jhowardmsft/docker-tdmgcc/master/gcc.zip C:\gcc.zip; `
+  `
+  Write-Host INFO: Downloading compiler 2 of 3...; `
+  Download-File 
https://raw.githubusercontent.com/jhowardmsft/docker-tdmgcc/master/runtime.zip C:\runtime.zip; ` + ` + Write-Host INFO: Downloading compiler 3 of 3...; ` + Download-File https://raw.githubusercontent.com/jhowardmsft/docker-tdmgcc/master/binutils.zip C:\binutils.zip; ` + ` + Write-Host INFO: Extracting git...; ` + Expand-Archive C:\gitsetup.zip C:\git-tmp; ` + New-Item -Type Directory C:\git | Out-Null; ` + Move-Item C:\git-tmp\tools\* C:\git\.; ` + Remove-Item -Recurse -Force C:\git-tmp; ` + ` + Write-Host INFO: Expanding go...; ` + Expand-Archive C:\go.zip -DestinationPath C:\; ` + ` + Write-Host INFO: Expanding compiler 1 of 3...; ` + Expand-Archive C:\gcc.zip -DestinationPath C:\gcc -Force; ` + Write-Host INFO: Expanding compiler 2 of 3...; ` + Expand-Archive C:\runtime.zip -DestinationPath C:\gcc -Force; ` + Write-Host INFO: Expanding compiler 3 of 3...; ` + Expand-Archive C:\binutils.zip -DestinationPath C:\gcc -Force; ` + ` + Write-Host INFO: Removing downloaded files...; ` + Remove-Item C:\gcc.zip; ` + Remove-Item C:\runtime.zip; ` + Remove-Item C:\binutils.zip; ` + Remove-Item C:\gitsetup.zip; ` + ` + Write-Host INFO: Creating source directory...; ` + New-Item -ItemType Directory -Path C:\go\src\github.com\docker\docker | Out-Null; ` + ` + Write-Host INFO: Configuring git core.autocrlf...; ` + C:\git\cmd\git config --global core.autocrlf true; ` + ` + Write-Host INFO: Completed + +# Make PowerShell the default entrypoint +ENTRYPOINT ["powershell.exe"] + +# Set the working directory to the location of the sources +WORKDIR C:\go\src\github.com\docker\docker + +# Copy the sources into the container +COPY . . diff --git a/components/engine/LICENSE b/components/engine/LICENSE new file mode 100644 index 0000000000..9c8e20ab85 --- /dev/null +++ b/components/engine/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2013-2017 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/components/engine/MAINTAINERS b/components/engine/MAINTAINERS new file mode 100644 index 0000000000..dc4485da10 --- /dev/null +++ b/components/engine/MAINTAINERS @@ -0,0 +1,462 @@ +# Docker maintainers file +# +# This file describes who runs the docker/docker project and how. +# This is a living document - if you see something out of date or missing, speak up! +# +# It is structured to be consumable by both humans and programs. +# To extract its contents programmatically, use any TOML-compliant +# parser. +# +# This file is compiled into the MAINTAINERS file in docker/opensource. +# +[Org] + + [Org."Core maintainers"] + + # The Core maintainers are the ghostbusters of the project: when there's a problem others + # can't solve, they show up and fix it with bizarre devices and weaponry. + # They have final say on technical implementation and coding style. + # They are ultimately responsible for quality in all its forms: usability polish, + # bugfixes, performance, stability, etc. When ownership can cleanly be passed to + # a subsystem, they are responsible for doing so and holding the + # subsystem maintainers accountable. If ownership is unclear, they are the de facto owners. + + # For each release (including minor releases), a "release captain" is assigned from the + # pool of core maintainers. Rotation is encouraged across all maintainers, to ensure + # the release process is clear and up-to-date. 
+ + people = [ + "aaronlehmann", + "akihirosuda", + "albers", + "aluzzardi", + "anusha", + "coolljt0725", + "cpuguy83", + "crosbymichael", + "dnephin", + "duglin", + "ehazlett", + "estesp", + "icecrime", + "jhowardmsft", + "johnstep", + "justincormack", + "lk4d4", + "mavenugo", + "mhbauer", + "mlaventure", + "runcom", + "stevvooe", + "tianon", + "tibor", + "tonistiigi", + "unclejack", + "vdemeester", + "vieux", + "yongtang" + ] + + [Org."Docs maintainers"] + + # TODO Describe the docs maintainers role. + + people = [ + "misty", + "thajeztah" + ] + + [Org.Curators] + + # The curators help ensure that incoming issues and pull requests are properly triaged and + # that our various contribution and reviewing processes are respected. With their knowledge of + # the repository activity, they can also guide contributors to relevant material or + # discussions. + # + # They are neither code nor docs reviewers, so they are never expected to merge. They can + # however: + # - close an issue or pull request when it's an exact duplicate + # - close an issue or pull request when it's inappropriate or off-topic + + people = [ + "aboch", + "alexellis", + "andrewhsu", + "anonymuse", + "chanwit", + "ehazlett", + "fntlnz", + "gianarb", + "mgoelzer", + "programmerq", + "rheinwein", + "thajeztah" + ] + + [Org.Alumni] + + # This list contains maintainers that are no longer active on the project. + # It is thanks to these people that the project has become what it is today. + # Thank you! + + people = [ + # David Calavera contributed many features to Docker, such as an improved + # event system, dynamic configuration reloading, volume plugins, fancy + # new templating options, and an external client credential store. As a + # maintainer, David was release captain for Docker 1.8, and competing + # with Jess Frazelle to be "top dream killer". + # David is now doing amazing stuff as CTO for https://www.netlify.com, + # and tweets as @calavera. + "calavera", + + # As a maintainer, Erik was responsible for the "builder", and + # started the first designs for the new networking model in + # Docker. Erik is now working on all kinds of plugins for Docker + # (https://github.com/contiv) and various open source projects + # in his own repository https://github.com/erikh. You may + # still stumble into him in our issue tracker, or on IRC. + "erikh", + + # After a false start with his first PR being rejected, James Turnbull became a frequent + # contributor to the documentation, and became a docs maintainer on December 5, 2013. As + # a maintainer, James lifted the docs to a higher standard, and introduced the community + # guidelines ("three strikes"). James is currently changing the world as CTO of https://www.empatico.org, + # meanwhile authoring various books that are worth checking out. You can find him on Twitter, + # rambling as @kartar, and although no longer active as a maintainer, he's always "game" to + # help out reviewing docs PRs, so you may still see him around in the repository. + "jamtur01", + + # Jessica Frazelle, also known as the "Keyser Söze of containers", + # runs *everything* in containers. She started contributing to + # Docker with a (fun fun) change involving both iptables and regular + # expressions (coz, YOLO!) on July 10, 2014 + # https://github.com/docker/docker/pull/6950/commits/f3a68ffa390fb851115c77783fa4031f1d3b2995. 
+	# Jess was Release Captain for Docker 1.4, 1.6 and 1.7, and contributed
+	# many features and improvements, among which "seccomp profiles" (making
+	# containers a lot more secure). Besides being a maintainer, she
+	# set up the CI infrastructure for the project, giving everyone
+	# something to shout at if a PR failed ("noooo Janky!").
+	# Jess is currently working on the DCOS security team at Mesosphere,
+	# and contributing to various open source projects.
+	# Be sure you don't miss her talks at a conference near you (a must-see),
+	# read her blog at https://blog.jessfraz.com (a must-read), and
+	# check out her open source projects on GitHub https://github.com/jessfraz (a must-try).
+	"jessfraz",
+
+	# As a docs maintainer, Mary Anthony contributed greatly to the Docker
+	# docs. She wrote the Docker Contributor Guide and Getting Started
+	# Guides. She helped create a doc build system independent of the
+	# docker/docker project, and implemented a new docs.docker.com theme and
+	# nav for 2015 Dockercon. Fun fact: the most inherited layer in DockerHub
+	# public repositories was originally referenced in
+	# maryatdocker/docker-whale back in May 2015.
+	"moxiegirl",
+
+	# Jana Radhakrishnan was part of the SocketPlane team that joined Docker.
+	# As a maintainer, he was the lead architect for the Container Network
+	# Model (CNM) implemented through libnetwork, and the "routing mesh" powering
+	# Swarm mode networking.
+	#
+	# Jana started new adventures in networking, but you can find him tweeting as @mrjana,
+	# coding on GitHub https://github.com/mrjana, and he may be hiding on the Docker Community
+	# slack channel :-)
+	"mrjana",
+
+	# Sven Dowideit became a well-known person in the Docker ecosphere, building
+	# boot2docker, and became a regular contributor to the project, starting as
+	# early as October 2013 (https://github.com/docker/docker/pull/2119), to become
+	# a maintainer less than two months later (https://github.com/docker/docker/pull/3061).
+	#
+	# As a maintainer, Sven took on the task of converting the documentation from
+	# ReStructuredText to Markdown, migrating to Hugo for generating the docs, and
+	# writing tooling for building, testing, and publishing them.
+	#
+	# If you don't have the occasion to visit "the Australian office", you
+	# can keep up with Sven on Twitter (@SvenDowideit), his blog http://fosiki.com,
+	# and of course on GitHub.
+	"sven",
+
+	# Vincent "vbatts!" Batts made his first contribution to the project
+	# in November 2013, to become a maintainer a few months later, on
+	# May 10, 2014 (https://github.com/docker/docker/commit/d6e666a87a01a5634c250358a94c814bf26cb778).
+	# As a maintainer, Vincent made important contributions to core elements
+	# of Docker, such as "distribution" (tarsum) and graphdrivers (btrfs, devicemapper).
+	# He also contributed the "tar-split" library, an important element
+	# for the content-addressable store.
+	# Vincent is currently a member of the Open Containers Initiative
+	# Technical Oversight Board (TOB), besides his work at Red Hat and
+	# Project Atomic. You can still find him regularly hanging out in
+	# our repository and the #docker-dev and #docker-maintainers IRC channels
+	# for a chat, as he's always a lot of fun.
+	"vbatts",
+
+	# Vishnu became a maintainer to help out on the daemon codebase and
+	# libcontainer integration. He's currently involved in the
+	# Open Containers Initiative, working on the specifications,
+	# besides his work on cAdvisor and Kubernetes for Google.
+	"vishh"
+	]
+
+[people]
+
+# A reference list of all people associated with the project.
+# All other sections should refer to people by their canonical key
+# in the people section.
+
+	# ADD YOURSELF HERE IN ALPHABETICAL ORDER
+
+	[people.aaronlehmann]
+	Name = "Aaron Lehmann"
+	Email = "aaron.lehmann@docker.com"
+	GitHub = "aaronlehmann"
+
+	[people.aboch]
+	Name = "Alessandro Boch"
+	Email = "aboch@docker.com"
+	GitHub = "aboch"
+
+	[people.akihirosuda]
+	Name = "Akihiro Suda"
+	Email = "suda.akihiro@lab.ntt.co.jp"
+	GitHub = "AkihiroSuda"
+
+	[people.albers]
+	Name = "Harald Albers"
+	Email = "github@albersweb.de"
+	GitHub = "albers"
+
+	[people.alexellis]
+	Name = "Alex Ellis"
+	Email = "alexellis2@gmail.com"
+	GitHub = "alexellis"
+
+	[people.aluzzardi]
+	Name = "Andrea Luzzardi"
+	Email = "al@docker.com"
+	GitHub = "aluzzardi"
+
+	[people.andrewhsu]
+	Name = "Andrew Hsu"
+	Email = "andrewhsu@docker.com"
+	GitHub = "andrewhsu"
+
+	[people.anonymuse]
+	Name = "Jesse White"
+	Email = "anonymuse@gmail.com"
+	GitHub = "anonymuse"
+
+	[people.anusha]
+	Name = "Anusha Ragunathan"
+	Email = "anusha@docker.com"
+	GitHub = "anusha-ragunathan"
+
+	[people.calavera]
+	Name = "David Calavera"
+	Email = "david.calavera@gmail.com"
+	GitHub = "calavera"
+
+	[people.chanwit]
+	Name = "Chanwit Kaewkasi"
+	Email = "chanwit@gmail.com"
+	GitHub = "chanwit"
+
+	[people.coolljt0725]
+	Name = "Lei Jitang"
+	Email = "leijitang@huawei.com"
+	GitHub = "coolljt0725"
+
+	[people.cpuguy83]
+	Name = "Brian Goff"
+	Email = "cpuguy83@gmail.com"
+	GitHub = "cpuguy83"
+
+	[people.crosbymichael]
+	Name = "Michael Crosby"
+	Email = "crosbymichael@gmail.com"
+	GitHub = "crosbymichael"
+
+	[people.dnephin]
+	Name = "Daniel Nephin"
+	Email = "dnephin@gmail.com"
+	GitHub = "dnephin"
+
+	[people.duglin]
+	Name = "Doug Davis"
+	Email = "dug@us.ibm.com"
+	GitHub = "duglin"
+
+	[people.ehazlett]
+	Name = "Evan Hazlett"
+	Email = "ejhazlett@gmail.com"
+	GitHub = "ehazlett"
+
+	[people.erikh]
+	Name = "Erik Hollensbe"
+	Email = "erik@docker.com"
+	GitHub = "erikh"
+
+	[people.estesp]
+	Name = "Phil Estes"
+	Email = "estesp@linux.vnet.ibm.com"
+	GitHub = "estesp"
+
+	[people.fntlnz]
+	Name = "Lorenzo Fontana"
+	Email = "fontanalorenz@gmail.com"
+	GitHub = "fntlnz"
+
+	[people.gianarb]
+	Name = "Gianluca Arbezzano"
+	Email = "ga@thumpflow.com"
+	GitHub = "gianarb"
+
+	[people.icecrime]
+	Name = "Arnaud Porterie"
+	Email = "icecrime@gmail.com"
+	GitHub = "icecrime"
+
+	[people.jamtur01]
+	Name = "James Turnbull"
+	Email = "james@lovedthanlost.net"
+	GitHub = "jamtur01"
+
+	[people.jessfraz]
+	Name = "Jessie Frazelle"
+	Email = "jess@linux.com"
+	GitHub = "jessfraz"
+
+	[people.jhowardmsft]
+	Name = "John Howard"
+	Email = "jhoward@microsoft.com"
+	GitHub = "jhowardmsft"
+
+	[people.johnstep]
+	Name = "John Stephens"
+	Email = "johnstep@docker.com"
+	GitHub = "johnstep"
+
+	[people.justincormack]
+	Name = "Justin Cormack"
+	Email = "justin.cormack@docker.com"
+	GitHub = "justincormack"
+
+	[people.lk4d4]
+	Name = "Alexander Morozov"
+	Email = "lk4d4@docker.com"
+	GitHub = "lk4d4"
+
+	[people.mavenugo]
+	Name = "Madhu Venugopal"
+	Email = "madhu@docker.com"
+	GitHub = "mavenugo"
+
+	[people.mgoelzer]
+	Name = "Mike Goelzer"
+	Email = "mike.goelzer@docker.com"
+	GitHub = "mgoelzer"
+
+	[people.mhbauer]
+	Name = "Morgan Bauer"
+	Email = "mbauer@us.ibm.com"
+	GitHub = "mhbauer"
+
+	[people.misty]
+	Name = "Misty Stanley-Jones"
+	Email = "misty@docker.com"
+	GitHub = "mstanleyjones"
+
+	[people.mlaventure]
+	Name = "Kenfe-Mickaël Laventure"
+	Email = "mickael.laventure@docker.com"
+	GitHub = "mlaventure"
+
+	[people.moxiegirl]
+	Name = "Mary Anthony"
+	Email = "mary.anthony@docker.com"
+	GitHub = "moxiegirl"
+
+	[people.mrjana]
+	Name = "Jana Radhakrishnan"
+	Email = "mrjana@docker.com"
+	GitHub = "mrjana"
+
+	[people.programmerq]
+	Name = "Jeff Anderson"
+	Email = "jeff@docker.com"
+	GitHub = "programmerq"
+
+	[people.rheinwein]
+	Name = "Laura Frank"
+	Email = "laura@codeship.com"
+	GitHub = "rheinwein"
+
+	[people.runcom]
+	Name = "Antonio Murdaca"
+	Email = "runcom@redhat.com"
+	GitHub = "runcom"
+
+	[people.shykes]
+	Name = "Solomon Hykes"
+	Email = "solomon@docker.com"
+	GitHub = "shykes"
+
+	[people.stevvooe]
+	Name = "Stephen Day"
+	Email = "stephen.day@docker.com"
+	GitHub = "stevvooe"
+
+	[people.sven]
+	Name = "Sven Dowideit"
+	Email = "SvenDowideit@home.org.au"
+	GitHub = "SvenDowideit"
+
+	[people.thajeztah]
+	Name = "Sebastiaan van Stijn"
+	Email = "github@gone.nl"
+	GitHub = "thaJeztah"
+
+	[people.tianon]
+	Name = "Tianon Gravi"
+	Email = "admwiggin@gmail.com"
+	GitHub = "tianon"
+
+	[people.tibor]
+	Name = "Tibor Vass"
+	Email = "tibor@docker.com"
+	GitHub = "tiborvass"
+
+	[people.tonistiigi]
+	Name = "Tõnis Tiigi"
+	Email = "tonis@docker.com"
+	GitHub = "tonistiigi"
+
+	[people.unclejack]
+	Name = "Cristian Staretu"
+	Email = "cristian.staretu@gmail.com"
+	GitHub = "unclejack"
+
+	[people.vbatts]
+	Name = "Vincent Batts"
+	Email = "vbatts@redhat.com"
+	GitHub = "vbatts"
+
+	[people.vdemeester]
+	Name = "Vincent Demeester"
+	Email = "vincent@sbr.pm"
+	GitHub = "vdemeester"
+
+	[people.vieux]
+	Name = "Victor Vieux"
+	Email = "vieux@docker.com"
+	GitHub = "vieux"
+
+	[people.vishh]
+	Name = "Vishnu Kannan"
+	Email = "vishnuk@google.com"
+	GitHub = "vishh"
+
+	[people.yongtang]
+	Name = "Yong Tang"
+	Email = "yong.tang.github@outlook.com"
+	GitHub = "yongtang"

diff --git a/components/engine/Makefile b/components/engine/Makefile
new file mode 100644
index 0000000000..14bf91308c
--- /dev/null
+++ b/components/engine/Makefile
@@ -0,0 +1,206 @@
+.PHONY: all binary build cross deb help init-go-pkg-cache install manpages rpm run shell test test-docker-py test-integration-cli test-unit tgz validate win

+# set the graph driver as the current graphdriver if not set
+DOCKER_GRAPHDRIVER := $(if $(DOCKER_GRAPHDRIVER),$(DOCKER_GRAPHDRIVER),$(shell docker info 2>&1 | grep "Storage Driver" | sed 's/.*: //'))
+export DOCKER_GRAPHDRIVER
+DOCKER_INCREMENTAL_BINARY := $(if $(DOCKER_INCREMENTAL_BINARY),$(DOCKER_INCREMENTAL_BINARY),1)
+export DOCKER_INCREMENTAL_BINARY

+# get OS/Arch of docker engine
+DOCKER_OSARCH := $(shell bash -c 'source hack/make/.detect-daemon-osarch && echo $${DOCKER_ENGINE_OSARCH}')
+DOCKERFILE := $(shell bash -c 'source hack/make/.detect-daemon-osarch && echo $${DOCKERFILE}')

+DOCKER_GITCOMMIT := $(shell git rev-parse --short HEAD || echo unsupported)
+export DOCKER_GITCOMMIT

+# env vars passed through directly to Docker's build scripts
+# to allow things like `make KEEPBUNDLE=1 binary` easily
+# `project/PACKAGERS.md` has some limited documentation of some of these
+DOCKER_ENVS := \
+	$(if $(DOCKER_CROSSPLATFORMS), -e DOCKER_CROSSPLATFORMS) \
+	-e BUILD_APT_MIRROR \
+	-e BUILDFLAGS \
+	-e KEEPBUNDLE \
+	-e DOCKER_BUILD_ARGS \
+	-e DOCKER_BUILD_GOGC \
+	-e DOCKER_BUILD_PKGS \
+	-e DOCKER_CLI_PATH \
+	-e DOCKER_DEBUG \
+	-e DOCKER_EXPERIMENTAL \
+	-e DOCKER_GITCOMMIT \
+	-e DOCKER_GRAPHDRIVER \
+	-e DOCKER_INCREMENTAL_BINARY \
+	-e DOCKER_PORT \
+	-e DOCKER_REMAP_ROOT \
+	-e DOCKER_STORAGE_OPTS \
+	-e DOCKER_USERLANDPROXY \
+	-e TESTDIRS \
+	-e TESTFLAGS \
+	-e TIMEOUT \
+	-e HTTP_PROXY \
+	-e HTTPS_PROXY \
+	-e NO_PROXY \
+	-e http_proxy \
+	-e https_proxy \
+	-e no_proxy
+# note: we _cannot_ add "-e DOCKER_BUILDTAGS" here because even if it's unset in the shell, that would shadow the "ENV DOCKER_BUILDTAGS" set in our Dockerfile, which is very important for our official builds

+# to allow `make BIND_DIR=. shell` or `make BIND_DIR= test`
+# (default to no bind mount if DOCKER_HOST is set)
+# note: BINDDIR is supported for backwards-compatibility here
+BIND_DIR := $(if $(BINDDIR),$(BINDDIR),$(if $(DOCKER_HOST),,bundles))
+DOCKER_MOUNT := $(if $(BIND_DIR),-v "$(CURDIR)/$(BIND_DIR):/go/src/github.com/docker/docker/$(BIND_DIR)")

+# This allows the test suite to run without worrying about the underlying fs used by the container running the daemon (e.g. aufs-on-aufs), so long as the host running the container is running a supported fs.
+# The volume will be cleaned up when the container is removed due to `--rm`.
+# Note that `BIND_DIR` will already be set to `bundles` if `DOCKER_HOST` is not set (see above BIND_DIR line), in such case this will do nothing since `DOCKER_MOUNT` will already be set.
+DOCKER_MOUNT := $(if $(DOCKER_MOUNT),$(DOCKER_MOUNT),-v /go/src/github.com/docker/docker/bundles) -v $(CURDIR)/.git:/go/src/github.com/docker/docker/.git

+# This allows setting the docker-dev container name
+DOCKER_CONTAINER_NAME := $(if $(CONTAINER_NAME),--name $(CONTAINER_NAME),)

+# enable package cache if DOCKER_INCREMENTAL_BINARY and DOCKER_MOUNT (i.e. DOCKER_HOST) are set
+PKGCACHE_MAP := gopath:/go/pkg goroot-linux_amd64:/usr/local/go/pkg/linux_amd64 goroot-linux_amd64_netgo:/usr/local/go/pkg/linux_amd64_netgo
+PKGCACHE_VOLROOT := dockerdev-go-pkg-cache
+PKGCACHE_VOL := $(if $(PKGCACHE_DIR),$(CURDIR)/$(PKGCACHE_DIR)/,$(PKGCACHE_VOLROOT)-)
+DOCKER_MOUNT_PKGCACHE := $(if $(DOCKER_INCREMENTAL_BINARY),$(shell echo $(PKGCACHE_MAP) | sed -E 's@([^ ]*)@-v "$(PKGCACHE_VOL)\1"@g'),)
+DOCKER_MOUNT_CLI := $(if $(DOCKER_CLI_PATH),-v $(shell dirname $(DOCKER_CLI_PATH)):/usr/local/cli,)
+DOCKER_MOUNT := $(DOCKER_MOUNT) $(DOCKER_MOUNT_PKGCACHE) $(DOCKER_MOUNT_CLI)

+GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD 2>/dev/null)
+GIT_BRANCH_CLEAN := $(shell echo $(GIT_BRANCH) | sed -e "s/[^[:alnum:]]/-/g")
+DOCKER_IMAGE := docker-dev$(if $(GIT_BRANCH_CLEAN),:$(GIT_BRANCH_CLEAN))
+DOCKER_PORT_FORWARD := $(if $(DOCKER_PORT),-p "$(DOCKER_PORT)",)

+DOCKER_FLAGS := docker run --rm -i --privileged $(DOCKER_CONTAINER_NAME) $(DOCKER_ENVS) $(DOCKER_MOUNT) $(DOCKER_PORT_FORWARD)
+BUILD_APT_MIRROR := $(if $(DOCKER_BUILD_APT_MIRROR),--build-arg APT_MIRROR=$(DOCKER_BUILD_APT_MIRROR))
+export BUILD_APT_MIRROR

+SWAGGER_DOCS_PORT ?= 9000

+INTEGRATION_CLI_MASTER_IMAGE := $(if $(INTEGRATION_CLI_MASTER_IMAGE), $(INTEGRATION_CLI_MASTER_IMAGE), integration-cli-master)
+INTEGRATION_CLI_WORKER_IMAGE := $(if $(INTEGRATION_CLI_WORKER_IMAGE), $(INTEGRATION_CLI_WORKER_IMAGE), integration-cli-worker)

+define \n


+endef

+# if this session isn't interactive, then we don't want to allocate a
+# TTY, which would fail, but if it is interactive, we do want to attach
+# so that the user can send e.g. ^C through.
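+# ("[ -t 0 ]" succeeds only when stdin is attached to a terminal, so INTERACTIVE
+# evaluates to 1 for interactive shells and 0 under CI, cron, or piped input.)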
+INTERACTIVE := $(shell [ -t 0 ] && echo 1 || echo 0) +ifeq ($(INTERACTIVE), 1) + DOCKER_FLAGS += -t +endif + +DOCKER_RUN_DOCKER := $(DOCKER_FLAGS) "$(DOCKER_IMAGE)" + +default: binary + +all: build ## validate all checks, build linux binaries, run all tests\ncross build non-linux binaries and generate archives + $(DOCKER_RUN_DOCKER) bash -c 'hack/validate/default && hack/make.sh' + +binary: build ## build the linux binaries + $(DOCKER_RUN_DOCKER) hack/make.sh binary + +build: bundles init-go-pkg-cache + $(warning The docker client CLI has moved to github.com/docker/cli. By default, it is built from the git sha specified in hack/dockerfile/binaries-commits. For a dev-test cycle involving the CLI, run:${\n} DOCKER_CLI_PATH=/host/path/to/cli/binary make shell ${\n} then change the cli and compile into a binary at the same location.${\n}) + docker build ${BUILD_APT_MIRROR} ${DOCKER_BUILD_ARGS} -t "$(DOCKER_IMAGE)" -f "$(DOCKERFILE)" . + +bundles: + mkdir bundles + +clean: clean-pkg-cache-vol ## clean up cached resources + +clean-pkg-cache-vol: + @- $(foreach mapping,$(PKGCACHE_MAP), \ + $(shell docker volume rm $(PKGCACHE_VOLROOT)-$(shell echo $(mapping) | awk -F':/' '{ print $$1 }') > /dev/null 2>&1) \ + ) + +cross: build ## cross build the binaries for darwin, freebsd and\nwindows + $(DOCKER_RUN_DOCKER) hack/make.sh dynbinary binary cross + +deb: build ## build the deb packages + $(DOCKER_RUN_DOCKER) hack/make.sh dynbinary build-deb + + +help: ## this help + @awk 'BEGIN {FS = ":.*?## "} /^[a-zA-Z_-]+:.*?## / {sub("\\\\n",sprintf("\n%22c"," "), $$2);printf "\033[36m%-20s\033[0m %s\n", $$1, $$2}' $(MAKEFILE_LIST) + +init-go-pkg-cache: + $(if $(PKGCACHE_DIR), mkdir -p $(shell echo $(PKGCACHE_MAP) | sed -E 's@([^: ]*):[^ ]*@$(PKGCACHE_DIR)/\1@g')) + +install: ## install the linux binaries + KEEPBUNDLE=1 hack/make.sh install-binary + +manpages: ## Generate man pages from go source and markdown + docker build ${DOCKER_BUILD_ARGS} -t docker-manpage-dev -f "man/$(DOCKERFILE)" ./man + docker run --rm \ + -v $(PWD):/go/src/github.com/docker/docker/ \ + docker-manpage-dev + +rpm: build ## build the rpm packages + $(DOCKER_RUN_DOCKER) hack/make.sh dynbinary build-rpm + +run: build ## run the docker daemon in a container + $(DOCKER_RUN_DOCKER) sh -c "KEEPBUNDLE=1 hack/make.sh install-binary run" + +shell: build ## start a shell inside the build env + $(DOCKER_RUN_DOCKER) bash + +yaml-docs-gen: build ## generate documentation YAML files consumed by docs repo + $(DOCKER_RUN_DOCKER) sh -c 'hack/make.sh yaml-docs-generator && ( root=$$(pwd); cd bundles/latest/yaml-docs-generator; mkdir docs; ./yaml-docs-generator --root $${root} --target $$(pwd)/docs )' + +test: build ## run the unit, integration and docker-py tests + $(DOCKER_RUN_DOCKER) hack/make.sh dynbinary cross test-unit test-integration-cli test-docker-py + +test-docker-py: build ## run the docker-py tests + $(DOCKER_RUN_DOCKER) hack/make.sh dynbinary test-docker-py + +test-integration-cli: build ## run the integration tests + $(DOCKER_RUN_DOCKER) hack/make.sh build-integration-test-binary dynbinary test-integration-cli + +test-unit: build ## run the unit tests + $(DOCKER_RUN_DOCKER) hack/make.sh test-unit + +tgz: build ## build the archives (.zip on windows and .tgz\notherwise) containing the binaries + $(DOCKER_RUN_DOCKER) hack/make.sh dynbinary binary cross tgz + +validate: build ## validate DCO, Seccomp profile generation, gofmt,\n./pkg/ isolation, golint, tests, tomls, go vet and vendor + $(DOCKER_RUN_DOCKER) hack/validate/all + +win: build ## 
cross build the binary for windows
+	$(DOCKER_RUN_DOCKER) hack/make.sh win

+.PHONY: swagger-gen
+swagger-gen:
+	docker run --rm -v $(PWD):/go/src/github.com/docker/docker \
+		-w /go/src/github.com/docker/docker \
+		--entrypoint hack/generate-swagger-api.sh \
+		-e GOPATH=/go \
+		quay.io/goswagger/swagger:0.7.4

+.PHONY: swagger-docs
+swagger-docs: ## preview the API documentation
+	@echo "API docs preview will be running at http://localhost:$(SWAGGER_DOCS_PORT)"
+	@docker run --rm -v $(PWD)/api/swagger.yaml:/usr/share/nginx/html/swagger.yaml \
+		-e 'REDOC_OPTIONS=hide-hostname="true" lazy-rendering' \
+		-p $(SWAGGER_DOCS_PORT):80 \
+		bfirsh/redoc:1.6.2

+build-integration-cli-on-swarm: build ## build images and binary for running integration-cli on Swarm in parallel
+	@echo "Building hack/integration-cli-on-swarm (if build fails, please refer to hack/integration-cli-on-swarm/README.md)"
+	go build -o ./hack/integration-cli-on-swarm/integration-cli-on-swarm ./hack/integration-cli-on-swarm/host
+	@echo "Building $(INTEGRATION_CLI_MASTER_IMAGE)"
+	docker build -t $(INTEGRATION_CLI_MASTER_IMAGE) hack/integration-cli-on-swarm/agent
+# For worker, we don't use `docker build` so as to enable DOCKER_INCREMENTAL_BINARY and so on
+	@echo "Building $(INTEGRATION_CLI_WORKER_IMAGE) from $(DOCKER_IMAGE)"
+	$(eval tmp := integration-cli-worker-tmp)
+# We mount pkgcache, but not bundle (bundle needs to be baked into the image)
+# To avoid baking DOCKER_GRAPHDRIVER and so on into the image, we cannot use $(DOCKER_ENVS) here
+	docker run -t -d --name $(tmp) -e DOCKER_GITCOMMIT -e BUILDFLAGS -e DOCKER_INCREMENTAL_BINARY --privileged $(DOCKER_MOUNT_PKGCACHE) $(DOCKER_IMAGE) top
+	docker exec $(tmp) hack/make.sh build-integration-test-binary dynbinary
+	docker exec $(tmp) go build -o /worker github.com/docker/docker/hack/integration-cli-on-swarm/agent/worker
+	docker commit -c 'ENTRYPOINT ["/worker"]' $(tmp) $(INTEGRATION_CLI_WORKER_IMAGE)
+	docker rm -f $(tmp)
diff --git a/components/engine/NOTICE b/components/engine/NOTICE
new file mode 100644
index 0000000000..0c74e15b05
--- /dev/null
+++ b/components/engine/NOTICE
@@ -0,0 +1,19 @@
+Docker
+Copyright 2012-2017 Docker, Inc.

+This product includes software developed at Docker, Inc. (https://www.docker.com).

+This product contains software (https://github.com/kr/pty) developed
+by Keith Rarick, licensed under the MIT License.

+The following is courtesy of our legal counsel:


+Use and transfer of Docker may be subject to certain restrictions by the
+United States and other governments.
+It is your responsibility to ensure that your use and/or transfer does not
+violate applicable laws.

+For more information, please see https://www.bis.doc.gov

+See also https://www.apache.org/dev/crypto.html and/or seek legal counsel.
diff --git a/components/engine/README.md b/components/engine/README.md
new file mode 100644
index 0000000000..f768ff8449
--- /dev/null
+++ b/components/engine/README.md
@@ -0,0 +1,90 @@
+### Docker users, see [Moby and Docker](https://mobyproject.org/#moby-and-docker) to clarify the relationship between the projects

+### Docker maintainers and contributors, see [Transitioning to Moby](#transitioning-to-moby) for more details

+The Moby Project
+================

+![Moby Project logo](docs/static_files/moby-project-logo.png "The Moby Project")

+Moby is an open-source project created by Docker to advance the software containerization movement.
+It provides a “Lego set” of dozens of components, the framework for assembling them into custom container-based systems, and a place for all container enthusiasts to experiment and exchange ideas.

+# Moby

+## Overview

+At the core of Moby is a framework to assemble specialized container systems.
+It provides:

+- A library of containerized components for all vital aspects of a container system: OS, container runtime, orchestration, infrastructure management, networking, storage, security, build, image distribution, etc.
+- Tools to assemble the components into runnable artifacts for a variety of platforms and architectures: bare metal (both x86 and Arm); executables for Linux, Mac and Windows; VM images for popular cloud and virtualization providers.
+- A set of reference assemblies which can be used as-is, modified, or used as inspiration to create your own.

+All Moby components are containers, so creating new components is as easy as building a new OCI-compatible container.

+## Principles

+Moby is an open project guided by strong principles, but modular, flexible and without too strong an opinion on user experience, so it is open to the community to help set its direction.
+The guiding principles are:

+- Batteries included but swappable: Moby includes enough components to build a fully featured container system, but its modular architecture ensures that most of the components can be swapped out for different implementations.
+- Usable security: Moby will provide secure defaults without compromising usability.
+- Container centric: Moby is built with containers, for running containers.

+With Moby, you should be able to describe all the components of your distributed application, from the high-level configuration files down to the kernel you would like to use, and build and deploy it easily.

+Moby uses [containerd](https://github.com/containerd/containerd) as the default container runtime.

+## Audience

+Moby is recommended for anyone who wants to assemble a container-based system. This includes:

+- Hackers who want to customize or patch their Docker build
+- System engineers or integrators building a container system
+- Infrastructure providers looking to adapt existing container systems to their environment
+- Container enthusiasts who want to experiment with the latest container tech
+- Open-source developers looking to test their project in a variety of different systems
+- Anyone curious about Docker internals and how it’s built

+Moby is NOT recommended for:

+- Application developers looking for an easy way to run their applications in containers. We recommend Docker CE instead.
+- Enterprise IT and development teams looking for a ready-to-use, commercially supported container platform. We recommend Docker EE instead.
+- Anyone curious about containers and looking for an easy way to learn. We recommend the docker.com website instead.

+# Transitioning to Moby

+Docker is transitioning all of its open source collaborations to the Moby project going forward.
+During the transition, all open source activity should continue as usual.
+
+We are proposing the following list of changes:
+
+- splitting up the engine into more open components
+- removing the docker UI, SDK etc to keep them in the Docker org
+- clarifying that the project is not limited to the engine, but extends to the assembly of all the individual components of the Docker platform
+- open-sourcing new tools & components which we currently use to assemble the Docker product, but could benefit the community
+- defining an open, community-centric governance inspired by the Fedora project (a very successful example of balancing the needs of the community with the constraints of the primary corporate sponsor)
+
+-----
+
+Legal
+=====
+
+*Brought to you courtesy of our legal counsel. For more context,
+please see the [NOTICE](https://github.com/moby/moby/blob/master/NOTICE) document in this repo.*
+
+Use and transfer of Moby may be subject to certain restrictions by the
+United States and other governments.
+
+It is your responsibility to ensure that your use and/or transfer does not
+violate applicable laws.
+
+For more information, please see https://www.bis.doc.gov
+
+
+Licensing
+=========
+Moby is licensed under the Apache License, Version 2.0. See
+[LICENSE](https://github.com/moby/moby/blob/master/LICENSE) for the full
+license text. diff --git a/components/engine/ROADMAP.md b/components/engine/ROADMAP.md new file mode 100644 index 0000000000..05a8695ade --- /dev/null +++ b/components/engine/ROADMAP.md @@ -0,0 +1,118 @@
+Docker Engine Roadmap
+=====================
+
+### How should I use this document?
+
+This document provides a description of the items that the project has decided to prioritize. This should
+serve as a reference point for Docker contributors to understand where the project is going, and
+help determine if a contribution could conflict with some longer-term plans.
+
+The fact that a feature isn't listed here doesn't mean that a patch for it will automatically be
+refused (except for those mentioned as "frozen features" below)! We are always happy to receive
+patches for new cool features we haven't thought about, or didn't judge a priority. Please however
+understand that such patches might take longer for us to review.
+
+### How can I help?
+
+Short term objectives are listed in the [wiki](https://github.com/docker/docker/wiki) and described
+in [Issues](https://github.com/docker/docker/issues?q=is%3Aopen+is%3Aissue+label%3Aroadmap). Our
+goal is to split the workload in such a way that anybody can jump in and help. Please comment on an
+issue if you want to take it, to avoid duplicating effort! Similarly, if a maintainer is already
+assigned to an issue you'd like to participate in, pinging them on IRC or GitHub to offer your help is
+the best way to go.
+
+### How can I add something to the roadmap?
+
+The roadmap process is new to the Docker Engine: we are only beginning to structure and document the
+project objectives. Our immediate goal is to be more transparent, and work with our community to
+focus our efforts on fewer prioritized topics.
+
+We hope to offer in the near future a process allowing anyone to propose a topic to the roadmap, but
+we are not quite there yet. For the time being, the BDFL remains the keeper of the roadmap, and we
+won't be accepting pull requests adding or removing items from this file.
+
+# 1. Features and refactoring
+
+## 1.1 Runtime improvements
+
+We recently introduced [`runC`](https://runc.io) as a standalone low-level tool for container
+execution.
The initial goal was to integrate runC as a replacement in the Engine for the traditional
+default libcontainer `execdriver`, but the Engine internals were not ready for this.
+
+As runC continued evolving, and the OCI specification along with it, we created
+[`containerd`](https://containerd.tools/), a daemon to control and monitor multiple `runC` instances. This is
+the new target for Engine integration, as it can entirely replace the whole `execdriver`
+architecture, and container monitoring along with it.
+
+Docker Engine will rely on a long-running `containerd` companion daemon for all container execution
+related operations. This could open the door in the future for Engine restarts without interrupting
+running containers.
+
+## 1.2 Plugins improvements
+
+Docker Engine 1.7.0 introduced plugin support, initially for the use cases of volumes and networks
+extensions. The plugin infrastructure was kept minimal as we were collecting use cases and real
+world feedback before optimizing for any particular workflow.
+
+In the future, we'd like plugins to become first class citizens, and encourage an ecosystem of
+plugins. This implies in particular making it trivially easy to distribute plugins as containers
+through any Registry instance, as well as solving the commonly heard pain points of plugins needing
+to be treated as somewhat special (being active at all times, started before any other user
+containers, and not as easily dismissed).
+
+## 1.3 Internal decoupling
+
+A lot of work has been done in trying to decouple the Docker Engine's internals. In particular, the
+API implementation has been refactored, and the Builder side of the daemon is now
+[fully independent](https://github.com/docker/docker/tree/master/builder) while still residing in
+the same repository.
+
+We are exploring ways to go further with that decoupling, capitalizing on the work introduced by the
+runtime renovation and plugins improvement efforts. Indeed, the combination of `containerd` support
+with the concept of "special" containers opens the door for bootstrapping more Engine internals
+using the same facilities.
+
+## 1.4 Cluster capable Engine
+
+The community has been pushing for a more cluster capable Docker Engine, and a huge effort was spent
+adding features such as multihost networking and node discovery down at the Engine level. Yet, the
+Engine is currently incapable of taking scheduling decisions alone, and continues relying on Swarm
+for that.
+
+We plan to complete this effort and make the Engine fully cluster capable. Since multiple instances of
+the Docker Engine are already capable of discovering each other and establishing overlay networking
+for their containers to communicate, the next step is for a given Engine to gain the ability to
+dispatch work to another node in the cluster. This will be introduced in a backward compatible way, such that a
+`docker run` invocation on a particular node remains fully deterministic.
+
+# 2. Frozen features
+
+## 2.1 Docker exec
+
+We won't accept patches expanding the surface of `docker exec`, which we intend to keep as a
+*debugging* feature, as well as being strongly dependent on the Runtime ingredient effort.
+
+## 2.2 Remote Registry Operations
+
+A large amount of work is ongoing in the area of image distribution and provenance. This includes
+moving to the V2 Registry API and heavily refactoring the code that powers these features. The
+desired result is more secure, reliable and easier to use image distribution.
+
+Part of the problem with this area of the code base is the lack of a stable and flexible interface.
+If new features are added that access the registry without solidifying these interfaces, achieving
+feature parity will continue to be elusive. While we get a handle on this situation, we are imposing
+a moratorium on new code that accesses the Registry API in commands that don't already make remote
+calls.
+
+Currently, only the following commands cause interaction with a remote registry:
+
+ - push
+ - pull
+ - run
+ - build
+ - search
+ - login
+
+In the interest of stabilizing the registry access model during this ongoing work, we are not
+accepting additions to other commands that will cause remote interaction with the Registry API. This
+moratorium will lift when the goals of the distribution project have been met. diff --git a/components/engine/VENDORING.md b/components/engine/VENDORING.md new file mode 100644 index 0000000000..8884f885a7 --- /dev/null +++ b/components/engine/VENDORING.md @@ -0,0 +1,46 @@
+# Vendoring policies
+
+This document outlines recommended vendoring policies for Docker repositories.
+(For example, libnetwork is a Docker repo and logrus is not.)
+
+## Vendoring using tags
+
+Commit ID based vendoring provides little or no information about the updates
+vendored. To fix this, vendors will now require that repositories use annotated
+tags along with commit IDs to snapshot commits. Annotated tags by themselves
+are not sufficient, since the same tag can be force updated to reference
+different commits.
+
+Each tag should:
+- Follow Semantic Versioning rules (refer to the section on "Semantic Versioning")
+- Have a corresponding entry in the change tracking document.
+
+Each repo should:
+- Have a change tracking document between tags/releases, e.g. CHANGELOG.md or a
+GitHub releases file.
+
+The goal here is for consuming repos to be able to use the tag version and
+changelog updates to determine whether the vendoring will cause any breaking or
+backward incompatible changes. This also means that repos can declare a
+dependency on a package at a specific version or greater, up to the next major
+release, without encountering breaking changes.
+
+## Semantic Versioning
+Annotated version tags should follow [Semantic Versioning](http://semver.org) policies:
+
+"Given a version number MAJOR.MINOR.PATCH, increment the:
+
+ 1. MAJOR version when you make incompatible API changes,
+ 2. MINOR version when you add functionality in a backwards-compatible manner, and
+ 3. PATCH version when you make backwards-compatible bug fixes.
+
+Additional labels for pre-release and build metadata are available as extensions
+to the MAJOR.MINOR.PATCH format."
+
+## Vendoring cadence
+In order to avoid huge vendoring changes, it is recommended to have a regular
+cadence for vendoring updates, e.g. monthly.
+
+## Pre-merge vendoring tests
+All related repos will be vendored into docker/docker.
+CI on docker/docker should catch any breaking changes involving multiple repos. diff --git a/components/engine/VERSION b/components/engine/VERSION new file mode 100644 index 0000000000..2d736aaa18 --- /dev/null +++ b/components/engine/VERSION @@ -0,0 +1 @@ +17.06.0-dev diff --git a/components/engine/api/README.md b/components/engine/api/README.md new file mode 100644 index 0000000000..bb88132524 --- /dev/null +++ b/components/engine/api/README.md @@ -0,0 +1,42 @@
+# Working on the Engine API
+
+The Engine API is an HTTP API used by the command-line client to communicate with the daemon. It can also be used by third-party software to control the daemon.
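+As a minimal sketch of such third-party use (an illustrative assumption: a running daemon reachable via the standard `DOCKER_HOST` environment configuration), a Go program can drive the API through the `client/` package described below:
+
+```go
+package main
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/client"
+)
+
+func main() {
+	// Configure the client from the environment (DOCKER_HOST, DOCKER_API_VERSION, ...).
+	cli, err := client.NewEnvClient()
+	if err != nil {
+		panic(err)
+	}
+	// List all containers: the programmatic equivalent of `docker ps -a`.
+	containers, err := cli.ContainerList(context.Background(), types.ContainerListOptions{All: true})
+	if err != nil {
+		panic(err)
+	}
+	for _, c := range containers {
+		fmt.Println(c.ID[:12], c.Image)
+	}
+}
+```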
+
+The API consists of various components in this repository:
+
+- `api/swagger.yaml` A Swagger definition of the API.
+- `api/types/` Types shared by both the client and server, representing various objects, options, responses, etc. Most are written manually, but some are automatically generated from the Swagger definition. See [#27919](https://github.com/docker/docker/issues/27919) for progress on this.
+- `cli/` The command-line client.
+- `client/` The Go client used by the command-line client. It can also be used by third-party Go programs.
+- `daemon/` The daemon, which serves the API.
+
+## Swagger definition
+
+The API is defined by the [Swagger](http://swagger.io/specification/) definition in `api/swagger.yaml`. This definition can be used to:
+
+1. Automatically generate documentation.
+2. Automatically generate the Go server and client. (A work-in-progress.)
+3. Provide a machine-readable version of the API for introspecting what it can do, automatically generating clients for other languages, etc.
+
+## Updating the API documentation
+
+The API documentation is generated entirely from `api/swagger.yaml`. If you make updates to the API, you'll need to edit this file to represent the change in the documentation.
+
+The file is split into two main sections:
+
+- `definitions`, which defines re-usable objects used in requests and responses
+- `paths`, which defines the API endpoints (and some inline objects which don't need to be reusable)
+
+To make an edit, first look for the endpoint you want to edit under `paths`, then make the required edits. Endpoints may reference reusable objects with `$ref`, which can be found in the `definitions` section.
+
+There is hopefully enough example material in the file for you to copy a similar pattern from elsewhere in the file (e.g. adding new fields or endpoints), but for the full reference, see the [Swagger specification](http://swagger.io/specification/).
+
+`swagger.yaml` is validated by `hack/validate/swagger` to ensure it is a valid Swagger definition. This is useful when you are making edits, to ensure they are valid.
+
+## Viewing the API documentation
+
+When you make edits to `swagger.yaml`, you may want to check the generated API documentation to ensure it renders correctly.
+
+Run `make swagger-docs` and a preview will be running at `http://localhost` (the make target prints the exact port). Some of the styling may be incorrect, but you'll be able to ensure that it is generating the correct documentation.
+
+The production documentation is generated by vendoring `swagger.yaml` into [docker/docker.github.io](https://github.com/docker/docker.github.io). diff --git a/components/engine/api/common.go b/components/engine/api/common.go new file mode 100644 index 0000000000..9df6255c08 --- /dev/null +++ b/components/engine/api/common.go @@ -0,0 +1,166 @@
+package api
+
+import (
+	"encoding/json"
+	"encoding/pem"
+	"fmt"
+	"mime"
+	"os"
+	"path/filepath"
+	"sort"
+	"strconv"
+	"strings"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/pkg/ioutils"
+	"github.com/docker/docker/pkg/system"
+	"github.com/docker/libtrust"
+)
+
+// Common constants for daemon and client.
+const (
+	// DefaultVersion of the current REST API
+	DefaultVersion string = "1.30"
+
+	// NoBaseImageSpecifier is the symbol used by the FROM
+	// command to specify that no base image is to be used.
+ NoBaseImageSpecifier string = "scratch" +) + +// byPortInfo is a temporary type used to sort types.Port by its fields +type byPortInfo []types.Port + +func (r byPortInfo) Len() int { return len(r) } +func (r byPortInfo) Swap(i, j int) { r[i], r[j] = r[j], r[i] } +func (r byPortInfo) Less(i, j int) bool { + if r[i].PrivatePort != r[j].PrivatePort { + return r[i].PrivatePort < r[j].PrivatePort + } + + if r[i].IP != r[j].IP { + return r[i].IP < r[j].IP + } + + if r[i].PublicPort != r[j].PublicPort { + return r[i].PublicPort < r[j].PublicPort + } + + return r[i].Type < r[j].Type +} + +// DisplayablePorts returns formatted string representing open ports of container +// e.g. "0.0.0.0:80->9090/tcp, 9988/tcp" +// it's used by command 'docker ps' +func DisplayablePorts(ports []types.Port) string { + type portGroup struct { + first uint16 + last uint16 + } + groupMap := make(map[string]*portGroup) + var result []string + var hostMappings []string + var groupMapKeys []string + sort.Sort(byPortInfo(ports)) + for _, port := range ports { + current := port.PrivatePort + portKey := port.Type + if port.IP != "" { + if port.PublicPort != current { + hostMappings = append(hostMappings, fmt.Sprintf("%s:%d->%d/%s", port.IP, port.PublicPort, port.PrivatePort, port.Type)) + continue + } + portKey = fmt.Sprintf("%s/%s", port.IP, port.Type) + } + group := groupMap[portKey] + + if group == nil { + groupMap[portKey] = &portGroup{first: current, last: current} + // record order that groupMap keys are created + groupMapKeys = append(groupMapKeys, portKey) + continue + } + if current == (group.last + 1) { + group.last = current + continue + } + + result = append(result, formGroup(portKey, group.first, group.last)) + groupMap[portKey] = &portGroup{first: current, last: current} + } + for _, portKey := range groupMapKeys { + g := groupMap[portKey] + result = append(result, formGroup(portKey, g.first, g.last)) + } + result = append(result, hostMappings...) 
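+	// Hypothetical example: ports 80/tcp and 81/tcp plus a published
+	// 1.2.3.4:8080->80/tcp come out of the logic above as
+	// "80-81/tcp, 1.2.3.4:8080->80/tcp": contiguous private ports collapse
+	// into a range, and explicit host mappings are always appended last.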
+ return strings.Join(result, ", ") +} + +func formGroup(key string, start, last uint16) string { + parts := strings.Split(key, "/") + groupType := parts[0] + var ip string + if len(parts) > 1 { + ip = parts[0] + groupType = parts[1] + } + group := strconv.Itoa(int(start)) + if start != last { + group = fmt.Sprintf("%s-%d", group, last) + } + if ip != "" { + group = fmt.Sprintf("%s:%s->%s", ip, group, group) + } + return fmt.Sprintf("%s/%s", group, groupType) +} + +// MatchesContentType validates the content type against the expected one +func MatchesContentType(contentType, expectedType string) bool { + mimetype, _, err := mime.ParseMediaType(contentType) + if err != nil { + logrus.Errorf("Error parsing media type: %s error: %v", contentType, err) + } + return err == nil && mimetype == expectedType +} + +// LoadOrCreateTrustKey attempts to load the libtrust key at the given path, +// otherwise generates a new one +func LoadOrCreateTrustKey(trustKeyPath string) (libtrust.PrivateKey, error) { + err := system.MkdirAll(filepath.Dir(trustKeyPath), 0700) + if err != nil { + return nil, err + } + trustKey, err := libtrust.LoadKeyFile(trustKeyPath) + if err == libtrust.ErrKeyFileDoesNotExist { + trustKey, err = libtrust.GenerateECP256PrivateKey() + if err != nil { + return nil, fmt.Errorf("Error generating key: %s", err) + } + encodedKey, err := serializePrivateKey(trustKey, filepath.Ext(trustKeyPath)) + if err != nil { + return nil, fmt.Errorf("Error serializing key: %s", err) + } + if err := ioutils.AtomicWriteFile(trustKeyPath, encodedKey, os.FileMode(0600)); err != nil { + return nil, fmt.Errorf("Error saving key file: %s", err) + } + } else if err != nil { + return nil, fmt.Errorf("Error loading key file %s: %s", trustKeyPath, err) + } + return trustKey, nil +} + +func serializePrivateKey(key libtrust.PrivateKey, ext string) (encoded []byte, err error) { + if ext == ".json" || ext == ".jwk" { + encoded, err = json.Marshal(key) + if err != nil { + return nil, fmt.Errorf("unable to encode private key JWK: %s", err) + } + } else { + pemBlock, err := key.PEMBlock() + if err != nil { + return nil, fmt.Errorf("unable to encode private key PEM: %s", err) + } + encoded = pem.EncodeToMemory(pemBlock) + } + return +} diff --git a/components/engine/api/common_test.go b/components/engine/api/common_test.go new file mode 100644 index 0000000000..4d67206ffa --- /dev/null +++ b/components/engine/api/common_test.go @@ -0,0 +1,341 @@ +package api + +import ( + "io/ioutil" + "path/filepath" + "testing" + + "os" + + "github.com/docker/docker/api/types" +) + +type ports struct { + ports []types.Port + expected string +} + +// DisplayablePorts +func TestDisplayablePorts(t *testing.T) { + cases := []ports{ + { + []types.Port{ + { + PrivatePort: 9988, + Type: "tcp", + }, + }, + "9988/tcp"}, + { + []types.Port{ + { + PrivatePort: 9988, + Type: "udp", + }, + }, + "9988/udp", + }, + { + []types.Port{ + { + IP: "0.0.0.0", + PrivatePort: 9988, + Type: "tcp", + }, + }, + "0.0.0.0:0->9988/tcp", + }, + { + []types.Port{ + { + PrivatePort: 9988, + PublicPort: 8899, + Type: "tcp", + }, + }, + "9988/tcp", + }, + { + []types.Port{ + { + IP: "4.3.2.1", + PrivatePort: 9988, + PublicPort: 8899, + Type: "tcp", + }, + }, + "4.3.2.1:8899->9988/tcp", + }, + { + []types.Port{ + { + IP: "4.3.2.1", + PrivatePort: 9988, + PublicPort: 9988, + Type: "tcp", + }, + }, + "4.3.2.1:9988->9988/tcp", + }, + { + []types.Port{ + { + PrivatePort: 9988, + Type: "udp", + }, { + PrivatePort: 9988, + Type: "udp", + }, + }, + "9988/udp, 9988/udp", + }, 
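+		// The cases below exercise collapsing of consecutive ports into
+		// ranges (e.g. 9998-9999) and the relative ordering of host-mapped
+		// and unmapped ports.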
+ { + []types.Port{ + { + IP: "1.2.3.4", + PublicPort: 9998, + PrivatePort: 9998, + Type: "udp", + }, { + IP: "1.2.3.4", + PublicPort: 9999, + PrivatePort: 9999, + Type: "udp", + }, + }, + "1.2.3.4:9998-9999->9998-9999/udp", + }, + { + []types.Port{ + { + IP: "1.2.3.4", + PublicPort: 8887, + PrivatePort: 9998, + Type: "udp", + }, { + IP: "1.2.3.4", + PublicPort: 8888, + PrivatePort: 9999, + Type: "udp", + }, + }, + "1.2.3.4:8887->9998/udp, 1.2.3.4:8888->9999/udp", + }, + { + []types.Port{ + { + PrivatePort: 9998, + Type: "udp", + }, { + PrivatePort: 9999, + Type: "udp", + }, + }, + "9998-9999/udp", + }, + { + []types.Port{ + { + IP: "1.2.3.4", + PrivatePort: 6677, + PublicPort: 7766, + Type: "tcp", + }, { + PrivatePort: 9988, + PublicPort: 8899, + Type: "udp", + }, + }, + "9988/udp, 1.2.3.4:7766->6677/tcp", + }, + { + []types.Port{ + { + IP: "1.2.3.4", + PrivatePort: 9988, + PublicPort: 8899, + Type: "udp", + }, { + IP: "1.2.3.4", + PrivatePort: 9988, + PublicPort: 8899, + Type: "tcp", + }, { + IP: "4.3.2.1", + PrivatePort: 2233, + PublicPort: 3322, + Type: "tcp", + }, + }, + "4.3.2.1:3322->2233/tcp, 1.2.3.4:8899->9988/tcp, 1.2.3.4:8899->9988/udp", + }, + { + []types.Port{ + { + PrivatePort: 9988, + PublicPort: 8899, + Type: "udp", + }, { + IP: "1.2.3.4", + PrivatePort: 6677, + PublicPort: 7766, + Type: "tcp", + }, { + IP: "4.3.2.1", + PrivatePort: 2233, + PublicPort: 3322, + Type: "tcp", + }, + }, + "9988/udp, 4.3.2.1:3322->2233/tcp, 1.2.3.4:7766->6677/tcp", + }, + { + []types.Port{ + { + PrivatePort: 80, + Type: "tcp", + }, { + PrivatePort: 1024, + Type: "tcp", + }, { + PrivatePort: 80, + Type: "udp", + }, { + PrivatePort: 1024, + Type: "udp", + }, { + IP: "1.1.1.1", + PublicPort: 80, + PrivatePort: 1024, + Type: "tcp", + }, { + IP: "1.1.1.1", + PublicPort: 80, + PrivatePort: 1024, + Type: "udp", + }, { + IP: "1.1.1.1", + PublicPort: 1024, + PrivatePort: 80, + Type: "tcp", + }, { + IP: "1.1.1.1", + PublicPort: 1024, + PrivatePort: 80, + Type: "udp", + }, { + IP: "2.1.1.1", + PublicPort: 80, + PrivatePort: 1024, + Type: "tcp", + }, { + IP: "2.1.1.1", + PublicPort: 80, + PrivatePort: 1024, + Type: "udp", + }, { + IP: "2.1.1.1", + PublicPort: 1024, + PrivatePort: 80, + Type: "tcp", + }, { + IP: "2.1.1.1", + PublicPort: 1024, + PrivatePort: 80, + Type: "udp", + }, + }, + "80/tcp, 80/udp, 1024/tcp, 1024/udp, 1.1.1.1:1024->80/tcp, 1.1.1.1:1024->80/udp, 2.1.1.1:1024->80/tcp, 2.1.1.1:1024->80/udp, 1.1.1.1:80->1024/tcp, 1.1.1.1:80->1024/udp, 2.1.1.1:80->1024/tcp, 2.1.1.1:80->1024/udp", + }, + } + + for _, port := range cases { + actual := DisplayablePorts(port.ports) + if port.expected != actual { + t.Fatalf("Expected %s, got %s.", port.expected, actual) + } + } +} + +// MatchesContentType +func TestJsonContentType(t *testing.T) { + if !MatchesContentType("application/json", "application/json") { + t.Fail() + } + + if !MatchesContentType("application/json; charset=utf-8", "application/json") { + t.Fail() + } + + if MatchesContentType("dockerapplication/json", "application/json") { + t.Fail() + } +} + +// LoadOrCreateTrustKey +func TestLoadOrCreateTrustKeyInvalidKeyFile(t *testing.T) { + tmpKeyFolderPath, err := ioutil.TempDir("", "api-trustkey-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpKeyFolderPath) + + tmpKeyFile, err := ioutil.TempFile(tmpKeyFolderPath, "keyfile") + if err != nil { + t.Fatal(err) + } + + if _, err := LoadOrCreateTrustKey(tmpKeyFile.Name()); err == nil { + t.Fatal("expected an error, got nothing.") + } + +} + +func TestLoadOrCreateTrustKeyCreateKey(t 
*testing.T) {
+	tmpKeyFolderPath, err := ioutil.TempDir("", "api-trustkey-test")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(tmpKeyFolderPath)
+
+	// Without the need to create the folder hierarchy
+	tmpKeyFile := filepath.Join(tmpKeyFolderPath, "keyfile")
+
+	if key, err := LoadOrCreateTrustKey(tmpKeyFile); err != nil || key == nil {
+		t.Fatalf("expected a new key file, got: %v and %v", err, key)
+	}
+
+	if _, err := os.Stat(tmpKeyFile); err != nil {
+		t.Fatalf("Expected to find a file %s, got %v", tmpKeyFile, err)
+	}
+
+	// With the need to create the folder hierarchy as tmpKeyFile is in a path
+	// where some folders do not exist.
+	tmpKeyFile = filepath.Join(tmpKeyFolderPath, "folder/hierarchy/keyfile")
+
+	if key, err := LoadOrCreateTrustKey(tmpKeyFile); err != nil || key == nil {
+		t.Fatalf("expected a new key file, got: %v and %v", err, key)
+	}
+
+	if _, err := os.Stat(tmpKeyFile); err != nil {
+		t.Fatalf("Expected to find a file %s, got %v", tmpKeyFile, err)
+	}
+
+	// With no path at all
+	defer os.Remove("keyfile")
+	if key, err := LoadOrCreateTrustKey("keyfile"); err != nil || key == nil {
+		t.Fatalf("expected a new key file, got: %v and %v", err, key)
+	}
+
+	if _, err := os.Stat("keyfile"); err != nil {
+		t.Fatalf("Expected to find a file keyfile, got %v", err)
+	}
+}
+
+func TestLoadOrCreateTrustKeyLoadValidKey(t *testing.T) {
+	tmpKeyFile := filepath.Join("fixtures", "keyfile")
+
+	if key, err := LoadOrCreateTrustKey(tmpKeyFile); err != nil || key == nil {
+		t.Fatalf("expected a key file, got: %v and %v", err, key)
+	}
+} diff --git a/components/engine/api/common_unix.go b/components/engine/api/common_unix.go new file mode 100644 index 0000000000..081e61c451 --- /dev/null +++ b/components/engine/api/common_unix.go @@ -0,0 +1,6 @@
+// +build !windows
+
+package api
+
+// MinVersion represents the minimum REST API version supported
+const MinVersion string = "1.12" diff --git a/components/engine/api/common_windows.go b/components/engine/api/common_windows.go new file mode 100644 index 0000000000..a6268a4ff7 --- /dev/null +++ b/components/engine/api/common_windows.go @@ -0,0 +1,8 @@
+package api
+
+// MinVersion represents the minimum REST API version supported
+// Technically the first daemon API version released on Windows is v1.25 in
+// engine version 1.13. However, some clients are explicitly using downlevel
+// APIs (e.g. docker-compose v2.1 file format) and that is just too restrictive.
+// Hence also allowing 1.24 on Windows.
+const MinVersion string = "1.24" diff --git a/components/engine/api/errors/errors.go b/components/engine/api/errors/errors.go new file mode 100644 index 0000000000..39d52e1a07 --- /dev/null +++ b/components/engine/api/errors/errors.go @@ -0,0 +1,47 @@
+package errors
+
+import "net/http"
+
+// apiError is an error wrapper that also
+// holds information about response status codes.
+type apiError struct {
+	error
+	statusCode int
+}
+
+// HTTPErrorStatusCode returns a status code.
+func (e apiError) HTTPErrorStatusCode() int {
+	return e.statusCode
+}
+
+// NewErrorWithStatusCode allows you to associate
+// a specific HTTP Status Code with an error.
+// The server will take that code and set
+// it as the response status.
+func NewErrorWithStatusCode(err error, code int) error {
+	return apiError{err, code}
+}
+
+// NewBadRequestError creates a new API error
+// that has the 400 HTTP status code associated with it.
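+// It is a convenience wrapper around NewErrorWithStatusCode.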
+func NewBadRequestError(err error) error {
+	return NewErrorWithStatusCode(err, http.StatusBadRequest)
+}
+
+// NewRequestForbiddenError creates a new API error
+// that has the 403 HTTP status code associated with it.
+func NewRequestForbiddenError(err error) error {
+	return NewErrorWithStatusCode(err, http.StatusForbidden)
+}
+
+// NewRequestNotFoundError creates a new API error
+// that has the 404 HTTP status code associated with it.
+func NewRequestNotFoundError(err error) error {
+	return NewErrorWithStatusCode(err, http.StatusNotFound)
+}
+
+// NewRequestConflictError creates a new API error
+// that has the 409 HTTP status code associated with it.
+func NewRequestConflictError(err error) error {
+	return NewErrorWithStatusCode(err, http.StatusConflict)
+} diff --git a/components/engine/api/errors/errors_test.go b/components/engine/api/errors/errors_test.go new file mode 100644 index 0000000000..1d6a596ac3 --- /dev/null +++ b/components/engine/api/errors/errors_test.go @@ -0,0 +1,64 @@
+package errors
+
+import (
+	"fmt"
+	"net/http"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func newError(errorname string) error {
+	return fmt.Errorf("test%v", errorname)
+}
+
+func TestErrors(t *testing.T) {
+	errmsg := newError("apiError")
+	err := apiError{
+		error:      errmsg,
+		statusCode: 0,
+	}
+	assert.Equal(t, err.HTTPErrorStatusCode(), err.statusCode)
+
+	errmsg = newError("ErrorWithStatusCode")
+	errcode := 1
+	serr := NewErrorWithStatusCode(errmsg, errcode)
+	apierr, ok := serr.(apiError)
+	if !ok {
+		t.Fatal("expected err to be of type apiError")
+	}
+	assert.Equal(t, errcode, apierr.statusCode)
+
+	errmsg = newError("NewBadRequestError")
+	baderr := NewBadRequestError(errmsg)
+	apierr, ok = baderr.(apiError)
+	if !ok {
+		t.Fatal("expected err to be of type apiError")
+	}
+	assert.Equal(t, http.StatusBadRequest, apierr.statusCode)
+
+	errmsg = newError("RequestForbiddenError")
+	ferr := NewRequestForbiddenError(errmsg)
+	apierr, ok = ferr.(apiError)
+	if !ok {
+		t.Fatal("expected err to be of type apiError")
+	}
+	assert.Equal(t, http.StatusForbidden, apierr.statusCode)
+
+	errmsg = newError("RequestNotFoundError")
+	nerr := NewRequestNotFoundError(errmsg)
+	apierr, ok = nerr.(apiError)
+	if !ok {
+		t.Fatal("expected err to be of type apiError")
+	}
+	assert.Equal(t, http.StatusNotFound, apierr.statusCode)
+
+	errmsg = newError("RequestConflictError")
+	cerr := NewRequestConflictError(errmsg)
+	apierr, ok = cerr.(apiError)
+	if !ok {
+		t.Fatal("expected err to be of type apiError")
+	}
+	assert.Equal(t, http.StatusConflict, apierr.statusCode)
+} diff --git a/components/engine/api/fixtures/keyfile b/components/engine/api/fixtures/keyfile new file mode 100644 index 0000000000..322f254404 --- /dev/null +++ b/components/engine/api/fixtures/keyfile @@ -0,0 +1,7 @@
+-----BEGIN EC PRIVATE KEY-----
+keyID: AWX2:I27X:WQFX:IOMK:CNAK:O7PW:VYNB:ZLKC:CVAE:YJP2:SI4A:XXAY
+
+MHcCAQEEILHTRWdcpKWsnORxSFyBnndJ4ROU41hMtr/GCiLVvwBQoAoGCCqGSM49
+AwEHoUQDQgAElpVFbQ2V2UQKajqdE3fVxJ+/pE/YuEFOxWbOxF2be19BY209/iky
+NzeFFK7SLpQ4CBJ7zDVXOHsMzrkY/GquGA==
+-----END EC PRIVATE KEY----- diff --git a/components/engine/api/names.go b/components/engine/api/names.go new file mode 100644 index 0000000000..f147d1f4ce --- /dev/null +++ b/components/engine/api/names.go @@ -0,0 +1,9 @@
+package api
+
+import "regexp"
+
+// RestrictedNameChars collects the characters allowed to represent a name, normally used to validate container and volume names.
+const RestrictedNameChars = `[a-zA-Z0-9][a-zA-Z0-9_.-]` + +// RestrictedNamePattern is a regular expression to validate names against the collection of restricted characters. +var RestrictedNamePattern = regexp.MustCompile(`^` + RestrictedNameChars + `+$`) diff --git a/components/engine/api/server/backend/build/backend.go b/components/engine/api/server/backend/build/backend.go new file mode 100644 index 0000000000..bf171441ca --- /dev/null +++ b/components/engine/api/server/backend/build/backend.go @@ -0,0 +1,70 @@ +package build + +import ( + "fmt" + + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/builder" + "github.com/docker/docker/builder/dockerfile" + "github.com/docker/docker/image" + "github.com/docker/docker/pkg/stringid" + "github.com/pkg/errors" + "golang.org/x/net/context" +) + +// ImageComponent provides an interface for working with images +type ImageComponent interface { + SquashImage(from string, to string) (string, error) + TagImageWithReference(image.ID, reference.Named) error +} + +// Backend provides build functionality to the API router +type Backend struct { + manager *dockerfile.BuildManager + imageComponent ImageComponent +} + +// NewBackend creates a new build backend from components +func NewBackend(components ImageComponent, builderBackend builder.Backend) *Backend { + manager := dockerfile.NewBuildManager(builderBackend) + return &Backend{imageComponent: components, manager: manager} +} + +// Build builds an image from a Source +func (b *Backend) Build(ctx context.Context, config backend.BuildConfig) (string, error) { + options := config.Options + tagger, err := NewTagger(b.imageComponent, config.ProgressWriter.StdoutFormatter, options.Tags) + if err != nil { + return "", err + } + + build, err := b.manager.Build(ctx, config) + if err != nil { + return "", err + } + + var imageID = build.ImageID + if options.Squash { + if imageID, err = squashBuild(build, b.imageComponent); err != nil { + return "", err + } + } + + stdout := config.ProgressWriter.StdoutFormatter + fmt.Fprintf(stdout, "Successfully built %s\n", stringid.TruncateID(imageID)) + err = tagger.TagImages(image.ID(imageID)) + return imageID, err +} + +func squashBuild(build *builder.Result, imageComponent ImageComponent) (string, error) { + var fromID string + if build.FromImage != nil { + fromID = build.FromImage.ImageID() + } + imageID, err := imageComponent.SquashImage(build.ImageID, fromID) + if err != nil { + return "", errors.Wrap(err, "error squashing image") + } + return imageID, nil +} diff --git a/components/engine/api/server/backend/build/tag.go b/components/engine/api/server/backend/build/tag.go new file mode 100644 index 0000000000..5c3918a3e1 --- /dev/null +++ b/components/engine/api/server/backend/build/tag.go @@ -0,0 +1,77 @@ +package build + +import ( + "fmt" + "io" + + "github.com/docker/distribution/reference" + "github.com/docker/docker/image" + "github.com/pkg/errors" +) + +// Tagger is responsible for tagging an image created by a builder +type Tagger struct { + imageComponent ImageComponent + stdout io.Writer + repoAndTags []reference.Named +} + +// NewTagger returns a new Tagger for tagging the images of a build. +// If any of the names are invalid tags an error is returned. 
+func NewTagger(backend ImageComponent, stdout io.Writer, names []string) (*Tagger, error) { + reposAndTags, err := sanitizeRepoAndTags(names) + if err != nil { + return nil, err + } + return &Tagger{ + imageComponent: backend, + stdout: stdout, + repoAndTags: reposAndTags, + }, nil +} + +// TagImages creates image tags for the imageID +func (bt *Tagger) TagImages(imageID image.ID) error { + for _, rt := range bt.repoAndTags { + if err := bt.imageComponent.TagImageWithReference(imageID, rt); err != nil { + return err + } + fmt.Fprintf(bt.stdout, "Successfully tagged %s\n", reference.FamiliarString(rt)) + } + return nil +} + +// sanitizeRepoAndTags parses the raw "t" parameter received from the client +// to a slice of repoAndTag. +// It also validates each repoName and tag. +func sanitizeRepoAndTags(names []string) ([]reference.Named, error) { + var ( + repoAndTags []reference.Named + // This map is used for deduplicating the "-t" parameter. + uniqNames = make(map[string]struct{}) + ) + for _, repo := range names { + if repo == "" { + continue + } + + ref, err := reference.ParseNormalizedNamed(repo) + if err != nil { + return nil, err + } + + if _, isCanonical := ref.(reference.Canonical); isCanonical { + return nil, errors.New("build tag cannot contain a digest") + } + + ref = reference.TagNameOnly(ref) + + nameWithTag := ref.String() + + if _, exists := uniqNames[nameWithTag]; !exists { + uniqNames[nameWithTag] = struct{}{} + repoAndTags = append(repoAndTags, ref) + } + } + return repoAndTags, nil +} diff --git a/components/engine/api/server/httputils/decoder.go b/components/engine/api/server/httputils/decoder.go new file mode 100644 index 0000000000..458eac5600 --- /dev/null +++ b/components/engine/api/server/httputils/decoder.go @@ -0,0 +1,16 @@ +package httputils + +import ( + "io" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/network" +) + +// ContainerDecoder specifies how +// to translate an io.Reader into +// container configuration. +type ContainerDecoder interface { + DecodeConfig(src io.Reader) (*container.Config, *container.HostConfig, *network.NetworkingConfig, error) + DecodeHostConfig(src io.Reader) (*container.HostConfig, error) +} diff --git a/components/engine/api/server/httputils/errors.go b/components/engine/api/server/httputils/errors.go new file mode 100644 index 0000000000..40ff351495 --- /dev/null +++ b/components/engine/api/server/httputils/errors.go @@ -0,0 +1,104 @@ +package httputils + +import ( + "net/http" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/versions" + "github.com/gorilla/mux" + "google.golang.org/grpc" +) + +// httpStatusError is an interface +// that errors with custom status codes +// implement to tell the api layer +// which response status to set. +type httpStatusError interface { + HTTPErrorStatusCode() int +} + +// inputValidationError is an interface +// that errors generated by invalid +// inputs can implement to tell the +// api layer to set a 400 status code +// in the response. +type inputValidationError interface { + IsValidationError() bool +} + +// GetHTTPErrorStatusCode retrieves status code from error message. 
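+// It prefers the httpStatusError and inputValidationError interfaces, and
+// only falls back to keyword matching on the error message when neither is
+// implemented.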
+func GetHTTPErrorStatusCode(err error) int { + if err == nil { + logrus.WithFields(logrus.Fields{"error": err}).Error("unexpected HTTP error handling") + return http.StatusInternalServerError + } + + var statusCode int + errMsg := err.Error() + + switch e := err.(type) { + case httpStatusError: + statusCode = e.HTTPErrorStatusCode() + case inputValidationError: + statusCode = http.StatusBadRequest + default: + // FIXME: this is brittle and should not be necessary, but we still need to identify if + // there are errors falling back into this logic. + // If we need to differentiate between different possible error types, + // we should create appropriate error types that implement the httpStatusError interface. + errStr := strings.ToLower(errMsg) + for _, status := range []struct { + keyword string + code int + }{ + {"not found", http.StatusNotFound}, + {"cannot find", http.StatusNotFound}, + {"no such", http.StatusNotFound}, + {"bad parameter", http.StatusBadRequest}, + {"no command", http.StatusBadRequest}, + {"conflict", http.StatusConflict}, + {"impossible", http.StatusNotAcceptable}, + {"wrong login/password", http.StatusUnauthorized}, + {"unauthorized", http.StatusUnauthorized}, + {"hasn't been activated", http.StatusForbidden}, + {"this node", http.StatusServiceUnavailable}, + {"needs to be unlocked", http.StatusServiceUnavailable}, + {"certificates have expired", http.StatusServiceUnavailable}, + } { + if strings.Contains(errStr, status.keyword) { + statusCode = status.code + break + } + } + } + + if statusCode == 0 { + statusCode = http.StatusInternalServerError + } + + return statusCode +} + +func apiVersionSupportsJSONErrors(version string) bool { + const firstAPIVersionWithJSONErrors = "1.23" + return version == "" || versions.GreaterThan(version, firstAPIVersionWithJSONErrors) +} + +// MakeErrorHandler makes an HTTP handler that decodes a Docker error and +// returns it in the response. +func MakeErrorHandler(err error) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + statusCode := GetHTTPErrorStatusCode(err) + vars := mux.Vars(r) + if apiVersionSupportsJSONErrors(vars["version"]) { + response := &types.ErrorResponse{ + Message: err.Error(), + } + WriteJSON(w, statusCode, response) + } else { + http.Error(w, grpc.ErrorDesc(err), statusCode) + } + } +} diff --git a/components/engine/api/server/httputils/form.go b/components/engine/api/server/httputils/form.go new file mode 100644 index 0000000000..78bd379c7a --- /dev/null +++ b/components/engine/api/server/httputils/form.go @@ -0,0 +1,70 @@ +package httputils + +import ( + "errors" + "net/http" + "path/filepath" + "strconv" + "strings" +) + +// BoolValue transforms a form value in different formats into a boolean type. +func BoolValue(r *http.Request, k string) bool { + s := strings.ToLower(strings.TrimSpace(r.FormValue(k))) + return !(s == "" || s == "0" || s == "no" || s == "false" || s == "none") +} + +// BoolValueOrDefault returns the default bool passed if the query param is +// missing, otherwise it's just a proxy to boolValue above. +func BoolValueOrDefault(r *http.Request, k string, d bool) bool { + if _, ok := r.Form[k]; !ok { + return d + } + return BoolValue(r, k) +} + +// Int64ValueOrZero parses a form value into an int64 type. +// It returns 0 if the parsing fails. +func Int64ValueOrZero(r *http.Request, k string) int64 { + val, err := Int64ValueOrDefault(r, k, 0) + if err != nil { + return 0 + } + return val +} + +// Int64ValueOrDefault parses a form value into an int64 type. 
If there is an +// error, returns the error. If there is no value returns the default value. +func Int64ValueOrDefault(r *http.Request, field string, def int64) (int64, error) { + if r.Form.Get(field) != "" { + value, err := strconv.ParseInt(r.Form.Get(field), 10, 64) + return value, err + } + return def, nil +} + +// ArchiveOptions stores archive information for different operations. +type ArchiveOptions struct { + Name string + Path string +} + +// ArchiveFormValues parses form values and turns them into ArchiveOptions. +// It fails if the archive name and path are not in the request. +func ArchiveFormValues(r *http.Request, vars map[string]string) (ArchiveOptions, error) { + if err := ParseForm(r); err != nil { + return ArchiveOptions{}, err + } + + name := vars["name"] + path := filepath.FromSlash(r.Form.Get("path")) + + switch { + case name == "": + return ArchiveOptions{}, errors.New("bad parameter: 'name' cannot be empty") + case path == "": + return ArchiveOptions{}, errors.New("bad parameter: 'path' cannot be empty") + } + + return ArchiveOptions{name, path}, nil +} diff --git a/components/engine/api/server/httputils/form_test.go b/components/engine/api/server/httputils/form_test.go new file mode 100644 index 0000000000..bc790e99c3 --- /dev/null +++ b/components/engine/api/server/httputils/form_test.go @@ -0,0 +1,105 @@ +package httputils + +import ( + "net/http" + "net/url" + "testing" +) + +func TestBoolValue(t *testing.T) { + cases := map[string]bool{ + "": false, + "0": false, + "no": false, + "false": false, + "none": false, + "1": true, + "yes": true, + "true": true, + "one": true, + "100": true, + } + + for c, e := range cases { + v := url.Values{} + v.Set("test", c) + r, _ := http.NewRequest("POST", "", nil) + r.Form = v + + a := BoolValue(r, "test") + if a != e { + t.Fatalf("Value: %s, expected: %v, actual: %v", c, e, a) + } + } +} + +func TestBoolValueOrDefault(t *testing.T) { + r, _ := http.NewRequest("GET", "", nil) + if !BoolValueOrDefault(r, "queryparam", true) { + t.Fatal("Expected to get true default value, got false") + } + + v := url.Values{} + v.Set("param", "") + r, _ = http.NewRequest("GET", "", nil) + r.Form = v + if BoolValueOrDefault(r, "param", true) { + t.Fatal("Expected not to get true") + } +} + +func TestInt64ValueOrZero(t *testing.T) { + cases := map[string]int64{ + "": 0, + "asdf": 0, + "0": 0, + "1": 1, + } + + for c, e := range cases { + v := url.Values{} + v.Set("test", c) + r, _ := http.NewRequest("POST", "", nil) + r.Form = v + + a := Int64ValueOrZero(r, "test") + if a != e { + t.Fatalf("Value: %s, expected: %v, actual: %v", c, e, a) + } + } +} + +func TestInt64ValueOrDefault(t *testing.T) { + cases := map[string]int64{ + "": -1, + "-1": -1, + "42": 42, + } + + for c, e := range cases { + v := url.Values{} + v.Set("test", c) + r, _ := http.NewRequest("POST", "", nil) + r.Form = v + + a, err := Int64ValueOrDefault(r, "test", -1) + if a != e { + t.Fatalf("Value: %s, expected: %v, actual: %v", c, e, a) + } + if err != nil { + t.Fatalf("Error should be nil, but received: %s", err) + } + } +} + +func TestInt64ValueOrDefaultWithError(t *testing.T) { + v := url.Values{} + v.Set("test", "invalid") + r, _ := http.NewRequest("POST", "", nil) + r.Form = v + + _, err := Int64ValueOrDefault(r, "test", -1) + if err == nil { + t.Fatal("Expected an error.") + } +} diff --git a/components/engine/api/server/httputils/httputils.go b/components/engine/api/server/httputils/httputils.go new file mode 100644 index 0000000000..2c2e8452f9 --- /dev/null +++ 
b/components/engine/api/server/httputils/httputils.go @@ -0,0 +1,88 @@
+package httputils
+
+import (
+	"fmt"
+	"io"
+	"net/http"
+	"strings"
+
+	"golang.org/x/net/context"
+
+	"github.com/docker/docker/api"
+)
+
+// APIVersionKey is the client's requested API version.
+const APIVersionKey = "api-version"
+
+// APIFunc is an adapter to allow the use of ordinary functions as Docker API endpoints.
+// Any function that has the appropriate signature can be registered as an API endpoint (e.g. getVersion).
+type APIFunc func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error
+
+// HijackConnection interrupts the http response writer to get the
+// underlying connection and operate on it.
+func HijackConnection(w http.ResponseWriter) (io.ReadCloser, io.Writer, error) {
+	conn, _, err := w.(http.Hijacker).Hijack()
+	if err != nil {
+		return nil, nil, err
+	}
+	// Flush the options to make sure the client sets the raw mode
+	conn.Write([]byte{})
+	return conn, conn, nil
+}
+
+// CloseStreams ensures that a list of http streams is properly closed.
+func CloseStreams(streams ...interface{}) {
+	for _, stream := range streams {
+		if tcpc, ok := stream.(interface {
+			CloseWrite() error
+		}); ok {
+			tcpc.CloseWrite()
+		} else if closer, ok := stream.(io.Closer); ok {
+			closer.Close()
+		}
+	}
+}
+
+// CheckForJSON makes sure that the request's Content-Type is application/json.
+func CheckForJSON(r *http.Request) error {
+	ct := r.Header.Get("Content-Type")
+
+	// No Content-Type header is ok as long as there's no Body
+	if ct == "" {
+		if r.Body == nil || r.ContentLength == 0 {
+			return nil
+		}
+	}
+
+	// Otherwise it had better be json
+	if api.MatchesContentType(ct, "application/json") {
+		return nil
+	}
+	return fmt.Errorf("Content-Type specified (%s) must be 'application/json'", ct)
+}
+
+// ParseForm ensures the request form is parsed even with invalid content types.
+// If we don't do this, a POST request without a Content-Type (even with an empty body) will fail.
+func ParseForm(r *http.Request) error {
+	if r == nil {
+		return nil
+	}
+	if err := r.ParseForm(); err != nil && !strings.HasPrefix(err.Error(), "mime:") {
+		return err
+	}
+	return nil
+}
+
+// VersionFromContext returns an API version from the context using APIVersionKey.
+// It panics if the context value is not a string.
+func VersionFromContext(ctx context.Context) string {
+	if ctx == nil {
+		return ""
+	}
+
+	if val := ctx.Value(APIVersionKey); val != nil {
+		return val.(string)
+	}
+
+	return ""
+} diff --git a/components/engine/api/server/httputils/httputils_write_json.go b/components/engine/api/server/httputils/httputils_write_json.go new file mode 100644 index 0000000000..4787cc3c33 --- /dev/null +++ b/components/engine/api/server/httputils/httputils_write_json.go @@ -0,0 +1,17 @@
+// +build go1.7
+
+package httputils
+
+import (
+	"encoding/json"
+	"net/http"
+)
+
+// WriteJSON writes the value v to the http response stream as json with standard json encoding.
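+// HTML escaping is disabled via enc.SetEscapeHTML(false), so characters such
+// as < and > are written verbatim.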
+func WriteJSON(w http.ResponseWriter, code int, v interface{}) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(code) + enc := json.NewEncoder(w) + enc.SetEscapeHTML(false) + return enc.Encode(v) +} diff --git a/components/engine/api/server/httputils/httputils_write_json_go16.go b/components/engine/api/server/httputils/httputils_write_json_go16.go new file mode 100644 index 0000000000..bdc6981738 --- /dev/null +++ b/components/engine/api/server/httputils/httputils_write_json_go16.go @@ -0,0 +1,16 @@ +// +build go1.6,!go1.7 + +package httputils + +import ( + "encoding/json" + "net/http" +) + +// WriteJSON writes the value v to the http response stream as json with standard json encoding. +func WriteJSON(w http.ResponseWriter, code int, v interface{}) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(code) + enc := json.NewEncoder(w) + return enc.Encode(v) +} diff --git a/components/engine/api/server/httputils/write_log_stream.go b/components/engine/api/server/httputils/write_log_stream.go new file mode 100644 index 0000000000..891e5f02b3 --- /dev/null +++ b/components/engine/api/server/httputils/write_log_stream.go @@ -0,0 +1,94 @@ +package httputils + +import ( + "fmt" + "io" + "net/url" + "sort" + "strings" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/jsonlog" + "github.com/docker/docker/pkg/stdcopy" +) + +// WriteLogStream writes an encoded byte stream of log messages from the +// messages channel, multiplexing them with a stdcopy.Writer if mux is true +func WriteLogStream(ctx context.Context, w io.Writer, msgs <-chan *backend.LogMessage, config *types.ContainerLogsOptions, mux bool) { + wf := ioutils.NewWriteFlusher(w) + defer wf.Close() + + wf.Flush() + + // this might seem like doing below is clear: + // var outStream io.Writer = wf + // however, this GREATLY DISPLEASES golint, and if you do that, it will + // fail CI. we need outstream to be type writer because if we mux streams, + // we will need to reassign all of the streams to be stdwriters, which only + // conforms to the io.Writer interface. + var outStream io.Writer + outStream = wf + errStream := outStream + sysErrStream := errStream + if mux { + sysErrStream = stdcopy.NewStdWriter(outStream, stdcopy.Systemerr) + errStream = stdcopy.NewStdWriter(outStream, stdcopy.Stderr) + outStream = stdcopy.NewStdWriter(outStream, stdcopy.Stdout) + } + + for { + msg, ok := <-msgs + if !ok { + return + } + // check if the message contains an error. if so, write that error + // and exit + if msg.Err != nil { + fmt.Fprintf(sysErrStream, "Error grabbing logs: %v\n", msg.Err) + continue + } + logLine := msg.Line + if config.Details { + logLine = append([]byte(stringAttrs(msg.Attrs)+" "), logLine...) + } + if config.Timestamps { + // TODO(dperny) the format is defined in + // daemon/logger/logger.go as logger.TimeFormat. importing + // logger is verboten (not part of backend) so idk if just + // importing the same thing from jsonlog is good enough + logLine = append([]byte(msg.Timestamp.Format(jsonlog.RFC3339NanoFixed)+" "), logLine...) 
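+		// Route the line to the stream(s) requested by the client; when mux
+		// is true, these writers frame the output with stdcopy headers over
+		// the same underlying connection.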
+ } + if msg.Source == "stdout" && config.ShowStdout { + outStream.Write(logLine) + } + if msg.Source == "stderr" && config.ShowStderr { + errStream.Write(logLine) + } + } +} + +type byKey []string + +func (s byKey) Len() int { return len(s) } +func (s byKey) Less(i, j int) bool { + keyI := strings.Split(s[i], "=") + keyJ := strings.Split(s[j], "=") + return keyI[0] < keyJ[0] +} +func (s byKey) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +func stringAttrs(a backend.LogAttributes) string { + var ss byKey + for k, v := range a { + k, v := url.QueryEscape(k), url.QueryEscape(v) + ss = append(ss, k+"="+v) + } + sort.Sort(ss) + return strings.Join(ss, ",") +} diff --git a/components/engine/api/server/middleware.go b/components/engine/api/server/middleware.go new file mode 100644 index 0000000000..537ce8028f --- /dev/null +++ b/components/engine/api/server/middleware.go @@ -0,0 +1,24 @@ +package server + +import ( + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/api/server/middleware" +) + +// handlerWithGlobalMiddlewares wraps the handler function for a request with +// the server's global middlewares. The order of the middlewares is backwards, +// meaning that the first in the list will be evaluated last. +func (s *Server) handlerWithGlobalMiddlewares(handler httputils.APIFunc) httputils.APIFunc { + next := handler + + for _, m := range s.middlewares { + next = m.WrapHandler(next) + } + + if s.cfg.Logging && logrus.GetLevel() == logrus.DebugLevel { + next = middleware.DebugRequestMiddleware(next) + } + + return next +} diff --git a/components/engine/api/server/middleware/cors.go b/components/engine/api/server/middleware/cors.go new file mode 100644 index 0000000000..ea725dbc72 --- /dev/null +++ b/components/engine/api/server/middleware/cors.go @@ -0,0 +1,37 @@ +package middleware + +import ( + "net/http" + + "github.com/Sirupsen/logrus" + "golang.org/x/net/context" +) + +// CORSMiddleware injects CORS headers to each request +// when it's configured. +type CORSMiddleware struct { + defaultHeaders string +} + +// NewCORSMiddleware creates a new CORSMiddleware with default headers. +func NewCORSMiddleware(d string) CORSMiddleware { + return CORSMiddleware{defaultHeaders: d} +} + +// WrapHandler returns a new handler function wrapping the previous one in the request chain. 
+func (c CORSMiddleware) WrapHandler(handler func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error) func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+	return func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+		// If "api-cors-header" is not given, but "api-enable-cors" is true, we set cors to "*"
+		// otherwise, all header values will be passed to the HTTP handler
+		corsHeaders := c.defaultHeaders
+		if corsHeaders == "" {
+			corsHeaders = "*"
+		}
+
+		logrus.Debugf("CORS header is enabled and set to: %s", corsHeaders)
+		w.Header().Add("Access-Control-Allow-Origin", corsHeaders)
+		w.Header().Add("Access-Control-Allow-Headers", "Origin, X-Requested-With, Content-Type, Accept, X-Registry-Auth")
+		w.Header().Add("Access-Control-Allow-Methods", "HEAD, GET, POST, DELETE, PUT, OPTIONS")
+		return handler(ctx, w, r, vars)
+	}
+} diff --git a/components/engine/api/server/middleware/debug.go b/components/engine/api/server/middleware/debug.go new file mode 100644 index 0000000000..8c8567669b --- /dev/null +++ b/components/engine/api/server/middleware/debug.go @@ -0,0 +1,76 @@
+package middleware
+
+import (
+	"bufio"
+	"encoding/json"
+	"io"
+	"net/http"
+	"strings"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/docker/api/server/httputils"
+	"github.com/docker/docker/pkg/ioutils"
+	"golang.org/x/net/context"
+)
+
+// DebugRequestMiddleware dumps the request to the logger
+func DebugRequestMiddleware(handler func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error) func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+	return func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+		logrus.Debugf("Calling %s %s", r.Method, r.RequestURI)
+
+		if r.Method != "POST" {
+			return handler(ctx, w, r, vars)
+		}
+		if err := httputils.CheckForJSON(r); err != nil {
+			return handler(ctx, w, r, vars)
+		}
+		maxBodySize := 4096 // 4KB
+		if r.ContentLength > int64(maxBodySize) {
+			return handler(ctx, w, r, vars)
+		}
+
+		body := r.Body
+		bufReader := bufio.NewReaderSize(body, maxBodySize)
+		r.Body = ioutils.NewReadCloserWrapper(bufReader, func() error { return body.Close() })
+
+		b, err := bufReader.Peek(maxBodySize)
+		if err != io.EOF {
+			// either there was an error reading, or the buffer is full (in which case the request is too large)
+			return handler(ctx, w, r, vars)
+		}
+
+		var postForm map[string]interface{}
+		if err := json.Unmarshal(b, &postForm); err == nil {
+			maskSecretKeys(postForm)
+			formStr, errMarshal := json.Marshal(postForm)
+			if errMarshal == nil {
+				logrus.Debugf("form data: %s", string(formStr))
+			} else {
+				logrus.Debugf("form data: %q", postForm)
+			}
+		}
+
+		return handler(ctx, w, r, vars)
+	}
+}
+
+func maskSecretKeys(inp interface{}) {
+	if arr, ok := inp.([]interface{}); ok {
+		for _, f := range arr {
+			maskSecretKeys(f)
+		}
+		return
+	}
+	if form, ok := inp.(map[string]interface{}); ok {
+	loop0:
+		for k, v := range form {
+			for _, m := range []string{"password", "secret", "jointoken", "unlockkey"} {
+				if strings.EqualFold(m, k) {
+					form[k] = "*****"
+					continue loop0
+				}
+			}
+			maskSecretKeys(v)
+		}
+	}
+} diff --git a/components/engine/api/server/middleware/experimental.go b/components/engine/api/server/middleware/experimental.go new file mode 100644 index 0000000000..b8f56e88b4 --- /dev/null +++ 
b/components/engine/api/server/middleware/experimental.go @@ -0,0 +1,29 @@
+package middleware
+
+import (
+	"net/http"
+
+	"golang.org/x/net/context"
+)
+
+// ExperimentalMiddleware is the middleware in charge of adding the
+// 'Docker-Experimental' header to every outgoing response
+type ExperimentalMiddleware struct {
+	experimental string
+}
+
+// NewExperimentalMiddleware creates a new ExperimentalMiddleware
+func NewExperimentalMiddleware(experimentalEnabled bool) ExperimentalMiddleware {
+	if experimentalEnabled {
+		return ExperimentalMiddleware{"true"}
+	}
+	return ExperimentalMiddleware{"false"}
+}
+
+// WrapHandler returns a new handler function wrapping the previous one in the request chain.
+func (e ExperimentalMiddleware) WrapHandler(handler func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error) func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+	return func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+		w.Header().Set("Docker-Experimental", e.experimental)
+		return handler(ctx, w, r, vars)
+	}
+} diff --git a/components/engine/api/server/middleware/middleware.go b/components/engine/api/server/middleware/middleware.go new file mode 100644 index 0000000000..dc1f5bfa0d --- /dev/null +++ b/components/engine/api/server/middleware/middleware.go @@ -0,0 +1,13 @@
+package middleware
+
+import (
+	"net/http"
+
+	"golang.org/x/net/context"
+)
+
+// Middleware is an interface to allow the use of ordinary functions as Docker API filters.
+// Any struct that has the appropriate signature can be registered as a middleware.
+type Middleware interface {
+	WrapHandler(func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error) func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error
+} diff --git a/components/engine/api/server/middleware/version.go b/components/engine/api/server/middleware/version.go new file mode 100644 index 0000000000..390a7f0594 --- /dev/null +++ b/components/engine/api/server/middleware/version.go @@ -0,0 +1,51 @@
+package middleware
+
+import (
+	"fmt"
+	"net/http"
+	"runtime"
+
+	"github.com/docker/docker/api/errors"
+	"github.com/docker/docker/api/types/versions"
+	"golang.org/x/net/context"
+)
+
+// VersionMiddleware is a middleware that
+// validates the client and server versions.
+type VersionMiddleware struct {
+	serverVersion  string
+	defaultVersion string
+	minVersion     string
+}
+
+// NewVersionMiddleware creates a new VersionMiddleware
+// with the default versions.
+func NewVersionMiddleware(s, d, m string) VersionMiddleware {
+	return VersionMiddleware{
+		serverVersion:  s,
+		defaultVersion: d,
+		minVersion:     m,
+	}
+}
+
+// WrapHandler returns a new handler function wrapping the previous one in the request chain.
+func (v VersionMiddleware) WrapHandler(handler func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error) func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+	return func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+		apiVersion := vars["version"]
+		if apiVersion == "" {
+			apiVersion = v.defaultVersion
+		}
+
+		if versions.LessThan(apiVersion, v.minVersion) {
+			return errors.NewBadRequestError(fmt.Errorf("client version %s is too old. 
Minimum supported API version is %s, please upgrade your client to a newer version", apiVersion, v.minVersion)) + } + + header := fmt.Sprintf("Docker/%s (%s)", v.serverVersion, runtime.GOOS) + w.Header().Set("Server", header) + w.Header().Set("API-Version", v.defaultVersion) + w.Header().Set("OSType", runtime.GOOS) + ctx = context.WithValue(ctx, "api-version", apiVersion) + return handler(ctx, w, r, vars) + } + +} diff --git a/components/engine/api/server/middleware/version_test.go b/components/engine/api/server/middleware/version_test.go new file mode 100644 index 0000000000..29787bf4d8 --- /dev/null +++ b/components/engine/api/server/middleware/version_test.go @@ -0,0 +1,57 @@ +package middleware + +import ( + "net/http" + "net/http/httptest" + "strings" + "testing" + + "github.com/docker/docker/api/server/httputils" + "golang.org/x/net/context" +) + +func TestVersionMiddleware(t *testing.T) { + handler := func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if httputils.VersionFromContext(ctx) == "" { + t.Fatal("Expected version, got empty string") + } + return nil + } + + defaultVersion := "1.10.0" + minVersion := "1.2.0" + m := NewVersionMiddleware(defaultVersion, defaultVersion, minVersion) + h := m.WrapHandler(handler) + + req, _ := http.NewRequest("GET", "/containers/json", nil) + resp := httptest.NewRecorder() + ctx := context.Background() + if err := h(ctx, resp, req, map[string]string{}); err != nil { + t.Fatal(err) + } +} + +func TestVersionMiddlewareWithErrors(t *testing.T) { + handler := func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if httputils.VersionFromContext(ctx) == "" { + t.Fatal("Expected version, got empty string") + } + return nil + } + + defaultVersion := "1.10.0" + minVersion := "1.2.0" + m := NewVersionMiddleware(defaultVersion, defaultVersion, minVersion) + h := m.WrapHandler(handler) + + req, _ := http.NewRequest("GET", "/containers/json", nil) + resp := httptest.NewRecorder() + ctx := context.Background() + + vars := map[string]string{"version": "0.1"} + err := h(ctx, resp, req, vars) + + if !strings.Contains(err.Error(), "client version 0.1 is too old. Minimum supported API version is 1.2.0") { + t.Fatalf("Expected too old client error, got %v", err) + } +} diff --git a/components/engine/api/server/profiler.go b/components/engine/api/server/profiler.go new file mode 100644 index 0000000000..d49be338c8 --- /dev/null +++ b/components/engine/api/server/profiler.go @@ -0,0 +1,46 @@ +package server + +import ( + "expvar" + "fmt" + "net/http" + "net/http/pprof" + + "github.com/gorilla/mux" +) + +const debugPathPrefix = "/debug/" + +func profilerSetup(mainRouter *mux.Router) { + var r = mainRouter.PathPrefix(debugPathPrefix).Subrouter() + r.HandleFunc("/vars", expVars) + r.HandleFunc("/pprof/", pprof.Index) + r.HandleFunc("/pprof/cmdline", pprof.Cmdline) + r.HandleFunc("/pprof/profile", pprof.Profile) + r.HandleFunc("/pprof/symbol", pprof.Symbol) + r.HandleFunc("/pprof/trace", pprof.Trace) + r.HandleFunc("/pprof/{name}", handlePprof) +} + +func handlePprof(w http.ResponseWriter, r *http.Request) { + var name string + if vars := mux.Vars(r); vars != nil { + name = vars["name"] + } + pprof.Handler(name).ServeHTTP(w, r) +} + +// Replicated from expvar.go as not public. 
+func expVars(w http.ResponseWriter, r *http.Request) { + first := true + w.Header().Set("Content-Type", "application/json; charset=utf-8") + fmt.Fprintln(w, "{") + expvar.Do(func(kv expvar.KeyValue) { + if !first { + fmt.Fprintln(w, ",") + } + first = false + fmt.Fprintf(w, "%q: %s", kv.Key, kv.Value) + }) + fmt.Fprintln(w, "\n}") +} diff --git a/components/engine/api/server/router/build/backend.go b/components/engine/api/server/router/build/backend.go new file mode 100644 index 0000000000..835b11abba --- /dev/null +++ b/components/engine/api/server/router/build/backend.go @@ -0,0 +1,17 @@ +package build + +import ( + "github.com/docker/docker/api/types/backend" + "golang.org/x/net/context" +) + +// Backend abstracts an image builder whose only purpose is to build an image referenced by an imageID. +type Backend interface { + // Build a Docker image returning the ID of the image + // TODO: make this return a reference instead of string + Build(context.Context, backend.BuildConfig) (string, error) +} + +type experimentalProvider interface { + HasExperimental() bool +} diff --git a/components/engine/api/server/router/build/build.go b/components/engine/api/server/router/build/build.go new file mode 100644 index 0000000000..dcf0c53953 --- /dev/null +++ b/components/engine/api/server/router/build/build.go @@ -0,0 +1,28 @@ +package build + +import "github.com/docker/docker/api/server/router" + +// buildRouter is a router to talk with the build controller +type buildRouter struct { + backend Backend + daemon experimentalProvider + routes []router.Route +} + +// NewRouter initializes a new build router +func NewRouter(b Backend, d experimentalProvider) router.Router { + r := &buildRouter{backend: b, daemon: d} + r.initRoutes() + return r +} + +// Routes returns the available routes to the build controller +func (r *buildRouter) Routes() []router.Route { + return r.routes +} + +func (r *buildRouter) initRoutes() { + r.routes = []router.Route{ + router.NewPostRoute("/build", r.postBuild, router.WithCancel), + } +} diff --git a/components/engine/api/server/router/build/build_routes.go b/components/engine/api/server/router/build/build_routes.go new file mode 100644 index 0000000000..c90b5965b0 --- /dev/null +++ b/components/engine/api/server/router/build/build_routes.go @@ -0,0 +1,244 @@ +package build + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "fmt" + "io" + "net/http" + "runtime" + "strconv" + "strings" + "sync" + + "github.com/Sirupsen/logrus" + apierrors "github.com/docker/docker/api/errors" + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/versions" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/pkg/streamformatter" + units "github.com/docker/go-units" + "github.com/pkg/errors" + "golang.org/x/net/context" +) + +func newImageBuildOptions(ctx context.Context, r *http.Request) (*types.ImageBuildOptions, error) { + version := httputils.VersionFromContext(ctx) + options := &types.ImageBuildOptions{} + if httputils.BoolValue(r, "forcerm") && versions.GreaterThanOrEqualTo(version, "1.12") { + options.Remove = true + } else if r.FormValue("rm") == "" && versions.GreaterThanOrEqualTo(version, "1.12") { + options.Remove = true + } else { + options.Remove = httputils.BoolValue(r, "rm") + } + if httputils.BoolValue(r, "pull") && 
versions.GreaterThanOrEqualTo(version, "1.16") { + options.PullParent = true + } + + options.Dockerfile = r.FormValue("dockerfile") + options.SuppressOutput = httputils.BoolValue(r, "q") + options.NoCache = httputils.BoolValue(r, "nocache") + options.ForceRemove = httputils.BoolValue(r, "forcerm") + options.MemorySwap = httputils.Int64ValueOrZero(r, "memswap") + options.Memory = httputils.Int64ValueOrZero(r, "memory") + options.CPUShares = httputils.Int64ValueOrZero(r, "cpushares") + options.CPUPeriod = httputils.Int64ValueOrZero(r, "cpuperiod") + options.CPUQuota = httputils.Int64ValueOrZero(r, "cpuquota") + options.CPUSetCPUs = r.FormValue("cpusetcpus") + options.CPUSetMems = r.FormValue("cpusetmems") + options.CgroupParent = r.FormValue("cgroupparent") + options.NetworkMode = r.FormValue("networkmode") + options.Tags = r.Form["t"] + options.ExtraHosts = r.Form["extrahosts"] + options.SecurityOpt = r.Form["securityopt"] + options.Squash = httputils.BoolValue(r, "squash") + options.Target = r.FormValue("target") + options.RemoteContext = r.FormValue("remote") + + if r.Form.Get("shmsize") != "" { + shmSize, err := strconv.ParseInt(r.Form.Get("shmsize"), 10, 64) + if err != nil { + return nil, err + } + options.ShmSize = shmSize + } + + if i := container.Isolation(r.FormValue("isolation")); i != "" { + if !container.Isolation.IsValid(i) { + return nil, fmt.Errorf("Unsupported isolation: %q", i) + } + options.Isolation = i + } + + if runtime.GOOS != "windows" && options.SecurityOpt != nil { + return nil, fmt.Errorf("the daemon on this platform does not support --security-opt to build") + } + + var buildUlimits = []*units.Ulimit{} + ulimitsJSON := r.FormValue("ulimits") + if ulimitsJSON != "" { + if err := json.Unmarshal([]byte(ulimitsJSON), &buildUlimits); err != nil { + return nil, err + } + options.Ulimits = buildUlimits + } + + // Note that there are two ways a --build-arg might appear in the + // json of the query param: + // "foo":"bar" + // and "foo":nil + // The first is the normal case, ie. --build-arg foo=bar + // or --build-arg foo + // where foo's value was picked up from an env var. + // The second ("foo":nil) is where they put --build-arg foo + // but "foo" isn't set as an env var. In that case we can't just drop + // the fact they mentioned it, we need to pass that along to the builder + // so that it can print a warning about "foo" being unused if there is + // no "ARG foo" in the Dockerfile. 
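The note above is worth pinning down with a concrete payload: decoding into `map[string]*string` is what preserves the difference between `--build-arg foo=bar` and a `--build-arg foo` whose variable was unset in the client environment. A minimal, self-contained sketch (illustrative only, not part of the patch; the key names are hypothetical):

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// "foo" was given a value; "bar" was mentioned but its env var was
	// unset on the client side, so it arrives as JSON null.
	raw := `{"foo":"1","bar":null}`

	var buildArgs map[string]*string
	if err := json.Unmarshal([]byte(raw), &buildArgs); err != nil {
		panic(err)
	}

	for k, v := range buildArgs {
		if v == nil {
			// nil pointer: the builder can still warn if the Dockerfile
			// has no matching "ARG k".
			fmt.Printf("%s: mentioned but unset\n", k)
			continue
		}
		fmt.Printf("%s=%s\n", k, *v)
	}
}
```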
+ buildArgsJSON := r.FormValue("buildargs") + if buildArgsJSON != "" { + var buildArgs = map[string]*string{} + if err := json.Unmarshal([]byte(buildArgsJSON), &buildArgs); err != nil { + return nil, err + } + options.BuildArgs = buildArgs + } + + labelsJSON := r.FormValue("labels") + if labelsJSON != "" { + var labels = map[string]string{} + if err := json.Unmarshal([]byte(labelsJSON), &labels); err != nil { + return nil, err + } + options.Labels = labels + } + + cacheFromJSON := r.FormValue("cachefrom") + if cacheFromJSON != "" { + var cacheFrom = []string{} + if err := json.Unmarshal([]byte(cacheFromJSON), &cacheFrom); err != nil { + return nil, err + } + options.CacheFrom = cacheFrom + } + + return options, nil +} + +func (br *buildRouter) postBuild(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + var ( + notVerboseBuffer = bytes.NewBuffer(nil) + version = httputils.VersionFromContext(ctx) + ) + + w.Header().Set("Content-Type", "application/json") + + output := ioutils.NewWriteFlusher(w) + defer output.Close() + errf := func(err error) error { + if httputils.BoolValue(r, "q") && notVerboseBuffer.Len() > 0 { + output.Write(notVerboseBuffer.Bytes()) + } + // Do not write the error in the http output if it's still empty. + // This prevents writing a 200 (OK) when there is an internal error. + if !output.Flushed() { + return err + } + _, err = w.Write(streamformatter.FormatError(err)) + if err != nil { + logrus.Warnf("could not write error response: %v", err) + } + return nil + } + + buildOptions, err := newImageBuildOptions(ctx, r) + if err != nil { + return errf(err) + } + buildOptions.AuthConfigs = getAuthConfigs(r.Header) + + if buildOptions.Squash && !br.daemon.HasExperimental() { + return apierrors.NewBadRequestError( + errors.New("squash is only supported with experimental mode")) + } + + out := io.Writer(output) + if buildOptions.SuppressOutput { + out = notVerboseBuffer + } + + // Currently, only used if context is from a remote URL. + // Look at code in DetectContextFromRemoteURL for more information. + createProgressReader := func(in io.ReadCloser) io.ReadCloser { + progressOutput := streamformatter.NewJSONProgressOutput(out, true) + return progress.NewProgressReader(in, progressOutput, r.ContentLength, "Downloading context", buildOptions.RemoteContext) + } + + wantAux := versions.GreaterThanOrEqualTo(version, "1.30") + + imgID, err := br.backend.Build(ctx, backend.BuildConfig{ + Source: r.Body, + Options: buildOptions, + ProgressWriter: buildProgressWriter(out, wantAux, createProgressReader), + }) + if err != nil { + return errf(err) + } + + // Everything worked, so if -q was provided the output from the daemon + // should be just the image ID, and we'll print that to stdout. 
+ if buildOptions.SuppressOutput { + fmt.Fprintln(streamformatter.NewStdoutWriter(output), imgID) + } + return nil +} + +func getAuthConfigs(header http.Header) map[string]types.AuthConfig { + authConfigs := map[string]types.AuthConfig{} + authConfigsEncoded := header.Get("X-Registry-Config") + + if authConfigsEncoded == "" { + return authConfigs + } + + authConfigsJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authConfigsEncoded)) + // Pulling an image does not error when no auth is provided, so to remain + // consistent with the existing API, decode errors are ignored + json.NewDecoder(authConfigsJSON).Decode(&authConfigs) + return authConfigs +} + +type syncWriter struct { + w io.Writer + mu sync.Mutex +} + +func (s *syncWriter) Write(b []byte) (count int, err error) { + s.mu.Lock() + count, err = s.w.Write(b) + s.mu.Unlock() + return +} + +func buildProgressWriter(out io.Writer, wantAux bool, createProgressReader func(io.ReadCloser) io.ReadCloser) backend.ProgressWriter { + out = &syncWriter{w: out} + + var aux *streamformatter.AuxFormatter + if wantAux { + aux = &streamformatter.AuxFormatter{Writer: out} + } + + return backend.ProgressWriter{ + Output: out, + StdoutFormatter: streamformatter.NewStdoutWriter(out), + StderrFormatter: streamformatter.NewStderrWriter(out), + AuxFormatter: aux, + ProgressReaderFunc: createProgressReader, + } +} diff --git a/components/engine/api/server/router/checkpoint/backend.go b/components/engine/api/server/router/checkpoint/backend.go new file mode 100644 index 0000000000..8810f88b72 --- /dev/null +++ b/components/engine/api/server/router/checkpoint/backend.go @@ -0,0 +1,10 @@ +package checkpoint + +import "github.com/docker/docker/api/types" + +// Backend for Checkpoint +type Backend interface { + CheckpointCreate(container string, config types.CheckpointCreateOptions) error + CheckpointDelete(container string, config types.CheckpointDeleteOptions) error + CheckpointList(container string, config types.CheckpointListOptions) ([]types.Checkpoint, error) +} diff --git a/components/engine/api/server/router/checkpoint/checkpoint.go b/components/engine/api/server/router/checkpoint/checkpoint.go new file mode 100644 index 0000000000..b169718232 --- /dev/null +++ b/components/engine/api/server/router/checkpoint/checkpoint.go @@ -0,0 +1,36 @@ +package checkpoint + +import ( + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/api/server/router" +) + +// checkpointRouter is a router to talk with the checkpoint controller +type checkpointRouter struct { + backend Backend + decoder httputils.ContainerDecoder + routes []router.Route +} + +// NewRouter initializes a new checkpoint router +func NewRouter(b Backend, decoder httputils.ContainerDecoder) router.Router { + r := &checkpointRouter{ + backend: b, + decoder: decoder, + } + r.initRoutes() + return r +} + +// Routes returns the available routes to the checkpoint controller +func (r *checkpointRouter) Routes() []router.Route { + return r.routes +} + +func (r *checkpointRouter) initRoutes() { + r.routes = []router.Route{ + router.NewGetRoute("/containers/{name:.*}/checkpoints", r.getContainerCheckpoints, router.Experimental), + router.NewPostRoute("/containers/{name:.*}/checkpoints", r.postContainerCheckpoint, router.Experimental), + router.NewDeleteRoute("/containers/{name}/checkpoints/{checkpoint}", r.deleteContainerCheckpoint, router.Experimental), + } +} diff --git a/components/engine/api/server/router/checkpoint/checkpoint_routes.go 
b/components/engine/api/server/router/checkpoint/checkpoint_routes.go new file mode 100644 index 0000000000..f988431191 --- /dev/null +++ b/components/engine/api/server/router/checkpoint/checkpoint_routes.go @@ -0,0 +1,65 @@ +package checkpoint + +import ( + "encoding/json" + "net/http" + + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +func (s *checkpointRouter) postContainerCheckpoint(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + var options types.CheckpointCreateOptions + + decoder := json.NewDecoder(r.Body) + if err := decoder.Decode(&options); err != nil { + return err + } + + err := s.backend.CheckpointCreate(vars["name"], options) + if err != nil { + return err + } + + w.WriteHeader(http.StatusCreated) + return nil +} + +func (s *checkpointRouter) getContainerCheckpoints(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + checkpoints, err := s.backend.CheckpointList(vars["name"], types.CheckpointListOptions{ + CheckpointDir: r.Form.Get("dir"), + }) + + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, checkpoints) +} + +func (s *checkpointRouter) deleteContainerCheckpoint(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + err := s.backend.CheckpointDelete(vars["name"], types.CheckpointDeleteOptions{ + CheckpointDir: r.Form.Get("dir"), + CheckpointID: vars["checkpoint"], + }) + + if err != nil { + return err + } + + w.WriteHeader(http.StatusNoContent) + return nil +} diff --git a/components/engine/api/server/router/container/backend.go b/components/engine/api/server/router/container/backend.go new file mode 100644 index 0000000000..d51ed81776 --- /dev/null +++ b/components/engine/api/server/router/container/backend.go @@ -0,0 +1,79 @@ +package container + +import ( + "io" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/filters" + containerpkg "github.com/docker/docker/container" + "github.com/docker/docker/pkg/archive" +) + +// execBackend includes functions to implement to provide exec functionality. +type execBackend interface { + ContainerExecCreate(name string, config *types.ExecConfig) (string, error) + ContainerExecInspect(id string) (*backend.ExecInspect, error) + ContainerExecResize(name string, height, width int) error + ContainerExecStart(ctx context.Context, name string, stdin io.ReadCloser, stdout io.Writer, stderr io.Writer) error + ExecExists(name string) (bool, error) +} + +// copyBackend includes functions to implement to provide container copy functionality. 
+type copyBackend interface { + ContainerArchivePath(name string, path string) (content io.ReadCloser, stat *types.ContainerPathStat, err error) + ContainerCopy(name string, res string) (io.ReadCloser, error) + ContainerExport(name string, out io.Writer) error + ContainerExtractToDir(name, path string, copyUIDGID, noOverwriteDirNonDir bool, content io.Reader) error + ContainerStatPath(name string, path string) (stat *types.ContainerPathStat, err error) +} + +// stateBackend includes functions to implement to provide container state lifecycle functionality. +type stateBackend interface { + ContainerCreate(config types.ContainerCreateConfig) (container.ContainerCreateCreatedBody, error) + ContainerKill(name string, sig uint64) error + ContainerPause(name string) error + ContainerRename(oldName, newName string) error + ContainerResize(name string, height, width int) error + ContainerRestart(name string, seconds *int) error + ContainerRm(name string, config *types.ContainerRmConfig) error + ContainerStart(name string, hostConfig *container.HostConfig, checkpoint string, checkpointDir string) error + ContainerStop(name string, seconds *int) error + ContainerUnpause(name string) error + ContainerUpdate(name string, hostConfig *container.HostConfig) (container.ContainerUpdateOKBody, error) + ContainerWait(ctx context.Context, name string, condition containerpkg.WaitCondition) (<-chan containerpkg.StateStatus, error) +} + +// monitorBackend includes functions to implement to provide container monitoring functionality. +type monitorBackend interface { + ContainerChanges(name string) ([]archive.Change, error) + ContainerInspect(name string, size bool, version string) (interface{}, error) + ContainerLogs(ctx context.Context, name string, config *types.ContainerLogsOptions) (<-chan *backend.LogMessage, error) + ContainerStats(ctx context.Context, name string, config *backend.ContainerStatsConfig) error + ContainerTop(name string, psArgs string) (*container.ContainerTopOKBody, error) + + Containers(config *types.ContainerListOptions) ([]*types.Container, error) +} + +// attachBackend includes functions to implement to provide container attaching functionality. +type attachBackend interface { + ContainerAttach(name string, c *backend.ContainerAttachConfig) error +} + +// systemBackend includes functions to implement to provide system-wide container functionality +type systemBackend interface { + ContainersPrune(ctx context.Context, pruneFilters filters.Args) (*types.ContainersPruneReport, error) +} + +// Backend is all the methods that need to be implemented to provide container specific functionality. 
+type Backend interface { + execBackend + copyBackend + stateBackend + monitorBackend + attachBackend + systemBackend +} diff --git a/components/engine/api/server/router/container/container.go b/components/engine/api/server/router/container/container.go new file mode 100644 index 0000000000..24c3224ee8 --- /dev/null +++ b/components/engine/api/server/router/container/container.go @@ -0,0 +1,77 @@ +package container + +import ( + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/api/server/router" +) + +type validationError struct { + error +} + +func (validationError) IsValidationError() bool { + return true +} + +// containerRouter is a router to talk with the container controller +type containerRouter struct { + backend Backend + decoder httputils.ContainerDecoder + routes []router.Route +} + +// NewRouter initializes a new container router +func NewRouter(b Backend, decoder httputils.ContainerDecoder) router.Router { + r := &containerRouter{ + backend: b, + decoder: decoder, + } + r.initRoutes() + return r +} + +// Routes returns the available routes to the container controller +func (r *containerRouter) Routes() []router.Route { + return r.routes +} + +// initRoutes initializes the routes in container router +func (r *containerRouter) initRoutes() { + r.routes = []router.Route{ + // HEAD + router.NewHeadRoute("/containers/{name:.*}/archive", r.headContainersArchive), + // GET + router.NewGetRoute("/containers/json", r.getContainersJSON), + router.NewGetRoute("/containers/{name:.*}/export", r.getContainersExport), + router.NewGetRoute("/containers/{name:.*}/changes", r.getContainersChanges), + router.NewGetRoute("/containers/{name:.*}/json", r.getContainersByName), + router.NewGetRoute("/containers/{name:.*}/top", r.getContainersTop), + router.NewGetRoute("/containers/{name:.*}/logs", r.getContainersLogs, router.WithCancel), + router.NewGetRoute("/containers/{name:.*}/stats", r.getContainersStats, router.WithCancel), + router.NewGetRoute("/containers/{name:.*}/attach/ws", r.wsContainersAttach), + router.NewGetRoute("/exec/{id:.*}/json", r.getExecByID), + router.NewGetRoute("/containers/{name:.*}/archive", r.getContainersArchive), + // POST + router.NewPostRoute("/containers/create", r.postContainersCreate), + router.NewPostRoute("/containers/{name:.*}/kill", r.postContainersKill), + router.NewPostRoute("/containers/{name:.*}/pause", r.postContainersPause), + router.NewPostRoute("/containers/{name:.*}/unpause", r.postContainersUnpause), + router.NewPostRoute("/containers/{name:.*}/restart", r.postContainersRestart), + router.NewPostRoute("/containers/{name:.*}/start", r.postContainersStart), + router.NewPostRoute("/containers/{name:.*}/stop", r.postContainersStop), + router.NewPostRoute("/containers/{name:.*}/wait", r.postContainersWait, router.WithCancel), + router.NewPostRoute("/containers/{name:.*}/resize", r.postContainersResize), + router.NewPostRoute("/containers/{name:.*}/attach", r.postContainersAttach), + router.NewPostRoute("/containers/{name:.*}/copy", r.postContainersCopy), // Deprecated since 1.8, Errors out since 1.12 + router.NewPostRoute("/containers/{name:.*}/exec", r.postContainerExecCreate), + router.NewPostRoute("/exec/{name:.*}/start", r.postContainerExecStart), + router.NewPostRoute("/exec/{name:.*}/resize", r.postContainerExecResize), + router.NewPostRoute("/containers/{name:.*}/rename", r.postContainerRename), + router.NewPostRoute("/containers/{name:.*}/update", r.postContainerUpdate), + router.NewPostRoute("/containers/prune", 
r.postContainersPrune, router.WithCancel), + // PUT + router.NewPutRoute("/containers/{name:.*}/archive", r.putContainersArchive), + // DELETE + router.NewDeleteRoute("/containers/{name:.*}", r.deleteContainers), + } +} diff --git a/components/engine/api/server/router/container/container_routes.go b/components/engine/api/server/router/container/container_routes.go new file mode 100644 index 0000000000..0032fea7aa --- /dev/null +++ b/components/engine/api/server/router/container/container_routes.go @@ -0,0 +1,608 @@ +package container + +import ( + "encoding/json" + "fmt" + "io" + "net/http" + "strconv" + "syscall" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api" + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/versions" + containerpkg "github.com/docker/docker/container" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/signal" + "golang.org/x/net/context" + "golang.org/x/net/websocket" +) + +func (s *containerRouter) getContainersJSON(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + filter, err := filters.FromParam(r.Form.Get("filters")) + if err != nil { + return err + } + + config := &types.ContainerListOptions{ + All: httputils.BoolValue(r, "all"), + Size: httputils.BoolValue(r, "size"), + Since: r.Form.Get("since"), + Before: r.Form.Get("before"), + Filters: filter, + } + + if tmpLimit := r.Form.Get("limit"); tmpLimit != "" { + limit, err := strconv.Atoi(tmpLimit) + if err != nil { + return err + } + config.Limit = limit + } + + containers, err := s.backend.Containers(config) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, containers) +} + +func (s *containerRouter) getContainersStats(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + stream := httputils.BoolValueOrDefault(r, "stream", true) + if !stream { + w.Header().Set("Content-Type", "application/json") + } + + config := &backend.ContainerStatsConfig{ + Stream: stream, + OutStream: w, + Version: string(httputils.VersionFromContext(ctx)), + } + + return s.backend.ContainerStats(ctx, vars["name"], config) +} + +func (s *containerRouter) getContainersLogs(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + // Args are validated before the stream starts because when it starts we're + // sending HTTP 200 by writing an empty chunk of data to tell the client that + // the daemon is going to stream. By sending this initial HTTP 200 we can't report + // any error after the stream starts (i.e. container not found, wrong parameters) + // with the appropriate status code. 
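The comment above rests on an HTTP detail that is easy to trip over: once a handler writes the first body byte, the 200 status line is committed, and any later `WriteHeader` call is a no-op. A small standard-library sketch of that constraint (illustrative only, with a hypothetical handler, not code from this patch):

```go
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

func main() {
	h := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// Even a zero-length write commits the "200 OK" status line.
		w.Write([]byte(""))
		// Too late: the recorder silently ignores this (net/http itself
		// would log a "superfluous WriteHeader" warning).
		w.WriteHeader(http.StatusNotFound)
	})

	rec := httptest.NewRecorder()
	h.ServeHTTP(rec, httptest.NewRequest("GET", "/containers/nosuch/logs", nil))
	fmt.Println(rec.Code) // prints 200, not 404
}
```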
+ stdout, stderr := httputils.BoolValue(r, "stdout"), httputils.BoolValue(r, "stderr") + if !(stdout || stderr) { + return fmt.Errorf("Bad parameters: you must choose at least one stream") + } + + containerName := vars["name"] + logsConfig := &types.ContainerLogsOptions{ + Follow: httputils.BoolValue(r, "follow"), + Timestamps: httputils.BoolValue(r, "timestamps"), + Since: r.Form.Get("since"), + Tail: r.Form.Get("tail"), + ShowStdout: stdout, + ShowStderr: stderr, + Details: httputils.BoolValue(r, "details"), + } + + // doesn't matter what version the client is on, we're using this internally only + // also do we need size? I'm thinking no we don't + raw, err := s.backend.ContainerInspect(containerName, false, api.DefaultVersion) + if err != nil { + return err + } + container, ok := raw.(*types.ContainerJSON) + if !ok { + // %T prints the type. handy! + return fmt.Errorf("expected container to be *types.ContainerJSON but got %T", raw) + } + + msgs, err := s.backend.ContainerLogs(ctx, containerName, logsConfig) + if err != nil { + return err + } + + // if it has a tty, we're not muxing streams. if it doesn't, we are. simple. + // this is the point of no return for writing a response. once we call + // WriteLogStream, the response has been started and errors will be + // returned in band by WriteLogStream + httputils.WriteLogStream(ctx, w, msgs, logsConfig, !container.Config.Tty) + return nil +} + +func (s *containerRouter) getContainersExport(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + return s.backend.ContainerExport(vars["name"], w) +} + +func (s *containerRouter) postContainersStart(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + // If contentLength is -1, we can assume chunked encoding + // or more technically that the length is unknown + // https://golang.org/src/pkg/net/http/request.go#L139 + // net/http otherwise seems to swallow any headers related to chunked encoding + // including r.TransferEncoding + // allow a nil body for backwards compatibility + + version := httputils.VersionFromContext(ctx) + var hostConfig *container.HostConfig + // A non-nil JSON object is at least 7 characters. 
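For the "7 characters" remark above: the smallest non-empty JSON object, e.g. `{"a":1}`, is exactly 7 bytes, which is what the length guard that follows leans on. A tiny sketch mirroring that guard (illustrative only):

```go
package main

import "fmt"

func main() {
	fmt.Println(len(`{"a":1}`)) // 7: the smallest non-empty JSON object

	// Mirrors the handler's guard: only attempt to decode a host config
	// when the body could plausibly hold one, or the length is unknown (-1).
	for _, contentLength := range []int64{-1, 0, 7, 64} {
		mayHaveBody := contentLength > 7 || contentLength == -1
		fmt.Printf("ContentLength=%d -> decode body: %v\n", contentLength, mayHaveBody)
	}
}
```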
+ if r.ContentLength > 7 || r.ContentLength == -1 { + if versions.GreaterThanOrEqualTo(version, "1.24") { + return validationError{fmt.Errorf("starting container with non-empty request body was deprecated since v1.10 and removed in v1.12")} + } + + if err := httputils.CheckForJSON(r); err != nil { + return err + } + + c, err := s.decoder.DecodeHostConfig(r.Body) + if err != nil { + return err + } + hostConfig = c + } + + if err := httputils.ParseForm(r); err != nil { + return err + } + + checkpoint := r.Form.Get("checkpoint") + checkpointDir := r.Form.Get("checkpoint-dir") + if err := s.backend.ContainerStart(vars["name"], hostConfig, checkpoint, checkpointDir); err != nil { + return err + } + + w.WriteHeader(http.StatusNoContent) + return nil +} + +func (s *containerRouter) postContainersStop(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + var seconds *int + if tmpSeconds := r.Form.Get("t"); tmpSeconds != "" { + valSeconds, err := strconv.Atoi(tmpSeconds) + if err != nil { + return err + } + seconds = &valSeconds + } + + if err := s.backend.ContainerStop(vars["name"], seconds); err != nil { + return err + } + w.WriteHeader(http.StatusNoContent) + + return nil +} + +type errContainerIsRunning interface { + ContainerIsRunning() bool +} + +func (s *containerRouter) postContainersKill(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + var sig syscall.Signal + name := vars["name"] + + // If we have a signal, look at it. Otherwise, do nothing + if sigStr := r.Form.Get("signal"); sigStr != "" { + var err error + if sig, err = signal.ParseSignal(sigStr); err != nil { + return err + } + } + + if err := s.backend.ContainerKill(name, uint64(sig)); err != nil { + var isStopped bool + if e, ok := err.(errContainerIsRunning); ok { + isStopped = !e.ContainerIsRunning() + } + + // To keep backwards compatibility, the error is suppressed for clients + // older than API 1.20 when the container is already stopped; clients on + // API >= 1.20 always receive the error. 
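The compatibility gate above, like the rest of this patch, depends on the `versions` helpers comparing dot-separated API versions numerically rather than lexically. A hedged sketch of that behavior (assuming the `github.com/docker/docker/api/types/versions` helpers imported throughout these files):

```go
package main

import (
	"fmt"

	"github.com/docker/docker/api/types/versions"
)

func main() {
	// Components compare numerically, so "1.9" < "1.20"
	// (a plain string comparison would get this wrong).
	fmt.Println(versions.LessThan("1.9", "1.20"))              // true
	fmt.Println(versions.GreaterThanOrEqualTo("1.20", "1.20")) // true

	// The kill handler's gate: API 1.20+ clients always get the error,
	// older clients only while the container is still running.
	apiVersion, isStopped := "1.19", true
	fmt.Println(versions.GreaterThanOrEqualTo(apiVersion, "1.20") || !isStopped) // false
}
```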
+ version := httputils.VersionFromContext(ctx) + if versions.GreaterThanOrEqualTo(version, "1.20") || !isStopped { + return fmt.Errorf("Cannot kill container %s: %v", name, err) + } + } + + w.WriteHeader(http.StatusNoContent) + return nil +} + +func (s *containerRouter) postContainersRestart(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + var seconds *int + if tmpSeconds := r.Form.Get("t"); tmpSeconds != "" { + valSeconds, err := strconv.Atoi(tmpSeconds) + if err != nil { + return err + } + seconds = &valSeconds + } + + if err := s.backend.ContainerRestart(vars["name"], seconds); err != nil { + return err + } + + w.WriteHeader(http.StatusNoContent) + + return nil +} + +func (s *containerRouter) postContainersPause(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + if err := s.backend.ContainerPause(vars["name"]); err != nil { + return err + } + + w.WriteHeader(http.StatusNoContent) + + return nil +} + +func (s *containerRouter) postContainersUnpause(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + if err := s.backend.ContainerUnpause(vars["name"]); err != nil { + return err + } + + w.WriteHeader(http.StatusNoContent) + + return nil +} + +func (s *containerRouter) postContainersWait(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + // Behavior changed in version 1.30 to handle wait condition and to + // return headers immediately. + version := httputils.VersionFromContext(ctx) + legacyBehavior := versions.LessThan(version, "1.30") + + // The wait condition defaults to "not-running". + waitCondition := containerpkg.WaitConditionNotRunning + if !legacyBehavior { + if err := httputils.ParseForm(r); err != nil { + return err + } + switch container.WaitCondition(r.Form.Get("condition")) { + case container.WaitConditionNextExit: + waitCondition = containerpkg.WaitConditionNextExit + case container.WaitConditionRemoved: + waitCondition = containerpkg.WaitConditionRemoved + } + } + + // Note: the context should get canceled if the client closes the + // connection since this handler has been wrapped by the + // router.WithCancel() wrapper. + waitC, err := s.backend.ContainerWait(ctx, vars["name"], waitCondition) + if err != nil { + return err + } + + w.Header().Set("Content-Type", "application/json") + + if !legacyBehavior { + // Write response header immediately. + w.WriteHeader(http.StatusOK) + if flusher, ok := w.(http.Flusher); ok { + flusher.Flush() + } + } + + // Block on the result of the wait operation. 
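`ContainerWait` hands back a receive-only channel, and the handler blocks on it right below. A sketch of the same consume pattern, with the cancellation the `router.WithCancel` wrapper is meant to provide (`waitResult` is a hypothetical stand-in for `containerpkg.StateStatus`, not the patch's type):

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// waitResult is a hypothetical stand-in for containerpkg.StateStatus.
type waitResult struct{ exitCode int }

func (r waitResult) ExitCode() int { return r.exitCode }

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	waitC := make(chan waitResult, 1)
	go func() { waitC <- waitResult{exitCode: 0} }() // the container exits

	// Block on either the exit status or the client going away.
	select {
	case status := <-waitC:
		fmt.Printf(`{"StatusCode":%d}`+"\n", status.ExitCode())
	case <-ctx.Done():
		fmt.Println("client gone:", ctx.Err())
	}
}
```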
+ status := <-waitC + + return json.NewEncoder(w).Encode(&container.ContainerWaitOKBody{ + StatusCode: int64(status.ExitCode()), + }) +} + +func (s *containerRouter) getContainersChanges(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + changes, err := s.backend.ContainerChanges(vars["name"]) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, changes) +} + +func (s *containerRouter) getContainersTop(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + procList, err := s.backend.ContainerTop(vars["name"], r.Form.Get("ps_args")) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, procList) +} + +func (s *containerRouter) postContainerRename(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + name := vars["name"] + newName := r.Form.Get("name") + if err := s.backend.ContainerRename(name, newName); err != nil { + return err + } + w.WriteHeader(http.StatusNoContent) + return nil +} + +func (s *containerRouter) postContainerUpdate(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + if err := httputils.CheckForJSON(r); err != nil { + return err + } + + var updateConfig container.UpdateConfig + + decoder := json.NewDecoder(r.Body) + if err := decoder.Decode(&updateConfig); err != nil { + return err + } + + hostConfig := &container.HostConfig{ + Resources: updateConfig.Resources, + RestartPolicy: updateConfig.RestartPolicy, + } + + name := vars["name"] + resp, err := s.backend.ContainerUpdate(name, hostConfig) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, resp) +} + +func (s *containerRouter) postContainersCreate(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + if err := httputils.CheckForJSON(r); err != nil { + return err + } + + name := r.Form.Get("name") + + config, hostConfig, networkingConfig, err := s.decoder.DecodeConfig(r.Body) + if err != nil { + return err + } + version := httputils.VersionFromContext(ctx) + adjustCPUShares := versions.LessThan(version, "1.19") + + // When using API 1.24 and under, the client is responsible for removing the container + if hostConfig != nil && versions.LessThan(version, "1.25") { + hostConfig.AutoRemove = false + } + + ccr, err := s.backend.ContainerCreate(types.ContainerCreateConfig{ + Name: name, + Config: config, + HostConfig: hostConfig, + NetworkingConfig: networkingConfig, + AdjustCPUShares: adjustCPUShares, + }) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusCreated, ccr) +} + +func (s *containerRouter) deleteContainers(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + name := vars["name"] + config := &types.ContainerRmConfig{ + ForceRemove: httputils.BoolValue(r, "force"), + RemoveVolume: httputils.BoolValue(r, "v"), + RemoveLink: httputils.BoolValue(r, "link"), + } + + if err := s.backend.ContainerRm(name, config); err != nil { + return err + } + + w.WriteHeader(http.StatusNoContent) + + return nil +} + +func (s 
*containerRouter) postContainersResize(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + height, err := strconv.Atoi(r.Form.Get("h")) + if err != nil { + return err + } + width, err := strconv.Atoi(r.Form.Get("w")) + if err != nil { + return err + } + + return s.backend.ContainerResize(vars["name"], height, width) +} + +func (s *containerRouter) postContainersAttach(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + err := httputils.ParseForm(r) + if err != nil { + return err + } + containerName := vars["name"] + + _, upgrade := r.Header["Upgrade"] + detachKeys := r.FormValue("detachKeys") + + hijacker, ok := w.(http.Hijacker) + if !ok { + return fmt.Errorf("error attaching to container %s, hijack connection missing", containerName) + } + + setupStreams := func() (io.ReadCloser, io.Writer, io.Writer, error) { + conn, _, err := hijacker.Hijack() + if err != nil { + return nil, nil, nil, err + } + + // set raw mode + conn.Write([]byte{}) + + if upgrade { + fmt.Fprintf(conn, "HTTP/1.1 101 UPGRADED\r\nContent-Type: application/vnd.docker.raw-stream\r\nConnection: Upgrade\r\nUpgrade: tcp\r\n\r\n") + } else { + fmt.Fprintf(conn, "HTTP/1.1 200 OK\r\nContent-Type: application/vnd.docker.raw-stream\r\n\r\n") + } + + closer := func() error { + httputils.CloseStreams(conn) + return nil + } + return ioutils.NewReadCloserWrapper(conn, closer), conn, conn, nil + } + + attachConfig := &backend.ContainerAttachConfig{ + GetStreams: setupStreams, + UseStdin: httputils.BoolValue(r, "stdin"), + UseStdout: httputils.BoolValue(r, "stdout"), + UseStderr: httputils.BoolValue(r, "stderr"), + Logs: httputils.BoolValue(r, "logs"), + Stream: httputils.BoolValue(r, "stream"), + DetachKeys: detachKeys, + MuxStreams: true, + } + + if err = s.backend.ContainerAttach(containerName, attachConfig); err != nil { + logrus.Errorf("Handler for %s %s returned error: %v", r.Method, r.URL.Path, err) + // Remember to close stream if error happens + conn, _, errHijack := hijacker.Hijack() + if errHijack == nil { + statusCode := httputils.GetHTTPErrorStatusCode(err) + statusText := http.StatusText(statusCode) + fmt.Fprintf(conn, "HTTP/1.1 %d %s\r\nContent-Type: application/vnd.docker.raw-stream\r\n\r\n%s\r\n", statusCode, statusText, err.Error()) + httputils.CloseStreams(conn) + } else { + logrus.Errorf("Error Hijacking: %v", err) + } + } + return nil +} + +func (s *containerRouter) wsContainersAttach(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + containerName := vars["name"] + + var err error + detachKeys := r.FormValue("detachKeys") + + done := make(chan struct{}) + started := make(chan struct{}) + + version := httputils.VersionFromContext(ctx) + + setupStreams := func() (io.ReadCloser, io.Writer, io.Writer, error) { + wsChan := make(chan *websocket.Conn) + h := func(conn *websocket.Conn) { + wsChan <- conn + <-done + } + + srv := websocket.Server{Handler: h, Handshake: nil} + go func() { + close(started) + srv.ServeHTTP(w, r) + }() + + conn := <-wsChan + // For API version 1.28 and above, a binary frame will be sent. + // See 28176 for details. 
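For the binary-frame note above, here is what the client side of this endpoint could look like using the same `golang.org/x/net/websocket` package the handler uses; the daemon address and query string are hypothetical (illustrative sketch, not part of the patch):

```go
package main

import (
	"fmt"
	"io"
	"os"

	"golang.org/x/net/websocket"
)

func main() {
	// Hypothetical daemon address; the path matches the route registered above.
	url := "ws://localhost:2375/v1.30/containers/mycontainer/attach/ws?stream=1&stdout=1"
	conn, err := websocket.Dial(url, "", "http://localhost/")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	defer conn.Close()

	// Against an API >= 1.28 daemon the frames are binary, so the connection
	// can be drained as a plain io.Reader carrying arbitrary bytes.
	if _, err := io.Copy(os.Stdout, conn); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}
```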
+ if versions.GreaterThanOrEqualTo(version, "1.28") { + conn.PayloadType = websocket.BinaryFrame + } + return conn, conn, conn, nil + } + + attachConfig := &backend.ContainerAttachConfig{ + GetStreams: setupStreams, + Logs: httputils.BoolValue(r, "logs"), + Stream: httputils.BoolValue(r, "stream"), + DetachKeys: detachKeys, + UseStdin: true, + UseStdout: true, + UseStderr: true, + MuxStreams: false, // TODO: this should be true since it's a single stream for both stdout and stderr + } + + err = s.backend.ContainerAttach(containerName, attachConfig) + close(done) + select { + case <-started: + logrus.Errorf("Error attaching websocket: %s", err) + return nil + default: + } + return err +} + +func (s *containerRouter) postContainersPrune(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + pruneFilters, err := filters.FromParam(r.Form.Get("filters")) + if err != nil { + return err + } + + pruneReport, err := s.backend.ContainersPrune(ctx, pruneFilters) + if err != nil { + return err + } + return httputils.WriteJSON(w, http.StatusOK, pruneReport) +} diff --git a/components/engine/api/server/router/container/copy.go b/components/engine/api/server/router/container/copy.go new file mode 100644 index 0000000000..5cfe8d7ba1 --- /dev/null +++ b/components/engine/api/server/router/container/copy.go @@ -0,0 +1,118 @@ +package container + +import ( + "encoding/base64" + "encoding/json" + "fmt" + "io" + "net/http" + "os" + "strings" + + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/versions" + "golang.org/x/net/context" +) + +// postContainersCopy is deprecated in favor of getContainersArchive. +func (s *containerRouter) postContainersCopy(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + // Deprecated since 1.8, Errors out since 1.12 + version := httputils.VersionFromContext(ctx) + if versions.GreaterThanOrEqualTo(version, "1.24") { + w.WriteHeader(http.StatusNotFound) + return nil + } + if err := httputils.CheckForJSON(r); err != nil { + return err + } + + cfg := types.CopyConfig{} + if err := json.NewDecoder(r.Body).Decode(&cfg); err != nil { + return err + } + + if cfg.Resource == "" { + return fmt.Errorf("Path cannot be empty") + } + + data, err := s.backend.ContainerCopy(vars["name"], cfg.Resource) + if err != nil { + if strings.Contains(strings.ToLower(err.Error()), "no such container") { + w.WriteHeader(http.StatusNotFound) + return nil + } + if os.IsNotExist(err) { + return fmt.Errorf("Could not find the file %s in container %s", cfg.Resource, vars["name"]) + } + return err + } + defer data.Close() + + w.Header().Set("Content-Type", "application/x-tar") + _, err = io.Copy(w, data) + return err +} + +// Encode the stat to JSON, base64 encode, and place in a header. 
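The helper this comment introduces (just below) base64-encodes a JSON-serialized stat into the `X-Docker-Container-Path-Stat` header; a client reverses the two steps. A hedged sketch of the decode side (the `pathStat` struct abbreviates the real `types.ContainerPathStat` and is an assumption, not the canonical type):

```go
package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
)

// pathStat abbreviates types.ContainerPathStat (an assumption, not the full type).
type pathStat struct {
	Name string `json:"name"`
	Size int64  `json:"size"`
}

func main() {
	// What a HEAD/GET archive response might carry in the header.
	header := base64.StdEncoding.EncodeToString([]byte(`{"name":"etc","size":4096}`))

	raw, err := base64.StdEncoding.DecodeString(header)
	if err != nil {
		panic(err)
	}
	var st pathStat
	if err := json.Unmarshal(raw, &st); err != nil {
		panic(err)
	}
	fmt.Printf("%s: %d bytes\n", st.Name, st.Size) // etc: 4096 bytes
}
```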
+func setContainerPathStatHeader(stat *types.ContainerPathStat, header http.Header) error { + statJSON, err := json.Marshal(stat) + if err != nil { + return err + } + + header.Set( + "X-Docker-Container-Path-Stat", + base64.StdEncoding.EncodeToString(statJSON), + ) + + return nil +} + +func (s *containerRouter) headContainersArchive(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + v, err := httputils.ArchiveFormValues(r, vars) + if err != nil { + return err + } + + stat, err := s.backend.ContainerStatPath(v.Name, v.Path) + if err != nil { + return err + } + + return setContainerPathStatHeader(stat, w.Header()) +} + +func (s *containerRouter) getContainersArchive(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + v, err := httputils.ArchiveFormValues(r, vars) + if err != nil { + return err + } + + tarArchive, stat, err := s.backend.ContainerArchivePath(v.Name, v.Path) + if err != nil { + return err + } + defer tarArchive.Close() + + if err := setContainerPathStatHeader(stat, w.Header()); err != nil { + return err + } + + w.Header().Set("Content-Type", "application/x-tar") + _, err = io.Copy(w, tarArchive) + + return err +} + +func (s *containerRouter) putContainersArchive(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + v, err := httputils.ArchiveFormValues(r, vars) + if err != nil { + return err + } + + noOverwriteDirNonDir := httputils.BoolValue(r, "noOverwriteDirNonDir") + copyUIDGID := httputils.BoolValue(r, "copyUIDGID") + + return s.backend.ContainerExtractToDir(v.Name, v.Path, copyUIDGID, noOverwriteDirNonDir, r.Body) +} diff --git a/components/engine/api/server/router/container/exec.go b/components/engine/api/server/router/container/exec.go new file mode 100644 index 0000000000..1134a0e797 --- /dev/null +++ b/components/engine/api/server/router/container/exec.go @@ -0,0 +1,140 @@ +package container + +import ( + "encoding/json" + "fmt" + "io" + "net/http" + "strconv" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/versions" + "github.com/docker/docker/pkg/stdcopy" + "golang.org/x/net/context" +) + +func (s *containerRouter) getExecByID(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + eConfig, err := s.backend.ContainerExecInspect(vars["id"]) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, eConfig) +} + +func (s *containerRouter) postContainerExecCreate(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + if err := httputils.CheckForJSON(r); err != nil { + return err + } + name := vars["name"] + + execConfig := &types.ExecConfig{} + if err := json.NewDecoder(r.Body).Decode(execConfig); err != nil { + return err + } + + if len(execConfig.Cmd) == 0 { + return fmt.Errorf("No exec command specified") + } + + // Register an instance of Exec in container. + id, err := s.backend.ContainerExecCreate(name, execConfig) + if err != nil { + logrus.Errorf("Error setting up exec command in container %s: %v", name, err) + return err + } + + return httputils.WriteJSON(w, http.StatusCreated, &types.IDResponse{ + ID: id, + }) +} + +// TODO(vishh): Refactor the code to avoid having to specify stream config as part of both create and start. 
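When the exec-start handler below runs without a TTY, stdout and stderr are muxed onto the single hijacked stream with `stdcopy`; a client separates them again with `stdcopy.StdCopy`. A minimal round-trip sketch (illustrative only, using the same `pkg/stdcopy` package the handler imports):

```go
package main

import (
	"bytes"
	"fmt"
	"os"

	"github.com/docker/docker/pkg/stdcopy"
)

func main() {
	// Mux two substreams onto one writer, as the non-TTY exec path does.
	var muxed bytes.Buffer
	stdcopy.NewStdWriter(&muxed, stdcopy.Stdout).Write([]byte("to stdout\n"))
	stdcopy.NewStdWriter(&muxed, stdcopy.Stderr).Write([]byte("to stderr\n"))

	// A client demultiplexes them back into separate writers.
	if _, err := stdcopy.StdCopy(os.Stdout, os.Stderr, &muxed); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}
```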
+func (s *containerRouter) postContainerExecStart(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + version := httputils.VersionFromContext(ctx) + if versions.GreaterThan(version, "1.21") { + if err := httputils.CheckForJSON(r); err != nil { + return err + } + } + + var ( + execName = vars["name"] + stdin, inStream io.ReadCloser + stdout, stderr, outStream io.Writer + ) + + execStartCheck := &types.ExecStartCheck{} + if err := json.NewDecoder(r.Body).Decode(execStartCheck); err != nil { + return err + } + + if exists, err := s.backend.ExecExists(execName); !exists { + return err + } + + if !execStartCheck.Detach { + var err error + // Setting up the streaming http interface. + inStream, outStream, err = httputils.HijackConnection(w) + if err != nil { + return err + } + defer httputils.CloseStreams(inStream, outStream) + + if _, ok := r.Header["Upgrade"]; ok { + fmt.Fprint(outStream, "HTTP/1.1 101 UPGRADED\r\nContent-Type: application/vnd.docker.raw-stream\r\nConnection: Upgrade\r\nUpgrade: tcp\r\n") + } else { + fmt.Fprint(outStream, "HTTP/1.1 200 OK\r\nContent-Type: application/vnd.docker.raw-stream\r\n") + } + + // copy headers that were removed as part of hijack + if err := w.Header().WriteSubset(outStream, nil); err != nil { + return err + } + fmt.Fprint(outStream, "\r\n") + + stdin = inStream + stdout = outStream + if !execStartCheck.Tty { + stderr = stdcopy.NewStdWriter(outStream, stdcopy.Stderr) + stdout = stdcopy.NewStdWriter(outStream, stdcopy.Stdout) + } + } + + // Now run the user process in the container. + // Maybe we should pass ctx here if we're not detaching? + if err := s.backend.ContainerExecStart(context.Background(), execName, stdin, stdout, stderr); err != nil { + if execStartCheck.Detach { + return err + } + stdout.Write([]byte(err.Error() + "\r\n")) + logrus.Errorf("Error running exec in container: %v", err) + } + return nil +} + +func (s *containerRouter) postContainerExecResize(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + height, err := strconv.Atoi(r.Form.Get("h")) + if err != nil { + return err + } + width, err := strconv.Atoi(r.Form.Get("w")) + if err != nil { + return err + } + + return s.backend.ContainerExecResize(vars["name"], height, width) +} diff --git a/components/engine/api/server/router/container/inspect.go b/components/engine/api/server/router/container/inspect.go new file mode 100644 index 0000000000..dbbced7eee --- /dev/null +++ b/components/engine/api/server/router/container/inspect.go @@ -0,0 +1,21 @@ +package container + +import ( + "net/http" + + "github.com/docker/docker/api/server/httputils" + "golang.org/x/net/context" +) + +// getContainersByName inspects a container's configuration and serializes it as JSON. 
+func (s *containerRouter) getContainersByName(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + displaySize := httputils.BoolValue(r, "size") + + version := httputils.VersionFromContext(ctx) + json, err := s.backend.ContainerInspect(vars["name"], displaySize, version) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, json) +} diff --git a/components/engine/api/server/router/distribution/backend.go b/components/engine/api/server/router/distribution/backend.go new file mode 100644 index 0000000000..fc3a80e59f --- /dev/null +++ b/components/engine/api/server/router/distribution/backend.go @@ -0,0 +1,14 @@ +package distribution + +import ( + "github.com/docker/distribution" + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// Backend is all the methods that need to be implemented +// to provide image specific functionality. +type Backend interface { + GetRepository(context.Context, reference.Named, *types.AuthConfig) (distribution.Repository, bool, error) +} diff --git a/components/engine/api/server/router/distribution/distribution.go b/components/engine/api/server/router/distribution/distribution.go new file mode 100644 index 0000000000..c1fb7bc1e9 --- /dev/null +++ b/components/engine/api/server/router/distribution/distribution.go @@ -0,0 +1,31 @@ +package distribution + +import "github.com/docker/docker/api/server/router" + +// distributionRouter is a router to talk with the registry +type distributionRouter struct { + backend Backend + routes []router.Route +} + +// NewRouter initializes a new distribution router +func NewRouter(backend Backend) router.Router { + r := &distributionRouter{ + backend: backend, + } + r.initRoutes() + return r +} + +// Routes returns the available routes +func (r *distributionRouter) Routes() []router.Route { + return r.routes +} + +// initRoutes initializes the routes in the distribution router +func (r *distributionRouter) initRoutes() { + r.routes = []router.Route{ + // GET + router.NewGetRoute("/distribution/{name:.*}/json", r.getDistributionInfo), + } +} diff --git a/components/engine/api/server/router/distribution/distribution_routes.go b/components/engine/api/server/router/distribution/distribution_routes.go new file mode 100644 index 0000000000..cc9e66a166 --- /dev/null +++ b/components/engine/api/server/router/distribution/distribution_routes.go @@ -0,0 +1,138 @@ +package distribution + +import ( + "encoding/base64" + "encoding/json" + "net/http" + "strings" + + "github.com/docker/distribution/manifest/manifestlist" + "github.com/docker/distribution/manifest/schema1" + "github.com/docker/distribution/manifest/schema2" + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/api/types" + registrytypes "github.com/docker/docker/api/types/registry" + "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" + "golang.org/x/net/context" +) + +func (s *distributionRouter) getDistributionInfo(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + w.Header().Set("Content-Type", "application/json") + + var ( + config = &types.AuthConfig{} + authEncoded = r.Header.Get("X-Registry-Auth") + distributionInspect registrytypes.DistributionInspect + ) + + if authEncoded != "" { + authJSON := base64.NewDecoder(base64.URLEncoding, 
strings.NewReader(authEncoded)) + if err := json.NewDecoder(authJSON).Decode(&config); err != nil { + // for a search it is not an error if no auth was given; + // to increase compatibility with the existing API it defaults to empty + config = &types.AuthConfig{} + } + } + + image := vars["name"] + + ref, err := reference.ParseAnyReference(image) + if err != nil { + return err + } + namedRef, ok := ref.(reference.Named) + if !ok { + if _, ok := ref.(reference.Digested); ok { + // full image ID + return errors.Errorf("no manifest found for full image ID") + } + return errors.Errorf("unknown image reference format: %s", image) + } + + distrepo, _, err := s.backend.GetRepository(ctx, namedRef, config) + if err != nil { + return err + } + blobsrvc := distrepo.Blobs(ctx) + + if canonicalRef, ok := namedRef.(reference.Canonical); !ok { + namedRef = reference.TagNameOnly(namedRef) + + taggedRef, ok := namedRef.(reference.NamedTagged) + if !ok { + return errors.Errorf("image reference not tagged: %s", image) + } + + descriptor, err := distrepo.Tags(ctx).Get(ctx, taggedRef.Tag()) + if err != nil { + return err + } + distributionInspect.Descriptor = v1.Descriptor{ + MediaType: descriptor.MediaType, + Digest: descriptor.Digest, + Size: descriptor.Size, + } + } else { + // TODO(nishanttotla): Once manifests can be looked up as a blob, the + // descriptor should be set using blobsrvc.Stat(ctx, canonicalRef.Digest()) + // instead of having to manually fill in the fields + distributionInspect.Descriptor.Digest = canonicalRef.Digest() + } + + // we have a digest, so we can retrieve the manifest + mnfstsrvc, err := distrepo.Manifests(ctx) + if err != nil { + return err + } + mnfst, err := mnfstsrvc.Get(ctx, distributionInspect.Descriptor.Digest) + if err != nil { + return err + } + + mediaType, payload, err := mnfst.Payload() + if err != nil { + return err + } + // update MediaType because the registry might return something incorrect + distributionInspect.Descriptor.MediaType = mediaType + if distributionInspect.Descriptor.Size == 0 { + distributionInspect.Descriptor.Size = int64(len(payload)) + } + + // retrieve platform information depending on the type of manifest + switch mnfstObj := mnfst.(type) { + case *manifestlist.DeserializedManifestList: + for _, m := range mnfstObj.Manifests { + distributionInspect.Platforms = append(distributionInspect.Platforms, v1.Platform{ + Architecture: m.Platform.Architecture, + OS: m.Platform.OS, + OSVersion: m.Platform.OSVersion, + OSFeatures: m.Platform.OSFeatures, + Variant: m.Platform.Variant, + }) + } + case *schema2.DeserializedManifest: + configJSON, err := blobsrvc.Get(ctx, mnfstObj.Config.Digest) + var platform v1.Platform + if err == nil { + err := json.Unmarshal(configJSON, &platform) + if err == nil && (platform.OS != "" || platform.Architecture != "") { + distributionInspect.Platforms = append(distributionInspect.Platforms, platform) + } + } + case *schema1.SignedManifest: + platform := v1.Platform{ + Architecture: mnfstObj.Architecture, + OS: "linux", + } + distributionInspect.Platforms = append(distributionInspect.Platforms, platform) + } + + return httputils.WriteJSON(w, http.StatusOK, distributionInspect) +} diff --git a/components/engine/api/server/router/experimental.go b/components/engine/api/server/router/experimental.go new file mode 100644 index 0000000000..51385c2552 --- /dev/null +++ b/components/engine/api/server/router/experimental.go @@ -0,0 +1,67 @@ +package router + +import ( + "errors" + "net/http" + + "golang.org/x/net/context" + + 
apierrors "github.com/docker/docker/api/errors" + "github.com/docker/docker/api/server/httputils" +) + +var ( + errExperimentalFeature = errors.New("This experimental feature is disabled by default. Start the Docker daemon with --experimental in order to enable it.") +) + +// ExperimentalRoute defines an experimental API route that can be enabled or disabled. +type ExperimentalRoute interface { + Route + + Enable() + Disable() +} + +// experimentalRoute defines an experimental API route that can be enabled or disabled. +// It implements ExperimentalRoute +type experimentalRoute struct { + local Route + handler httputils.APIFunc +} + +// Enable enables this experimental route +func (r *experimentalRoute) Enable() { + r.handler = r.local.Handler() +} + +// Disable disables the experimental route +func (r *experimentalRoute) Disable() { + r.handler = experimentalHandler +} + +func experimentalHandler(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + return apierrors.NewErrorWithStatusCode(errExperimentalFeature, http.StatusNotImplemented) +} + +// Handler returns returns the APIFunc to let the server wrap it in middlewares. +func (r *experimentalRoute) Handler() httputils.APIFunc { + return r.handler +} + +// Method returns the http method that the route responds to. +func (r *experimentalRoute) Method() string { + return r.local.Method() +} + +// Path returns the subpath where the route responds to. +func (r *experimentalRoute) Path() string { + return r.local.Path() +} + +// Experimental will mark a route as experimental. +func Experimental(r Route) Route { + return &experimentalRoute{ + local: r, + handler: experimentalHandler, + } +} diff --git a/components/engine/api/server/router/image/backend.go b/components/engine/api/server/router/image/backend.go new file mode 100644 index 0000000000..3b9ed96147 --- /dev/null +++ b/components/engine/api/server/router/image/backend.go @@ -0,0 +1,46 @@ +package image + +import ( + "io" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/image" + "github.com/docker/docker/api/types/registry" + "golang.org/x/net/context" +) + +// Backend is all the methods that need to be implemented +// to provide image specific functionality. 
+type Backend interface { + containerBackend + imageBackend + importExportBackend + registryBackend +} + +type containerBackend interface { + Commit(name string, config *backend.ContainerCommitConfig) (imageID string, err error) +} + +type imageBackend interface { + ImageDelete(imageRef string, force, prune bool) ([]types.ImageDeleteResponseItem, error) + ImageHistory(imageName string) ([]*image.HistoryResponseItem, error) + Images(imageFilters filters.Args, all bool, withExtraAttrs bool) ([]*types.ImageSummary, error) + LookupImage(name string) (*types.ImageInspect, error) + TagImage(imageName, repository, tag string) error + ImagesPrune(ctx context.Context, pruneFilters filters.Args) (*types.ImagesPruneReport, error) +} + +type importExportBackend interface { + LoadImage(inTar io.ReadCloser, outStream io.Writer, quiet bool) error + ImportImage(src string, repository, tag string, msg string, inConfig io.ReadCloser, outStream io.Writer, changes []string) error + ExportImage(names []string, outStream io.Writer) error +} + +type registryBackend interface { + PullImage(ctx context.Context, image, tag string, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error + PushImage(ctx context.Context, image, tag string, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error + SearchRegistryForImages(ctx context.Context, filtersArgs string, term string, limit int, authConfig *types.AuthConfig, metaHeaders map[string][]string) (*registry.SearchResults, error) +} diff --git a/components/engine/api/server/router/image/image.go b/components/engine/api/server/router/image/image.go new file mode 100644 index 0000000000..6c233d900c --- /dev/null +++ b/components/engine/api/server/router/image/image.go @@ -0,0 +1,50 @@ +package image + +import ( + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/api/server/router" +) + +// imageRouter is a router to talk with the image controller +type imageRouter struct { + backend Backend + decoder httputils.ContainerDecoder + routes []router.Route +} + +// NewRouter initializes a new image router +func NewRouter(backend Backend, decoder httputils.ContainerDecoder) router.Router { + r := &imageRouter{ + backend: backend, + decoder: decoder, + } + r.initRoutes() + return r +} + +// Routes returns the available routes to the image controller +func (r *imageRouter) Routes() []router.Route { + return r.routes +} + +// initRoutes initializes the routes in the image router +func (r *imageRouter) initRoutes() { + r.routes = []router.Route{ + // GET + router.NewGetRoute("/images/json", r.getImagesJSON), + router.NewGetRoute("/images/search", r.getImagesSearch), + router.NewGetRoute("/images/get", r.getImagesGet), + router.NewGetRoute("/images/{name:.*}/get", r.getImagesGet), + router.NewGetRoute("/images/{name:.*}/history", r.getImagesHistory), + router.NewGetRoute("/images/{name:.*}/json", r.getImagesByName), + // POST + router.NewPostRoute("/commit", r.postCommit), + router.NewPostRoute("/images/load", r.postImagesLoad), + router.NewPostRoute("/images/create", r.postImagesCreate, router.WithCancel), + router.NewPostRoute("/images/{name:.*}/push", r.postImagesPush, router.WithCancel), + router.NewPostRoute("/images/{name:.*}/tag", r.postImagesTag), + router.NewPostRoute("/images/prune", r.postImagesPrune, router.WithCancel), + // DELETE + router.NewDeleteRoute("/images/{name:.*}", r.deleteImages), + } +} diff --git a/components/engine/api/server/router/image/image_routes.go 
b/components/engine/api/server/router/image/image_routes.go
new file mode 100644
index 0000000000..465182caa1
--- /dev/null
+++ b/components/engine/api/server/router/image/image_routes.go
@@ -0,0 +1,341 @@
+package image
+
+import (
+	"encoding/base64"
+	"encoding/json"
+	"fmt"
+	"io"
+	"net/http"
+	"strconv"
+	"strings"
+
+	"github.com/docker/docker/api/server/httputils"
+	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/api/types/backend"
+	"github.com/docker/docker/api/types/container"
+	"github.com/docker/docker/api/types/filters"
+	"github.com/docker/docker/api/types/versions"
+	"github.com/docker/docker/pkg/ioutils"
+	"github.com/docker/docker/pkg/streamformatter"
+	"github.com/docker/docker/registry"
+	"golang.org/x/net/context"
+)
+
+func (s *imageRouter) postCommit(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+	if err := httputils.ParseForm(r); err != nil {
+		return err
+	}
+
+	if err := httputils.CheckForJSON(r); err != nil {
+		return err
+	}
+
+	cname := r.Form.Get("container")
+
+	pause := httputils.BoolValue(r, "pause")
+	version := httputils.VersionFromContext(ctx)
+	if r.FormValue("pause") == "" && versions.GreaterThanOrEqualTo(version, "1.13") {
+		pause = true
+	}
+
+	c, _, _, err := s.decoder.DecodeConfig(r.Body)
+	if err != nil && err != io.EOF { // Do not fail if the body is empty.
+		return err
+	}
+	if c == nil {
+		c = &container.Config{}
+	}
+
+	commitCfg := &backend.ContainerCommitConfig{
+		ContainerCommitConfig: types.ContainerCommitConfig{
+			Pause:        pause,
+			Repo:         r.Form.Get("repo"),
+			Tag:          r.Form.Get("tag"),
+			Author:       r.Form.Get("author"),
+			Comment:      r.Form.Get("comment"),
+			Config:       c,
+			MergeConfigs: true,
+		},
+		Changes: r.Form["changes"],
+	}
+
+	imgID, err := s.backend.Commit(cname, commitCfg)
+	if err != nil {
+		return err
+	}
+
+	return httputils.WriteJSON(w, http.StatusCreated, &types.IDResponse{
+		ID: string(imgID),
+	})
+}
+
+// postImagesCreate creates an image, either by pulling it from a registry
+// or by importing it from an uploaded source.
+func (s *imageRouter) postImagesCreate(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+	if err := httputils.ParseForm(r); err != nil {
+		return err
+	}
+
+	var (
+		image   = r.Form.Get("fromImage")
+		repo    = r.Form.Get("repo")
+		tag     = r.Form.Get("tag")
+		message = r.Form.Get("message")
+		err     error
+		output  = ioutils.NewWriteFlusher(w)
+	)
+	defer output.Close()
+
+	w.Header().Set("Content-Type", "application/json")
+
+	if image != "" { // pull
+		metaHeaders := map[string][]string{}
+		for k, v := range r.Header {
+			if strings.HasPrefix(k, "X-Meta-") {
+				metaHeaders[k] = v
+			}
+		}
+
+		authEncoded := r.Header.Get("X-Registry-Auth")
+		authConfig := &types.AuthConfig{}
+		if authEncoded != "" {
+			authJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded))
+			if err := json.NewDecoder(authJSON).Decode(authConfig); err != nil {
+				// for a pull it is not an error if no auth was given;
+				// to remain compatible with the existing API, default to an empty auth config
+				authConfig = &types.AuthConfig{}
+			}
+		}
+
+		err = s.backend.PullImage(ctx, image, tag, metaHeaders, authConfig, output)
+	} else { // import
+		src := r.Form.Get("fromSrc")
+		// 'err' MUST NOT be defined within this block, we need any error
+		// generated from the download to be available to the output
+		// stream processing below
+		err = s.backend.ImportImage(src, repo, tag, message, r.Body, output, r.Form["changes"])
+	}
+	if err != nil {
+		if !output.Flushed() {
+			return err
+		}
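+		// The JSON stream has already been flushed, so the HTTP status line
+		// is on the wire; report the failure in-band as a streamformatter
+		// error message rather than through a non-2xx response.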
+		output.Write(streamformatter.FormatError(err))
+	}
+
+	return nil
+}
+
+func (s *imageRouter) postImagesPush(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+	metaHeaders := map[string][]string{}
+	for k, v := range r.Header {
+		if strings.HasPrefix(k, "X-Meta-") {
+			metaHeaders[k] = v
+		}
+	}
+	if err := httputils.ParseForm(r); err != nil {
+		return err
+	}
+	authConfig := &types.AuthConfig{}
+
+	authEncoded := r.Header.Get("X-Registry-Auth")
+	if authEncoded != "" {
+		// the new format is to handle the authConfig as a header
+		authJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded))
+		if err := json.NewDecoder(authJSON).Decode(authConfig); err != nil {
+			// to remain compatible with the existing API, default to an empty auth config
+			authConfig = &types.AuthConfig{}
+		}
+	} else {
+		// the old format is supported for compatibility if there was no authConfig header
+		if err := json.NewDecoder(r.Body).Decode(authConfig); err != nil {
+			return fmt.Errorf("Bad parameters and missing X-Registry-Auth: %v", err)
+		}
+	}
+
+	image := vars["name"]
+	tag := r.Form.Get("tag")
+
+	output := ioutils.NewWriteFlusher(w)
+	defer output.Close()
+
+	w.Header().Set("Content-Type", "application/json")
+
+	if err := s.backend.PushImage(ctx, image, tag, metaHeaders, authConfig, output); err != nil {
+		if !output.Flushed() {
+			return err
+		}
+		output.Write(streamformatter.FormatError(err))
+	}
+	return nil
+}
+
+func (s *imageRouter) getImagesGet(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+	if err := httputils.ParseForm(r); err != nil {
+		return err
+	}
+
+	w.Header().Set("Content-Type", "application/x-tar")
+
+	output := ioutils.NewWriteFlusher(w)
+	defer output.Close()
+	var names []string
+	if name, ok := vars["name"]; ok {
+		names = []string{name}
+	} else {
+		names = r.Form["names"]
+	}
+
+	if err := s.backend.ExportImage(names, output); err != nil {
+		if !output.Flushed() {
+			return err
+		}
+		output.Write(streamformatter.FormatError(err))
+	}
+	return nil
+}
+
+func (s *imageRouter) postImagesLoad(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+	if err := httputils.ParseForm(r); err != nil {
+		return err
+	}
+	quiet := httputils.BoolValueOrDefault(r, "quiet", true)
+
+	w.Header().Set("Content-Type", "application/json")
+
+	output := ioutils.NewWriteFlusher(w)
+	defer output.Close()
+	if err := s.backend.LoadImage(r.Body, output, quiet); err != nil {
+		output.Write(streamformatter.FormatError(err))
+	}
+	return nil
+}
+
+func (s *imageRouter) deleteImages(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+	if err := httputils.ParseForm(r); err != nil {
+		return err
+	}
+
+	name := vars["name"]
+
+	if strings.TrimSpace(name) == "" {
+		return fmt.Errorf("image name cannot be blank")
+	}
+
+	force := httputils.BoolValue(r, "force")
+	prune := !httputils.BoolValue(r, "noprune")
+
+	list, err := s.backend.ImageDelete(name, force, prune)
+	if err != nil {
+		return err
+	}
+
+	return httputils.WriteJSON(w, http.StatusOK, list)
+}
+
+func (s *imageRouter) getImagesByName(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+	imageInspect, err := s.backend.LookupImage(vars["name"])
+	if err != nil {
+		return err
+	}
+
+	return httputils.WriteJSON(w, http.StatusOK, imageInspect)
+}
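+
+// A hypothetical request against the route handled below (sketch):
+//
+//	GET /images/json?filters={"reference":["alpine"]}
+//
+// The structured "filters" parameter is decoded via filters.FromParam; the
+// legacy "filter" parameter is folded into the same "reference" filter.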
+func (s *imageRouter) getImagesJSON(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+	if err := httputils.ParseForm(r); err != nil {
+		return err
+	}
+
+	imageFilters, err := filters.FromParam(r.Form.Get("filters"))
+	if err != nil {
+		return err
+	}
+
+	filterParam := r.Form.Get("filter")
+	// FIXME(vdemeester) This has been deprecated in 1.13, and is targeted for removal in v17.12
+	if filterParam != "" {
+		imageFilters.Add("reference", filterParam)
+	}
+
+	images, err := s.backend.Images(imageFilters, httputils.BoolValue(r, "all"), false)
+	if err != nil {
+		return err
+	}
+
+	return httputils.WriteJSON(w, http.StatusOK, images)
+}
+
+func (s *imageRouter) getImagesHistory(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+	name := vars["name"]
+	history, err := s.backend.ImageHistory(name)
+	if err != nil {
+		return err
+	}
+
+	return httputils.WriteJSON(w, http.StatusOK, history)
+}
+
+func (s *imageRouter) postImagesTag(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+	if err := httputils.ParseForm(r); err != nil {
+		return err
+	}
+	if err := s.backend.TagImage(vars["name"], r.Form.Get("repo"), r.Form.Get("tag")); err != nil {
+		return err
+	}
+	w.WriteHeader(http.StatusCreated)
+	return nil
+}
+
+func (s *imageRouter) getImagesSearch(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+	if err := httputils.ParseForm(r); err != nil {
+		return err
+	}
+	var (
+		config      *types.AuthConfig
+		authEncoded = r.Header.Get("X-Registry-Auth")
+		headers     = map[string][]string{}
+	)
+
+	if authEncoded != "" {
+		authJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded))
+		if err := json.NewDecoder(authJSON).Decode(&config); err != nil {
+			// for a search it is not an error if no auth was given;
+			// to remain compatible with the existing API, default to an empty auth config
+			config = &types.AuthConfig{}
+		}
+	}
+	for k, v := range r.Header {
+		if strings.HasPrefix(k, "X-Meta-") {
+			headers[k] = v
+		}
+	}
+	limit := registry.DefaultSearchLimit
+	if r.Form.Get("limit") != "" {
+		limitValue, err := strconv.Atoi(r.Form.Get("limit"))
+		if err != nil {
+			return err
+		}
+		limit = limitValue
+	}
+	query, err := s.backend.SearchRegistryForImages(ctx, r.Form.Get("filters"), r.Form.Get("term"), limit, config, headers)
+	if err != nil {
+		return err
+	}
+	return httputils.WriteJSON(w, http.StatusOK, query.Results)
+}
+
+func (s *imageRouter) postImagesPrune(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+	if err := httputils.ParseForm(r); err != nil {
+		return err
+	}
+
+	pruneFilters, err := filters.FromParam(r.Form.Get("filters"))
+	if err != nil {
+		return err
+	}
+
+	pruneReport, err := s.backend.ImagesPrune(ctx, pruneFilters)
+	if err != nil {
+		return err
+	}
+	return httputils.WriteJSON(w, http.StatusOK, pruneReport)
+}
diff --git a/components/engine/api/server/router/local.go b/components/engine/api/server/router/local.go
new file mode 100644
index 0000000000..ba70f34132
--- /dev/null
+++ b/components/engine/api/server/router/local.go
@@ -0,0 +1,104 @@
+package router
+
+import (
+	"net/http"
+
+	"github.com/docker/docker/api/server/httputils"
+	"golang.org/x/net/context"
+)
+
+// RouteWrapper wraps a route with extra functionality.
+// It is passed in when creating a new route.
+type RouteWrapper func(r Route) Route
+
+// localRoute defines an individual API route to connect
+// with the docker daemon. It implements Route.
+type localRoute struct {
+	method  string
+	path    string
+	handler httputils.APIFunc
+}
+
+// Handler returns the APIFunc to let the server wrap it in middlewares.
+func (l localRoute) Handler() httputils.APIFunc {
+	return l.handler
+}
+
+// Method returns the http method that the route responds to.
+func (l localRoute) Method() string {
+	return l.method
+}
+
+// Path returns the subpath where the route responds to.
+func (l localRoute) Path() string {
+	return l.path
+}
+
+// NewRoute initializes a new local route for the router.
+func NewRoute(method, path string, handler httputils.APIFunc, opts ...RouteWrapper) Route {
+	var r Route = localRoute{method, path, handler}
+	for _, o := range opts {
+		r = o(r)
+	}
+	return r
+}
+
+// NewGetRoute initializes a new route with the http method GET.
+func NewGetRoute(path string, handler httputils.APIFunc, opts ...RouteWrapper) Route {
+	return NewRoute("GET", path, handler, opts...)
+}
+
+// NewPostRoute initializes a new route with the http method POST.
+func NewPostRoute(path string, handler httputils.APIFunc, opts ...RouteWrapper) Route {
+	return NewRoute("POST", path, handler, opts...)
+}
+
+// NewPutRoute initializes a new route with the http method PUT.
+func NewPutRoute(path string, handler httputils.APIFunc, opts ...RouteWrapper) Route {
+	return NewRoute("PUT", path, handler, opts...)
+}
+
+// NewDeleteRoute initializes a new route with the http method DELETE.
+func NewDeleteRoute(path string, handler httputils.APIFunc, opts ...RouteWrapper) Route {
+	return NewRoute("DELETE", path, handler, opts...)
+}
+
+// NewOptionsRoute initializes a new route with the http method OPTIONS.
+func NewOptionsRoute(path string, handler httputils.APIFunc, opts ...RouteWrapper) Route {
+	return NewRoute("OPTIONS", path, handler, opts...)
+}
+
+// NewHeadRoute initializes a new route with the http method HEAD.
+func NewHeadRoute(path string, handler httputils.APIFunc, opts ...RouteWrapper) Route {
+	return NewRoute("HEAD", path, handler, opts...)
+}
+
+func cancellableHandler(h httputils.APIFunc) httputils.APIFunc {
+	return func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+		if notifier, ok := w.(http.CloseNotifier); ok {
+			notify := notifier.CloseNotify()
+			notifyCtx, cancel := context.WithCancel(ctx)
+			finished := make(chan struct{})
+			defer close(finished)
+			ctx = notifyCtx
+			go func() {
+				select {
+				case <-notify:
+					cancel()
+				case <-finished:
+				}
+			}()
+		}
+		return h(ctx, w, r, vars)
+	}
+}
+
+// WithCancel makes a new route that embeds the http.CloseNotifier feature
+// into the context.Context of the handler.
+func WithCancel(r Route) Route {
+	return localRoute{
+		method:  r.Method(),
+		path:    r.Path(),
+		handler: cancellableHandler(r.Handler()),
+	}
+}
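Taken together with experimental.go above, these constructors and wrappers compose. A minimal sketch, not part of this diff (fooHandler and the path are hypothetical, for illustration only):

```go
package main

import (
	"net/http"

	"github.com/docker/docker/api/server/httputils"
	"github.com/docker/docker/api/server/router"
	"golang.org/x/net/context"
)

// fooHandler is a hypothetical APIFunc used only for illustration.
func fooHandler(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
	return httputils.WriteJSON(w, http.StatusOK, map[string]string{"status": "ok"})
}

func main() {
	// WithCancel cancels the handler's context when the client disconnects;
	// Experimental gates the route until the daemon enables experimental mode.
	route := router.Experimental(router.NewPostRoute("/foo/action", fooHandler, router.WithCancel))
	_, _, _ = route.Method(), route.Path(), route.Handler()
}
```

Because each wrapper returns another Route, they can be stacked in any order at registration time.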
diff --git a/components/engine/api/server/router/network/backend.go b/components/engine/api/server/router/network/backend.go
new file mode 100644
index 0000000000..a32a0b9c00
--- /dev/null
+++ b/components/engine/api/server/router/network/backend.go
@@ -0,0 +1,22 @@
+package network
+
+import (
+	"golang.org/x/net/context"
+
+	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/api/types/filters"
+	"github.com/docker/docker/api/types/network"
+	"github.com/docker/libnetwork"
+)
+
+// Backend is all the methods that need to be implemented
+// to provide network specific functionality.
+type Backend interface {
+	FindNetwork(idName string) (libnetwork.Network, error)
+	GetNetworks() []libnetwork.Network
+	CreateNetwork(nc types.NetworkCreateRequest) (*types.NetworkCreateResponse, error)
+	ConnectContainerToNetwork(containerName, networkName string, endpointConfig *network.EndpointSettings) error
+	DisconnectContainerFromNetwork(containerName string, networkName string, force bool) error
+	DeleteNetwork(name string) error
+	NetworksPrune(ctx context.Context, pruneFilters filters.Args) (*types.NetworksPruneReport, error)
+}
diff --git a/components/engine/api/server/router/network/filter.go b/components/engine/api/server/router/network/filter.go
new file mode 100644
index 0000000000..afe4e235e3
--- /dev/null
+++ b/components/engine/api/server/router/network/filter.go
@@ -0,0 +1,87 @@
+package network
+
+import (
+	"fmt"
+
+	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/api/types/filters"
+	"github.com/docker/docker/runconfig"
+)
+
+func filterNetworkByType(nws []types.NetworkResource, netType string) ([]types.NetworkResource, error) {
+	retNws := []types.NetworkResource{}
+	switch netType {
+	case "builtin":
+		for _, nw := range nws {
+			if runconfig.IsPreDefinedNetwork(nw.Name) {
+				retNws = append(retNws, nw)
+			}
+		}
+	case "custom":
+		for _, nw := range nws {
+			if !runconfig.IsPreDefinedNetwork(nw.Name) {
+				retNws = append(retNws, nw)
+			}
+		}
+	default:
+		return nil, fmt.Errorf("Invalid filter: 'type'='%s'", netType)
+	}
+	return retNws, nil
+}
+
+// filterNetworks filters the network list according to the user-specified
+// filter and returns the matching networks
+func filterNetworks(nws []types.NetworkResource, filter filters.Args) ([]types.NetworkResource, error) {
+	// if the filter is empty, return the original network list
+	if filter.Len() == 0 {
+		return nws, nil
+	}
+
+	displayNet := []types.NetworkResource{}
+	for _, nw := range nws {
+		if filter.Include("driver") {
+			if !filter.ExactMatch("driver", nw.Driver) {
+				continue
+			}
+		}
+		if filter.Include("name") {
+			if !filter.Match("name", nw.Name) {
+				continue
+			}
+		}
+		if filter.Include("id") {
+			if !filter.Match("id", nw.ID) {
+				continue
+			}
+		}
+		if filter.Include("label") {
+			if !filter.MatchKVList("label", nw.Labels) {
+				continue
+			}
+		}
+		if filter.Include("scope") {
+			if !filter.ExactMatch("scope", nw.Scope) {
+				continue
+			}
+		}
+		displayNet = append(displayNet, nw)
+	}
+
+	if filter.Include("type") {
+		typeNet := []types.NetworkResource{}
+		errFilter := filter.WalkValues("type", func(fval string) error {
+			passList, err := filterNetworkByType(displayNet, fval)
+			if err != nil {
+				return err
+			}
+			typeNet = append(typeNet, passList...)
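+			// Values of the "type" filter are OR'ed: the matches for each
+			// requested type are accumulated into typeNet.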
+ return nil + }) + if errFilter != nil { + return nil, errFilter + } + displayNet = typeNet + } + + return displayNet, nil +} diff --git a/components/engine/api/server/router/network/filter_test.go b/components/engine/api/server/router/network/filter_test.go new file mode 100644 index 0000000000..af3e949546 --- /dev/null +++ b/components/engine/api/server/router/network/filter_test.go @@ -0,0 +1,149 @@ +// +build !windows + +package network + +import ( + "strings" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" +) + +func TestFilterNetworks(t *testing.T) { + networks := []types.NetworkResource{ + { + Name: "host", + Driver: "host", + Scope: "local", + }, + { + Name: "bridge", + Driver: "bridge", + Scope: "local", + }, + { + Name: "none", + Driver: "null", + Scope: "local", + }, + { + Name: "myoverlay", + Driver: "overlay", + Scope: "swarm", + }, + { + Name: "mydrivernet", + Driver: "mydriver", + Scope: "local", + }, + { + Name: "mykvnet", + Driver: "mykvdriver", + Scope: "global", + }, + } + + bridgeDriverFilters := filters.NewArgs() + bridgeDriverFilters.Add("driver", "bridge") + + overlayDriverFilters := filters.NewArgs() + overlayDriverFilters.Add("driver", "overlay") + + nonameDriverFilters := filters.NewArgs() + nonameDriverFilters.Add("driver", "noname") + + customDriverFilters := filters.NewArgs() + customDriverFilters.Add("type", "custom") + + builtinDriverFilters := filters.NewArgs() + builtinDriverFilters.Add("type", "builtin") + + invalidDriverFilters := filters.NewArgs() + invalidDriverFilters.Add("type", "invalid") + + localScopeFilters := filters.NewArgs() + localScopeFilters.Add("scope", "local") + + swarmScopeFilters := filters.NewArgs() + swarmScopeFilters.Add("scope", "swarm") + + globalScopeFilters := filters.NewArgs() + globalScopeFilters.Add("scope", "global") + + testCases := []struct { + filter filters.Args + resultCount int + err string + }{ + { + filter: bridgeDriverFilters, + resultCount: 1, + err: "", + }, + { + filter: overlayDriverFilters, + resultCount: 1, + err: "", + }, + { + filter: nonameDriverFilters, + resultCount: 0, + err: "", + }, + { + filter: customDriverFilters, + resultCount: 3, + err: "", + }, + { + filter: builtinDriverFilters, + resultCount: 3, + err: "", + }, + { + filter: invalidDriverFilters, + resultCount: 0, + err: "Invalid filter: 'type'='invalid'", + }, + { + filter: localScopeFilters, + resultCount: 4, + err: "", + }, + { + filter: swarmScopeFilters, + resultCount: 1, + err: "", + }, + { + filter: globalScopeFilters, + resultCount: 1, + err: "", + }, + } + + for _, testCase := range testCases { + result, err := filterNetworks(networks, testCase.filter) + if testCase.err != "" { + if err == nil { + t.Fatalf("expect error '%s', got no error", testCase.err) + + } else if !strings.Contains(err.Error(), testCase.err) { + t.Fatalf("expect error '%s', got '%s'", testCase.err, err) + } + } else { + if err != nil { + t.Fatalf("expect no error, got error '%s'", err) + } + // Make sure result is not nil + if result == nil { + t.Fatal("filterNetworks should not return nil") + } + + if len(result) != testCase.resultCount { + t.Fatalf("expect '%d' networks, got '%d' networks", testCase.resultCount, len(result)) + } + } + } +} diff --git a/components/engine/api/server/router/network/network.go b/components/engine/api/server/router/network/network.go new file mode 100644 index 0000000000..eaf52aa2a9 --- /dev/null +++ b/components/engine/api/server/router/network/network.go @@ -0,0 +1,44 @@ +package 
network
+
+import (
+	"github.com/docker/docker/api/server/router"
+	"github.com/docker/docker/daemon/cluster"
+)
+
+// networkRouter is a router to talk with the network controller
+type networkRouter struct {
+	backend Backend
+	cluster *cluster.Cluster
+	routes  []router.Route
+}
+
+// NewRouter initializes a new network router
+func NewRouter(b Backend, c *cluster.Cluster) router.Router {
+	r := &networkRouter{
+		backend: b,
+		cluster: c,
+	}
+	r.initRoutes()
+	return r
+}
+
+// Routes returns the available routes to the network controller
+func (r *networkRouter) Routes() []router.Route {
+	return r.routes
+}
+
+func (r *networkRouter) initRoutes() {
+	r.routes = []router.Route{
+		// GET
+		router.NewGetRoute("/networks", r.getNetworksList),
+		router.NewGetRoute("/networks/", r.getNetworksList),
+		router.NewGetRoute("/networks/{id:.+}", r.getNetwork),
+		// POST
+		router.NewPostRoute("/networks/create", r.postNetworkCreate),
+		router.NewPostRoute("/networks/{id:.*}/connect", r.postNetworkConnect),
+		router.NewPostRoute("/networks/{id:.*}/disconnect", r.postNetworkDisconnect),
+		router.NewPostRoute("/networks/prune", r.postNetworksPrune, router.WithCancel),
+		// DELETE
+		router.NewDeleteRoute("/networks/{id:.*}", r.deleteNetwork),
+	}
+}
diff --git a/components/engine/api/server/router/network/network_routes.go b/components/engine/api/server/router/network/network_routes.go
new file mode 100644
index 0000000000..05faffddb1
--- /dev/null
+++ b/components/engine/api/server/router/network/network_routes.go
@@ -0,0 +1,461 @@
+package network
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/http"
+	"strconv"
+	"strings"
+
+	"golang.org/x/net/context"
+
+	"github.com/docker/docker/api/errors"
+	"github.com/docker/docker/api/server/httputils"
+	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/api/types/filters"
+	"github.com/docker/docker/api/types/network"
+	"github.com/docker/docker/api/types/versions"
+	"github.com/docker/libnetwork"
+	"github.com/docker/libnetwork/networkdb"
+)
+
+var (
+	// acceptedNetworkFilters is a list of acceptable filters
+	acceptedNetworkFilters = map[string]bool{
+		"driver": true,
+		"type":   true,
+		"name":   true,
+		"id":     true,
+		"label":  true,
+		"scope":  true,
+	}
+)
+
+func (n *networkRouter) getNetworksList(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+	if err := httputils.ParseForm(r); err != nil {
+		return err
+	}
+
+	filter := r.Form.Get("filters")
+	netFilters, err := filters.FromParam(filter)
+	if err != nil {
+		return err
+	}
+
+	if err := netFilters.Validate(acceptedNetworkFilters); err != nil {
+		return err
+	}
+
+	list := []types.NetworkResource{}
+
+	if nr, err := n.cluster.GetNetworks(); err == nil {
+		list = append(list, nr...)
+	}
+
+	// Combine the network list returned by the Docker daemon if it is not
+	// already returned by the cluster manager
+SKIP:
+	for _, nw := range n.backend.GetNetworks() {
+		for _, nl := range list {
+			if nl.ID == nw.ID() {
+				continue SKIP
+			}
+		}
+
+		var nr *types.NetworkResource
+		// Versions < 1.28 fetch all the containers attached to a network
+		// in a network list API call, which is a heavyweight operation when
+		// run across all the networks. Starting with API version 1.28, this
+		// detailed info is available through the network-specific GET API
+		// (equivalent to inspect).
+		if versions.LessThan(httputils.VersionFromContext(ctx), "1.28") {
+			nr = n.buildDetailedNetworkResources(nw, false)
+		} else {
+			nr = n.buildNetworkResource(nw)
+		}
+		list = append(list, *nr)
+	}
+
+	list, err = filterNetworks(list, netFilters)
+	if err != nil {
+		return err
+	}
+	return httputils.WriteJSON(w, http.StatusOK, list)
+}
+
+func (n *networkRouter) getNetwork(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+	if err := httputils.ParseForm(r); err != nil {
+		return err
+	}
+
+	term := vars["id"]
+	var (
+		verbose bool
+		err     error
+	)
+	if v := r.URL.Query().Get("verbose"); v != "" {
+		if verbose, err = strconv.ParseBool(v); err != nil {
+			err = fmt.Errorf("invalid value for verbose: %s", v)
+			return errors.NewBadRequestError(err)
+		}
+	}
+
+	// In case multiple networks have duplicate names, return an error.
+	// TODO (yongtang): should we wrap with version here for backward compatibility?
+
+	// First find based on the full ID, and return immediately once one is found.
+	// If a network appears in both the swarm and local scopes, assume it is local first.
+
+	// For a full name and a partial ID, save the result first, and process later
+	// in case multiple records were found based on the same term
+	listByFullName := map[string]types.NetworkResource{}
+	listByPartialID := map[string]types.NetworkResource{}
+
+	nw := n.backend.GetNetworks()
+	for _, network := range nw {
+		if network.ID() == term {
+			return httputils.WriteJSON(w, http.StatusOK, *n.buildDetailedNetworkResources(network, verbose))
+		}
+		if network.Name() == term {
+			// No need to check the ID collision here as we are still in
+			// local scope and the network ID is unique in this scope.
+			listByFullName[network.ID()] = *n.buildDetailedNetworkResources(network, verbose)
+		}
+		if strings.HasPrefix(network.ID(), term) {
+			// No need to check the ID collision here as we are still in
+			// local scope and the network ID is unique in this scope.
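+			// Several local networks may share this ID prefix; any
+			// ambiguity is reported after both scopes have been scanned.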
+			listByPartialID[network.ID()] = *n.buildDetailedNetworkResources(network, verbose)
+		}
+	}
+
+	nr, _ := n.cluster.GetNetworks()
+	for _, network := range nr {
+		if network.ID == term {
+			return httputils.WriteJSON(w, http.StatusOK, network)
+		}
+		if network.Name == term {
+			// Check for an ID collision as we are in swarm scope here, and
+			// the map (listByFullName) may already hold a network with the
+			// same ID (from the local scope previously)
+			if _, ok := listByFullName[network.ID]; !ok {
+				listByFullName[network.ID] = network
+			}
+		}
+		if strings.HasPrefix(network.ID, term) {
+			// Check for an ID collision as we are in swarm scope here, and
+			// the map (listByPartialID) may already hold a network with the
+			// same ID (from the local scope previously)
+			if _, ok := listByPartialID[network.ID]; !ok {
+				listByPartialID[network.ID] = network
+			}
+		}
+	}
+
+	// Find based on the full name; return the network only if exactly one matched
+	if len(listByFullName) == 1 {
+		for _, v := range listByFullName {
+			return httputils.WriteJSON(w, http.StatusOK, v)
+		}
+	}
+	if len(listByFullName) > 1 {
+		return fmt.Errorf("network %s is ambiguous (%d matches found based on name)", term, len(listByFullName))
+	}
+
+	// Find based on the partial ID; return the network only if exactly one matched
+	if len(listByPartialID) == 1 {
+		for _, v := range listByPartialID {
+			return httputils.WriteJSON(w, http.StatusOK, v)
+		}
+	}
+	if len(listByPartialID) > 1 {
+		return fmt.Errorf("network %s is ambiguous (%d matches found based on ID prefix)", term, len(listByPartialID))
+	}
+
+	return libnetwork.ErrNoSuchNetwork(term)
+}
+
+func (n *networkRouter) postNetworkCreate(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+	var create types.NetworkCreateRequest
+
+	if err := httputils.ParseForm(r); err != nil {
+		return err
+	}
+
+	if err := httputils.CheckForJSON(r); err != nil {
+		return err
+	}
+
+	if err := json.NewDecoder(r.Body).Decode(&create); err != nil {
+		return err
+	}
+
+	if nws, err := n.cluster.GetNetworksByName(create.Name); err == nil && len(nws) > 0 {
+		return libnetwork.NetworkNameError(create.Name)
+	}
+
+	nw, err := n.backend.CreateNetwork(create)
+	if err != nil {
+		var warning string
+		if _, ok := err.(libnetwork.NetworkNameError); ok {
+			// if the user set CheckDuplicate to true, return the error;
+			// otherwise prepare a warning message
+			if create.CheckDuplicate {
+				return libnetwork.NetworkNameError(create.Name)
+			}
+			warning = libnetwork.NetworkNameError(create.Name).Error()
+		}
+
+		if _, ok := err.(libnetwork.ManagerRedirectError); !ok {
+			return err
+		}
+		id, err := n.cluster.CreateNetwork(create)
+		if err != nil {
+			return err
+		}
+		nw = &types.NetworkCreateResponse{
+			ID:      id,
+			Warning: warning,
+		}
+	}
+
+	return httputils.WriteJSON(w, http.StatusCreated, nw)
+}
+
+func (n *networkRouter) postNetworkConnect(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+	var connect types.NetworkConnect
+	if err := httputils.ParseForm(r); err != nil {
+		return err
+	}
+
+	if err := httputils.CheckForJSON(r); err != nil {
+		return err
+	}
+
+	if err := json.NewDecoder(r.Body).Decode(&connect); err != nil {
+		return err
+	}
+
+	return n.backend.ConnectContainerToNetwork(connect.Container, vars["id"], connect.EndpointConfig)
+}
+
+func (n *networkRouter) postNetworkDisconnect(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+	var disconnect types.NetworkDisconnect
+	if err := 
httputils.ParseForm(r); err != nil { + return err + } + + if err := httputils.CheckForJSON(r); err != nil { + return err + } + + if err := json.NewDecoder(r.Body).Decode(&disconnect); err != nil { + return err + } + + return n.backend.DisconnectContainerFromNetwork(disconnect.Container, vars["id"], disconnect.Force) +} + +func (n *networkRouter) deleteNetwork(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + if _, err := n.cluster.GetNetwork(vars["id"]); err == nil { + if err = n.cluster.RemoveNetwork(vars["id"]); err != nil { + return err + } + w.WriteHeader(http.StatusNoContent) + return nil + } + if err := n.backend.DeleteNetwork(vars["id"]); err != nil { + return err + } + w.WriteHeader(http.StatusNoContent) + return nil +} + +func (n *networkRouter) buildNetworkResource(nw libnetwork.Network) *types.NetworkResource { + r := &types.NetworkResource{} + if nw == nil { + return r + } + + info := nw.Info() + r.Name = nw.Name() + r.ID = nw.ID() + r.Created = info.Created() + r.Scope = info.Scope() + r.Driver = nw.Type() + r.EnableIPv6 = info.IPv6Enabled() + r.Internal = info.Internal() + r.Attachable = info.Attachable() + r.Ingress = info.Ingress() + r.Options = info.DriverOptions() + r.Containers = make(map[string]types.EndpointResource) + buildIpamResources(r, info) + r.Labels = info.Labels() + r.ConfigOnly = info.ConfigOnly() + + if cn := info.ConfigFrom(); cn != "" { + r.ConfigFrom = network.ConfigReference{Network: cn} + } + + peers := info.Peers() + if len(peers) != 0 { + r.Peers = buildPeerInfoResources(peers) + } + + return r +} + +func (n *networkRouter) buildDetailedNetworkResources(nw libnetwork.Network, verbose bool) *types.NetworkResource { + if nw == nil { + return &types.NetworkResource{} + } + + r := n.buildNetworkResource(nw) + epl := nw.Endpoints() + for _, e := range epl { + ei := e.Info() + if ei == nil { + continue + } + sb := ei.Sandbox() + tmpID := e.ID() + key := "ep-" + tmpID + if sb != nil { + key = sb.ContainerID() + } + + r.Containers[key] = buildEndpointResource(tmpID, e.Name(), ei) + } + if !verbose { + return r + } + services := nw.Info().Services() + r.Services = make(map[string]network.ServiceInfo) + for name, service := range services { + tasks := []network.Task{} + for _, t := range service.Tasks { + tasks = append(tasks, network.Task{ + Name: t.Name, + EndpointID: t.EndpointID, + EndpointIP: t.EndpointIP, + Info: t.Info, + }) + } + r.Services[name] = network.ServiceInfo{ + VIP: service.VIP, + Ports: service.Ports, + Tasks: tasks, + LocalLBIndex: service.LocalLBIndex, + } + } + return r +} + +func buildPeerInfoResources(peers []networkdb.PeerInfo) []network.PeerInfo { + peerInfo := make([]network.PeerInfo, 0, len(peers)) + for _, peer := range peers { + peerInfo = append(peerInfo, network.PeerInfo{ + Name: peer.Name, + IP: peer.IP, + }) + } + return peerInfo +} + +func buildIpamResources(r *types.NetworkResource, nwInfo libnetwork.NetworkInfo) { + id, opts, ipv4conf, ipv6conf := nwInfo.IpamConfig() + + ipv4Info, ipv6Info := nwInfo.IpamInfo() + + r.IPAM.Driver = id + + r.IPAM.Options = opts + + r.IPAM.Config = []network.IPAMConfig{} + for _, ip4 := range ipv4conf { + if ip4.PreferredPool == "" { + continue + } + iData := network.IPAMConfig{} + iData.Subnet = ip4.PreferredPool + iData.IPRange = ip4.SubPool + iData.Gateway = ip4.Gateway + iData.AuxAddress = ip4.AuxAddresses + r.IPAM.Config = append(r.IPAM.Config, iData) + } + + if len(r.IPAM.Config) 
== 0 { + for _, ip4Info := range ipv4Info { + iData := network.IPAMConfig{} + iData.Subnet = ip4Info.IPAMData.Pool.String() + iData.Gateway = ip4Info.IPAMData.Gateway.IP.String() + r.IPAM.Config = append(r.IPAM.Config, iData) + } + } + + hasIpv6Conf := false + for _, ip6 := range ipv6conf { + if ip6.PreferredPool == "" { + continue + } + hasIpv6Conf = true + iData := network.IPAMConfig{} + iData.Subnet = ip6.PreferredPool + iData.IPRange = ip6.SubPool + iData.Gateway = ip6.Gateway + iData.AuxAddress = ip6.AuxAddresses + r.IPAM.Config = append(r.IPAM.Config, iData) + } + + if !hasIpv6Conf { + for _, ip6Info := range ipv6Info { + iData := network.IPAMConfig{} + iData.Subnet = ip6Info.IPAMData.Pool.String() + iData.Gateway = ip6Info.IPAMData.Gateway.String() + r.IPAM.Config = append(r.IPAM.Config, iData) + } + } +} + +func buildEndpointResource(id string, name string, info libnetwork.EndpointInfo) types.EndpointResource { + er := types.EndpointResource{} + + er.EndpointID = id + er.Name = name + ei := info + if ei == nil { + return er + } + + if iface := ei.Iface(); iface != nil { + if mac := iface.MacAddress(); mac != nil { + er.MacAddress = mac.String() + } + if ip := iface.Address(); ip != nil && len(ip.IP) > 0 { + er.IPv4Address = ip.String() + } + + if ipv6 := iface.AddressIPv6(); ipv6 != nil && len(ipv6.IP) > 0 { + er.IPv6Address = ipv6.String() + } + } + return er +} + +func (n *networkRouter) postNetworksPrune(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + pruneFilters, err := filters.FromParam(r.Form.Get("filters")) + if err != nil { + return err + } + + pruneReport, err := n.backend.NetworksPrune(ctx, pruneFilters) + if err != nil { + return err + } + return httputils.WriteJSON(w, http.StatusOK, pruneReport) +} diff --git a/components/engine/api/server/router/plugin/backend.go b/components/engine/api/server/router/plugin/backend.go new file mode 100644 index 0000000000..a5f3c9790a --- /dev/null +++ b/components/engine/api/server/router/plugin/backend.go @@ -0,0 +1,26 @@ +package plugin + +import ( + "io" + "net/http" + + "github.com/docker/distribution/reference" + enginetypes "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "golang.org/x/net/context" +) + +// Backend for Plugin +type Backend interface { + Disable(name string, config *enginetypes.PluginDisableConfig) error + Enable(name string, config *enginetypes.PluginEnableConfig) error + List(filters.Args) ([]enginetypes.Plugin, error) + Inspect(name string) (*enginetypes.Plugin, error) + Remove(name string, config *enginetypes.PluginRmConfig) error + Set(name string, args []string) error + Privileges(ctx context.Context, ref reference.Named, metaHeaders http.Header, authConfig *enginetypes.AuthConfig) (enginetypes.PluginPrivileges, error) + Pull(ctx context.Context, ref reference.Named, name string, metaHeaders http.Header, authConfig *enginetypes.AuthConfig, privileges enginetypes.PluginPrivileges, outStream io.Writer) error + Push(ctx context.Context, name string, metaHeaders http.Header, authConfig *enginetypes.AuthConfig, outStream io.Writer) error + Upgrade(ctx context.Context, ref reference.Named, name string, metaHeaders http.Header, authConfig *enginetypes.AuthConfig, privileges enginetypes.PluginPrivileges, outStream io.Writer) error + CreateFromContext(ctx context.Context, tarCtx io.ReadCloser, options *enginetypes.PluginCreateOptions) error +} diff --git 
a/components/engine/api/server/router/plugin/plugin.go b/components/engine/api/server/router/plugin/plugin.go
new file mode 100644
index 0000000000..22819e27a9
--- /dev/null
+++ b/components/engine/api/server/router/plugin/plugin.go
@@ -0,0 +1,39 @@
+package plugin
+
+import "github.com/docker/docker/api/server/router"
+
+// pluginRouter is a router to talk with the plugin controller
+type pluginRouter struct {
+	backend Backend
+	routes  []router.Route
+}
+
+// NewRouter initializes a new plugin router
+func NewRouter(b Backend) router.Router {
+	r := &pluginRouter{
+		backend: b,
+	}
+	r.initRoutes()
+	return r
+}
+
+// Routes returns the available routes to the plugin controller
+func (r *pluginRouter) Routes() []router.Route {
+	return r.routes
+}
+
+func (r *pluginRouter) initRoutes() {
+	r.routes = []router.Route{
+		router.NewGetRoute("/plugins", r.listPlugins),
+		router.NewGetRoute("/plugins/{name:.*}/json", r.inspectPlugin),
+		router.NewGetRoute("/plugins/privileges", r.getPrivileges),
+		router.NewDeleteRoute("/plugins/{name:.*}", r.removePlugin),
+		router.NewPostRoute("/plugins/{name:.*}/enable", r.enablePlugin), // PATCH?
+		router.NewPostRoute("/plugins/{name:.*}/disable", r.disablePlugin),
+		router.NewPostRoute("/plugins/pull", r.pullPlugin, router.WithCancel),
+		router.NewPostRoute("/plugins/{name:.*}/push", r.pushPlugin, router.WithCancel),
+		router.NewPostRoute("/plugins/{name:.*}/upgrade", r.upgradePlugin, router.WithCancel),
+		router.NewPostRoute("/plugins/{name:.*}/set", r.setPlugin),
+		router.NewPostRoute("/plugins/create", r.createPlugin),
+	}
+}
diff --git a/components/engine/api/server/router/plugin/plugin_routes.go b/components/engine/api/server/router/plugin/plugin_routes.go
new file mode 100644
index 0000000000..79e3cf5de8
--- /dev/null
+++ b/components/engine/api/server/router/plugin/plugin_routes.go
@@ -0,0 +1,310 @@
+package plugin
+
+import (
+	"encoding/base64"
+	"encoding/json"
+	"net/http"
+	"strconv"
+	"strings"
+
+	"github.com/docker/distribution/reference"
+	"github.com/docker/docker/api/server/httputils"
+	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/api/types/filters"
+	"github.com/docker/docker/pkg/ioutils"
+	"github.com/docker/docker/pkg/streamformatter"
+	"github.com/pkg/errors"
+	"golang.org/x/net/context"
+)
+
+func parseHeaders(headers http.Header) (map[string][]string, *types.AuthConfig) {
+	metaHeaders := map[string][]string{}
+	for k, v := range headers {
+		if strings.HasPrefix(k, "X-Meta-") {
+			metaHeaders[k] = v
+		}
+	}
+
+	// Get X-Registry-Auth
+	authEncoded := headers.Get("X-Registry-Auth")
+	authConfig := &types.AuthConfig{}
+	if authEncoded != "" {
+		authJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded))
+		if err := json.NewDecoder(authJSON).Decode(authConfig); err != nil {
+			authConfig = &types.AuthConfig{}
+		}
+	}
+
+	return metaHeaders, authConfig
+}
+
+// parseRemoteRef parses the remote reference into a reference.Named,
+// also returning the tag associated with the reference. If the given
+// reference string includes both a digest and a tag, the returned
+// reference keeps the digest and drops the tag, but the tag is still
+// returned as the second value.
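+//
+// For example (a sketch; the repository name and digest are placeholders):
+//
+//	parseRemoteRef("example.com/p:v1@sha256:<digest>") // => "example.com/p@sha256:<digest>", "v1"
+//	parseRemoteRef("example.com/p")                    // => "example.com/p:latest", ""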
+func parseRemoteRef(remote string) (reference.Named, string, error) { + // Parse remote reference, supporting remotes with name and tag + remoteRef, err := reference.ParseNormalizedNamed(remote) + if err != nil { + return nil, "", err + } + + type canonicalWithTag interface { + reference.Canonical + Tag() string + } + + if canonical, ok := remoteRef.(canonicalWithTag); ok { + remoteRef, err = reference.WithDigest(reference.TrimNamed(remoteRef), canonical.Digest()) + if err != nil { + return nil, "", err + } + return remoteRef, canonical.Tag(), nil + } + + remoteRef = reference.TagNameOnly(remoteRef) + + return remoteRef, "", nil +} + +func (pr *pluginRouter) getPrivileges(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + metaHeaders, authConfig := parseHeaders(r.Header) + + ref, _, err := parseRemoteRef(r.FormValue("remote")) + if err != nil { + return err + } + + privileges, err := pr.backend.Privileges(ctx, ref, metaHeaders, authConfig) + if err != nil { + return err + } + return httputils.WriteJSON(w, http.StatusOK, privileges) +} + +func (pr *pluginRouter) upgradePlugin(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return errors.Wrap(err, "failed to parse form") + } + + var privileges types.PluginPrivileges + dec := json.NewDecoder(r.Body) + if err := dec.Decode(&privileges); err != nil { + return errors.Wrap(err, "failed to parse privileges") + } + if dec.More() { + return errors.New("invalid privileges") + } + + metaHeaders, authConfig := parseHeaders(r.Header) + ref, tag, err := parseRemoteRef(r.FormValue("remote")) + if err != nil { + return err + } + + name, err := getName(ref, tag, vars["name"]) + if err != nil { + return err + } + w.Header().Set("Docker-Plugin-Name", name) + + w.Header().Set("Content-Type", "application/json") + output := ioutils.NewWriteFlusher(w) + + if err := pr.backend.Upgrade(ctx, ref, name, metaHeaders, authConfig, privileges, output); err != nil { + if !output.Flushed() { + return err + } + output.Write(streamformatter.FormatError(err)) + } + + return nil +} + +func (pr *pluginRouter) pullPlugin(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return errors.Wrap(err, "failed to parse form") + } + + var privileges types.PluginPrivileges + dec := json.NewDecoder(r.Body) + if err := dec.Decode(&privileges); err != nil { + return errors.Wrap(err, "failed to parse privileges") + } + if dec.More() { + return errors.New("invalid privileges") + } + + metaHeaders, authConfig := parseHeaders(r.Header) + ref, tag, err := parseRemoteRef(r.FormValue("remote")) + if err != nil { + return err + } + + name, err := getName(ref, tag, r.FormValue("name")) + if err != nil { + return err + } + w.Header().Set("Docker-Plugin-Name", name) + + w.Header().Set("Content-Type", "application/json") + output := ioutils.NewWriteFlusher(w) + + if err := pr.backend.Pull(ctx, ref, name, metaHeaders, authConfig, privileges, output); err != nil { + if !output.Flushed() { + return err + } + output.Write(streamformatter.FormatError(err)) + } + + return nil +} + +func getName(ref reference.Named, tag, name string) (string, error) { + if name == "" { + if _, ok := ref.(reference.Canonical); ok { + trimmed := reference.TrimNamed(ref) + if tag != "" { + nt, err := reference.WithTag(trimmed, tag) 
+ if err != nil { + return "", err + } + name = reference.FamiliarString(nt) + } else { + name = reference.FamiliarString(reference.TagNameOnly(trimmed)) + } + } else { + name = reference.FamiliarString(ref) + } + } else { + localRef, err := reference.ParseNormalizedNamed(name) + if err != nil { + return "", err + } + if _, ok := localRef.(reference.Canonical); ok { + return "", errors.New("cannot use digest in plugin tag") + } + if reference.IsNameOnly(localRef) { + // TODO: log change in name to out stream + name = reference.FamiliarString(reference.TagNameOnly(localRef)) + } + } + return name, nil +} + +func (pr *pluginRouter) createPlugin(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + options := &types.PluginCreateOptions{ + RepoName: r.FormValue("name")} + + if err := pr.backend.CreateFromContext(ctx, r.Body, options); err != nil { + return err + } + //TODO: send progress bar + w.WriteHeader(http.StatusNoContent) + return nil +} + +func (pr *pluginRouter) enablePlugin(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + name := vars["name"] + timeout, err := strconv.Atoi(r.Form.Get("timeout")) + if err != nil { + return err + } + config := &types.PluginEnableConfig{Timeout: timeout} + + return pr.backend.Enable(name, config) +} + +func (pr *pluginRouter) disablePlugin(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + name := vars["name"] + config := &types.PluginDisableConfig{ + ForceDisable: httputils.BoolValue(r, "force"), + } + + return pr.backend.Disable(name, config) +} + +func (pr *pluginRouter) removePlugin(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + name := vars["name"] + config := &types.PluginRmConfig{ + ForceRemove: httputils.BoolValue(r, "force"), + } + return pr.backend.Remove(name, config) +} + +func (pr *pluginRouter) pushPlugin(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return errors.Wrap(err, "failed to parse form") + } + + metaHeaders, authConfig := parseHeaders(r.Header) + + w.Header().Set("Content-Type", "application/json") + output := ioutils.NewWriteFlusher(w) + + if err := pr.backend.Push(ctx, vars["name"], metaHeaders, authConfig, output); err != nil { + if !output.Flushed() { + return err + } + output.Write(streamformatter.FormatError(err)) + } + return nil +} + +func (pr *pluginRouter) setPlugin(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + var args []string + if err := json.NewDecoder(r.Body).Decode(&args); err != nil { + return err + } + if err := pr.backend.Set(vars["name"], args); err != nil { + return err + } + w.WriteHeader(http.StatusNoContent) + return nil +} + +func (pr *pluginRouter) listPlugins(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + pluginFilters, err := filters.FromParam(r.Form.Get("filters")) + if err != nil { + return err + } + l, err := pr.backend.List(pluginFilters) + if err != nil { + return err + } + return 
httputils.WriteJSON(w, http.StatusOK, l) +} + +func (pr *pluginRouter) inspectPlugin(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + result, err := pr.backend.Inspect(vars["name"]) + if err != nil { + return err + } + return httputils.WriteJSON(w, http.StatusOK, result) +} diff --git a/components/engine/api/server/router/router.go b/components/engine/api/server/router/router.go new file mode 100644 index 0000000000..2de25c27ff --- /dev/null +++ b/components/engine/api/server/router/router.go @@ -0,0 +1,19 @@ +package router + +import "github.com/docker/docker/api/server/httputils" + +// Router defines an interface to specify a group of routes to add to the docker server. +type Router interface { + // Routes returns the list of routes to add to the docker server. + Routes() []Route +} + +// Route defines an individual API route in the docker server. +type Route interface { + // Handler returns the raw function to create the http handler. + Handler() httputils.APIFunc + // Method returns the http method that the route responds to. + Method() string + // Path returns the subpath where the route responds to. + Path() string +} diff --git a/components/engine/api/server/router/swarm/backend.go b/components/engine/api/server/router/swarm/backend.go new file mode 100644 index 0000000000..3b7933d7b1 --- /dev/null +++ b/components/engine/api/server/router/swarm/backend.go @@ -0,0 +1,47 @@ +package swarm + +import ( + basictypes "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/backend" + types "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +// Backend abstracts a swarm manager. +type Backend interface { + Init(req types.InitRequest) (string, error) + Join(req types.JoinRequest) error + Leave(force bool) error + Inspect() (types.Swarm, error) + Update(uint64, types.Spec, types.UpdateFlags) error + GetUnlockKey() (string, error) + UnlockSwarm(req types.UnlockRequest) error + + GetServices(basictypes.ServiceListOptions) ([]types.Service, error) + GetService(idOrName string, insertDefaults bool) (types.Service, error) + CreateService(types.ServiceSpec, string, bool) (*basictypes.ServiceCreateResponse, error) + UpdateService(string, uint64, types.ServiceSpec, basictypes.ServiceUpdateOptions, bool) (*basictypes.ServiceUpdateResponse, error) + RemoveService(string) error + + ServiceLogs(context.Context, *backend.LogSelector, *basictypes.ContainerLogsOptions) (<-chan *backend.LogMessage, error) + + GetNodes(basictypes.NodeListOptions) ([]types.Node, error) + GetNode(string) (types.Node, error) + UpdateNode(string, uint64, types.NodeSpec) error + RemoveNode(string, bool) error + + GetTasks(basictypes.TaskListOptions) ([]types.Task, error) + GetTask(string) (types.Task, error) + + GetSecrets(opts basictypes.SecretListOptions) ([]types.Secret, error) + CreateSecret(s types.SecretSpec) (string, error) + RemoveSecret(idOrName string) error + GetSecret(id string) (types.Secret, error) + UpdateSecret(idOrName string, version uint64, spec types.SecretSpec) error + + GetConfigs(opts basictypes.ConfigListOptions) ([]types.Config, error) + CreateConfig(s types.ConfigSpec) (string, error) + RemoveConfig(id string) error + GetConfig(id string) (types.Config, error) + UpdateConfig(idOrName string, version uint64, spec types.ConfigSpec) error +} diff --git a/components/engine/api/server/router/swarm/cluster.go b/components/engine/api/server/router/swarm/cluster.go new file mode 100644 index 0000000000..2529250b0c --- 
/dev/null
+++ b/components/engine/api/server/router/swarm/cluster.go
@@ -0,0 +1,63 @@
+package swarm
+
+import "github.com/docker/docker/api/server/router"
+
+// swarmRouter is a router to talk with the swarm controller
+type swarmRouter struct {
+	backend Backend
+	routes  []router.Route
+}
+
+// NewRouter initializes a new swarm router
+func NewRouter(b Backend) router.Router {
+	r := &swarmRouter{
+		backend: b,
+	}
+	r.initRoutes()
+	return r
+}
+
+// Routes returns the available routes to the swarm controller
+func (sr *swarmRouter) Routes() []router.Route {
+	return sr.routes
+}
+
+func (sr *swarmRouter) initRoutes() {
+	sr.routes = []router.Route{
+		router.NewPostRoute("/swarm/init", sr.initCluster),
+		router.NewPostRoute("/swarm/join", sr.joinCluster),
+		router.NewPostRoute("/swarm/leave", sr.leaveCluster),
+		router.NewGetRoute("/swarm", sr.inspectCluster),
+		router.NewGetRoute("/swarm/unlockkey", sr.getUnlockKey),
+		router.NewPostRoute("/swarm/update", sr.updateCluster),
+		router.NewPostRoute("/swarm/unlock", sr.unlockCluster),
+
+		router.NewGetRoute("/services", sr.getServices),
+		router.NewGetRoute("/services/{id}", sr.getService),
+		router.NewPostRoute("/services/create", sr.createService),
+		router.NewPostRoute("/services/{id}/update", sr.updateService),
+		router.NewDeleteRoute("/services/{id}", sr.removeService),
+		router.NewGetRoute("/services/{id}/logs", sr.getServiceLogs, router.WithCancel),
+
+		router.NewGetRoute("/nodes", sr.getNodes),
+		router.NewGetRoute("/nodes/{id}", sr.getNode),
+		router.NewDeleteRoute("/nodes/{id}", sr.removeNode),
+		router.NewPostRoute("/nodes/{id}/update", sr.updateNode),
+
+		router.NewGetRoute("/tasks", sr.getTasks),
+		router.NewGetRoute("/tasks/{id}", sr.getTask),
+		router.NewGetRoute("/tasks/{id}/logs", sr.getTaskLogs, router.WithCancel),
+
+		router.NewGetRoute("/secrets", sr.getSecrets),
+		router.NewPostRoute("/secrets/create", sr.createSecret),
+		router.NewDeleteRoute("/secrets/{id}", sr.removeSecret),
+		router.NewGetRoute("/secrets/{id}", sr.getSecret),
+		router.NewPostRoute("/secrets/{id}/update", sr.updateSecret),
+
+		router.NewGetRoute("/configs", sr.getConfigs),
+		router.NewPostRoute("/configs/create", sr.createConfig),
+		router.NewDeleteRoute("/configs/{id}", sr.removeConfig),
+		router.NewGetRoute("/configs/{id}", sr.getConfig),
+		router.NewPostRoute("/configs/{id}/update", sr.updateConfig),
+	}
+}
diff --git a/components/engine/api/server/router/swarm/cluster_routes.go b/components/engine/api/server/router/swarm/cluster_routes.go
new file mode 100644
index 0000000000..91461da764
--- /dev/null
+++ b/components/engine/api/server/router/swarm/cluster_routes.go
@@ -0,0 +1,492 @@
+package swarm
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/http"
+	"strconv"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/docker/api/errors"
+	"github.com/docker/docker/api/server/httputils"
+	basictypes "github.com/docker/docker/api/types"
+	"github.com/docker/docker/api/types/backend"
+	"github.com/docker/docker/api/types/filters"
+	types "github.com/docker/docker/api/types/swarm"
+	"github.com/docker/docker/api/types/versions"
+	"golang.org/x/net/context"
+)
+
+func (sr *swarmRouter) initCluster(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+	var req types.InitRequest
+	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
+		return err
+	}
+	nodeID, err := sr.backend.Init(req)
+	if err != nil {
+		logrus.Errorf("Error initializing swarm: %v", err)
+		return err
+	}
+	return httputils.WriteJSON(w, 
http.StatusOK, nodeID) +} + +func (sr *swarmRouter) joinCluster(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + var req types.JoinRequest + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + return err + } + return sr.backend.Join(req) +} + +func (sr *swarmRouter) leaveCluster(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + force := httputils.BoolValue(r, "force") + return sr.backend.Leave(force) +} + +func (sr *swarmRouter) inspectCluster(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + swarm, err := sr.backend.Inspect() + if err != nil { + logrus.Errorf("Error getting swarm: %v", err) + return err + } + + return httputils.WriteJSON(w, http.StatusOK, swarm) +} + +func (sr *swarmRouter) updateCluster(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + var swarm types.Spec + if err := json.NewDecoder(r.Body).Decode(&swarm); err != nil { + return err + } + + rawVersion := r.URL.Query().Get("version") + version, err := strconv.ParseUint(rawVersion, 10, 64) + if err != nil { + err := fmt.Errorf("invalid swarm version '%s': %v", rawVersion, err) + return errors.NewBadRequestError(err) + } + + var flags types.UpdateFlags + + if value := r.URL.Query().Get("rotateWorkerToken"); value != "" { + rot, err := strconv.ParseBool(value) + if err != nil { + err := fmt.Errorf("invalid value for rotateWorkerToken: %s", value) + return errors.NewBadRequestError(err) + } + + flags.RotateWorkerToken = rot + } + + if value := r.URL.Query().Get("rotateManagerToken"); value != "" { + rot, err := strconv.ParseBool(value) + if err != nil { + err := fmt.Errorf("invalid value for rotateManagerToken: %s", value) + return errors.NewBadRequestError(err) + } + + flags.RotateManagerToken = rot + } + + if value := r.URL.Query().Get("rotateManagerUnlockKey"); value != "" { + rot, err := strconv.ParseBool(value) + if err != nil { + return errors.NewBadRequestError(fmt.Errorf("invalid value for rotateManagerUnlockKey: %s", value)) + } + + flags.RotateManagerUnlockKey = rot + } + + if err := sr.backend.Update(version, swarm, flags); err != nil { + logrus.Errorf("Error configuring swarm: %v", err) + return err + } + return nil +} + +func (sr *swarmRouter) unlockCluster(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + var req types.UnlockRequest + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + return err + } + + if err := sr.backend.UnlockSwarm(req); err != nil { + logrus.Errorf("Error unlocking swarm: %v", err) + return err + } + return nil +} + +func (sr *swarmRouter) getUnlockKey(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + unlockKey, err := sr.backend.GetUnlockKey() + if err != nil { + logrus.WithError(err).Errorf("Error retrieving swarm unlock key") + return err + } + + return httputils.WriteJSON(w, http.StatusOK, &basictypes.SwarmUnlockKeyResponse{ + UnlockKey: unlockKey, + }) +} + +func (sr *swarmRouter) getServices(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + filter, err := filters.FromParam(r.Form.Get("filters")) + if err != nil { + return err + } + + services, err := sr.backend.GetServices(basictypes.ServiceListOptions{Filters: filter}) 
+ if err != nil { + logrus.Errorf("Error getting services: %v", err) + return err + } + + return httputils.WriteJSON(w, http.StatusOK, services) +} + +func (sr *swarmRouter) getService(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + var insertDefaults bool + if value := r.URL.Query().Get("insertDefaults"); value != "" { + var err error + insertDefaults, err = strconv.ParseBool(value) + if err != nil { + err := fmt.Errorf("invalid value for insertDefaults: %s", value) + return errors.NewBadRequestError(err) + } + } + + service, err := sr.backend.GetService(vars["id"], insertDefaults) + if err != nil { + logrus.Errorf("Error getting service %s: %v", vars["id"], err) + return err + } + + return httputils.WriteJSON(w, http.StatusOK, service) +} + +func (sr *swarmRouter) createService(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + var service types.ServiceSpec + if err := json.NewDecoder(r.Body).Decode(&service); err != nil { + return err + } + + // Get returns "" if the header does not exist + encodedAuth := r.Header.Get("X-Registry-Auth") + cliVersion := r.Header.Get("version") + queryRegistry := false + if cliVersion != "" && versions.LessThan(cliVersion, "1.30") { + queryRegistry = true + } + + resp, err := sr.backend.CreateService(service, encodedAuth, queryRegistry) + if err != nil { + logrus.Errorf("Error creating service %s: %v", service.Name, err) + return err + } + + return httputils.WriteJSON(w, http.StatusCreated, resp) +} + +func (sr *swarmRouter) updateService(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + var service types.ServiceSpec + if err := json.NewDecoder(r.Body).Decode(&service); err != nil { + return err + } + + rawVersion := r.URL.Query().Get("version") + version, err := strconv.ParseUint(rawVersion, 10, 64) + if err != nil { + err := fmt.Errorf("invalid service version '%s': %v", rawVersion, err) + return errors.NewBadRequestError(err) + } + + var flags basictypes.ServiceUpdateOptions + + // Get returns "" if the header does not exist + flags.EncodedRegistryAuth = r.Header.Get("X-Registry-Auth") + flags.RegistryAuthFrom = r.URL.Query().Get("registryAuthFrom") + flags.Rollback = r.URL.Query().Get("rollback") + cliVersion := r.Header.Get("version") + queryRegistry := false + if cliVersion != "" && versions.LessThan(cliVersion, "1.30") { + queryRegistry = true + } + + resp, err := sr.backend.UpdateService(vars["id"], version, service, flags, queryRegistry) + if err != nil { + logrus.Errorf("Error updating service %s: %v", vars["id"], err) + return err + } + return httputils.WriteJSON(w, http.StatusOK, resp) +} + +func (sr *swarmRouter) removeService(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := sr.backend.RemoveService(vars["id"]); err != nil { + logrus.Errorf("Error removing service %s: %v", vars["id"], err) + return err + } + return nil +} + +func (sr *swarmRouter) getTaskLogs(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + // make a selector to pass to the helper function + selector := &backend.LogSelector{ + Tasks: []string{vars["id"]}, + } + return sr.swarmLogs(ctx, w, r, selector) +} + +func (sr *swarmRouter) getServiceLogs(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err 
!= nil { + return err + } + + // make a selector to pass to the helper function + selector := &backend.LogSelector{ + Services: []string{vars["id"]}, + } + return sr.swarmLogs(ctx, w, r, selector) +} + +func (sr *swarmRouter) getNodes(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + filter, err := filters.FromParam(r.Form.Get("filters")) + if err != nil { + return err + } + + nodes, err := sr.backend.GetNodes(basictypes.NodeListOptions{Filters: filter}) + if err != nil { + logrus.Errorf("Error getting nodes: %v", err) + return err + } + + return httputils.WriteJSON(w, http.StatusOK, nodes) +} + +func (sr *swarmRouter) getNode(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + node, err := sr.backend.GetNode(vars["id"]) + if err != nil { + logrus.Errorf("Error getting node %s: %v", vars["id"], err) + return err + } + + return httputils.WriteJSON(w, http.StatusOK, node) +} + +func (sr *swarmRouter) updateNode(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + var node types.NodeSpec + if err := json.NewDecoder(r.Body).Decode(&node); err != nil { + return err + } + + rawVersion := r.URL.Query().Get("version") + version, err := strconv.ParseUint(rawVersion, 10, 64) + if err != nil { + err := fmt.Errorf("invalid node version '%s': %v", rawVersion, err) + return errors.NewBadRequestError(err) + } + + if err := sr.backend.UpdateNode(vars["id"], version, node); err != nil { + logrus.Errorf("Error updating node %s: %v", vars["id"], err) + return err + } + return nil +} + +func (sr *swarmRouter) removeNode(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + force := httputils.BoolValue(r, "force") + + if err := sr.backend.RemoveNode(vars["id"], force); err != nil { + logrus.Errorf("Error removing node %s: %v", vars["id"], err) + return err + } + return nil +} + +func (sr *swarmRouter) getTasks(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + filter, err := filters.FromParam(r.Form.Get("filters")) + if err != nil { + return err + } + + tasks, err := sr.backend.GetTasks(basictypes.TaskListOptions{Filters: filter}) + if err != nil { + logrus.Errorf("Error getting tasks: %v", err) + return err + } + + return httputils.WriteJSON(w, http.StatusOK, tasks) +} + +func (sr *swarmRouter) getTask(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + task, err := sr.backend.GetTask(vars["id"]) + if err != nil { + logrus.Errorf("Error getting task %s: %v", vars["id"], err) + return err + } + + return httputils.WriteJSON(w, http.StatusOK, task) +} + +func (sr *swarmRouter) getSecrets(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + filters, err := filters.FromParam(r.Form.Get("filters")) + if err != nil { + return err + } + + secrets, err := sr.backend.GetSecrets(basictypes.SecretListOptions{Filters: filters}) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, secrets) +} + +func (sr *swarmRouter) createSecret(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + var secret 
types.SecretSpec + if err := json.NewDecoder(r.Body).Decode(&secret); err != nil { + return err + } + + id, err := sr.backend.CreateSecret(secret) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusCreated, &basictypes.SecretCreateResponse{ + ID: id, + }) +} + +func (sr *swarmRouter) removeSecret(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := sr.backend.RemoveSecret(vars["id"]); err != nil { + return err + } + w.WriteHeader(http.StatusNoContent) + + return nil +} + +func (sr *swarmRouter) getSecret(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + secret, err := sr.backend.GetSecret(vars["id"]) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, secret) +} + +func (sr *swarmRouter) updateSecret(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + var secret types.SecretSpec + if err := json.NewDecoder(r.Body).Decode(&secret); err != nil { + return errors.NewBadRequestError(err) + } + + rawVersion := r.URL.Query().Get("version") + version, err := strconv.ParseUint(rawVersion, 10, 64) + if err != nil { + return errors.NewBadRequestError(fmt.Errorf("invalid secret version")) + } + + id := vars["id"] + if err := sr.backend.UpdateSecret(id, version, secret); err != nil { + return err + } + + return nil +} + +func (sr *swarmRouter) getConfigs(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + filters, err := filters.FromParam(r.Form.Get("filters")) + if err != nil { + return err + } + + configs, err := sr.backend.GetConfigs(basictypes.ConfigListOptions{Filters: filters}) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, configs) +} + +func (sr *swarmRouter) createConfig(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + var config types.ConfigSpec + if err := json.NewDecoder(r.Body).Decode(&config); err != nil { + return err + } + + id, err := sr.backend.CreateConfig(config) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusCreated, &basictypes.ConfigCreateResponse{ + ID: id, + }) +} + +func (sr *swarmRouter) removeConfig(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := sr.backend.RemoveConfig(vars["id"]); err != nil { + return err + } + w.WriteHeader(http.StatusNoContent) + + return nil +} + +func (sr *swarmRouter) getConfig(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + config, err := sr.backend.GetConfig(vars["id"]) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, config) +} + +func (sr *swarmRouter) updateConfig(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + var config types.ConfigSpec + if err := json.NewDecoder(r.Body).Decode(&config); err != nil { + return errors.NewBadRequestError(err) + } + + rawVersion := r.URL.Query().Get("version") + version, err := strconv.ParseUint(rawVersion, 10, 64) + if err != nil { + return errors.NewBadRequestError(fmt.Errorf("invalid config version")) + } + + id := vars["id"] + if err := sr.backend.UpdateConfig(id, version, config); err != nil { + return err + } + + return nil +} diff --git a/components/engine/api/server/router/swarm/helpers.go 
b/components/engine/api/server/router/swarm/helpers.go
new file mode 100644
index 0000000000..ea692ea368
--- /dev/null
+++ b/components/engine/api/server/router/swarm/helpers.go
@@ -0,0 +1,65 @@
+package swarm
+
+import (
+ "fmt"
+ "net/http"
+
+ "github.com/docker/docker/api/server/httputils"
+ basictypes "github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/backend"
+ "golang.org/x/net/context"
+)
+
+// swarmLogs takes an http response, request, and selector, and writes the logs
+// specified by the selector to the response
+func (sr *swarmRouter) swarmLogs(ctx context.Context, w http.ResponseWriter, r *http.Request, selector *backend.LogSelector) error {
+ // Args are validated before the stream starts because when it starts we're
+ // sending HTTP 200 by writing an empty chunk of data to tell the client that
+ // the daemon is going to stream. By sending this initial HTTP 200 we can't report
+ // any error after the stream starts (i.e. container not found, wrong parameters)
+ // with the appropriate status code.
+ stdout, stderr := httputils.BoolValue(r, "stdout"), httputils.BoolValue(r, "stderr")
+ if !(stdout || stderr) {
+ return fmt.Errorf("Bad parameters: you must choose at least one stream")
+ }
+
+ // there is probably a neater way to manufacture the ContainerLogsOptions
+ // struct, likely in the caller, to eliminate the dependency on net/http
+ logsConfig := &basictypes.ContainerLogsOptions{
+ Follow: httputils.BoolValue(r, "follow"),
+ Timestamps: httputils.BoolValue(r, "timestamps"),
+ Since: r.Form.Get("since"),
+ Tail: r.Form.Get("tail"),
+ ShowStdout: stdout,
+ ShowStderr: stderr,
+ Details: httputils.BoolValue(r, "details"),
+ }
+
+ tty := false
+ // checking whether the logs are TTY involves iterating over every service
+ // and task; it is unclear whether there is a better way
+ for _, service := range selector.Services {
+ s, err := sr.backend.GetService(service, false)
+ if err != nil {
+ // perhaps some context should be returned with this error?
+ return err
+ }
+ tty = s.Spec.TaskTemplate.ContainerSpec.TTY || tty
+ }
+ for _, task := range selector.Tasks {
+ t, err := sr.backend.GetTask(task)
+ if err != nil {
+ // as above
+ return err
+ }
+ tty = t.Spec.ContainerSpec.TTY || tty
+ }
+
+ msgs, err := sr.backend.ServiceLogs(ctx, selector, logsConfig)
+ if err != nil {
+ return err
+ }
+
+ httputils.WriteLogStream(ctx, w, msgs, logsConfig, !tty)
+ return nil
+}
diff --git a/components/engine/api/server/router/system/backend.go b/components/engine/api/server/router/system/backend.go
new file mode 100644
index 0000000000..da1de380db
--- /dev/null
+++ b/components/engine/api/server/router/system/backend.go
@@ -0,0 +1,21 @@
+package system
+
+import (
+ "time"
+
+ "github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/events"
+ "github.com/docker/docker/api/types/filters"
+ "golang.org/x/net/context"
+)
+
+// Backend defines the methods that need to be implemented to provide
+// system-specific functionality.
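+//
+// SubscribeToEvents returns the buffered messages matching the given
+// window along with a live event channel; callers must pass that same
+// channel back to UnsubscribeFromEvents when they are done with it
+// (the events route handler does so with a defer).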
+type Backend interface { + SystemInfo() (*types.Info, error) + SystemVersion() types.Version + SystemDiskUsage(ctx context.Context) (*types.DiskUsage, error) + SubscribeToEvents(since, until time.Time, ef filters.Args) ([]events.Message, chan interface{}) + UnsubscribeFromEvents(chan interface{}) + AuthenticateToRegistry(ctx context.Context, authConfig *types.AuthConfig) (string, string, error) +} diff --git a/components/engine/api/server/router/system/system.go b/components/engine/api/server/router/system/system.go new file mode 100644 index 0000000000..f8a851cfe3 --- /dev/null +++ b/components/engine/api/server/router/system/system.go @@ -0,0 +1,39 @@ +package system + +import ( + "github.com/docker/docker/api/server/router" + "github.com/docker/docker/daemon/cluster" +) + +// systemRouter provides information about the Docker system overall. +// It gathers information about host, daemon and container events. +type systemRouter struct { + backend Backend + cluster *cluster.Cluster + routes []router.Route +} + +// NewRouter initializes a new system router +func NewRouter(b Backend, c *cluster.Cluster) router.Router { + r := &systemRouter{ + backend: b, + cluster: c, + } + + r.routes = []router.Route{ + router.NewOptionsRoute("/{anyroute:.*}", optionsHandler), + router.NewGetRoute("/_ping", pingHandler), + router.NewGetRoute("/events", r.getEvents, router.WithCancel), + router.NewGetRoute("/info", r.getInfo), + router.NewGetRoute("/version", r.getVersion), + router.NewGetRoute("/system/df", r.getDiskUsage, router.WithCancel), + router.NewPostRoute("/auth", r.postAuth), + } + + return r +} + +// Routes returns all the API routes dedicated to the docker system +func (s *systemRouter) Routes() []router.Route { + return s.routes +} diff --git a/components/engine/api/server/router/system/system_routes.go b/components/engine/api/server/router/system/system_routes.go new file mode 100644 index 0000000000..7d3ba9a6ec --- /dev/null +++ b/components/engine/api/server/router/system/system_routes.go @@ -0,0 +1,186 @@ +package system + +import ( + "encoding/json" + "fmt" + "net/http" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api" + "github.com/docker/docker/api/errors" + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/events" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/registry" + timetypes "github.com/docker/docker/api/types/time" + "github.com/docker/docker/api/types/versions" + "github.com/docker/docker/pkg/ioutils" + "golang.org/x/net/context" +) + +func optionsHandler(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + w.WriteHeader(http.StatusOK) + return nil +} + +func pingHandler(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + _, err := w.Write([]byte{'O', 'K'}) + return err +} + +func (s *systemRouter) getInfo(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + info, err := s.backend.SystemInfo() + if err != nil { + return err + } + if s.cluster != nil { + info.Swarm = s.cluster.Info() + } + + if versions.LessThan(httputils.VersionFromContext(ctx), "1.25") { + // TODO: handle this conversion in engine-api + type oldInfo struct { + *types.Info + ExecutionDriver string + } + old := &oldInfo{ + Info: info, + ExecutionDriver: "", + } + nameOnlySecurityOptions := []string{} + kvSecOpts, err := 
types.DecodeSecurityOptions(old.SecurityOptions)
+ if err != nil {
+ return err
+ }
+ for _, s := range kvSecOpts {
+ nameOnlySecurityOptions = append(nameOnlySecurityOptions, s.Name)
+ }
+ old.SecurityOptions = nameOnlySecurityOptions
+ return httputils.WriteJSON(w, http.StatusOK, old)
+ }
+ return httputils.WriteJSON(w, http.StatusOK, info)
+}
+
+func (s *systemRouter) getVersion(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+ info := s.backend.SystemVersion()
+ info.APIVersion = api.DefaultVersion
+
+ return httputils.WriteJSON(w, http.StatusOK, info)
+}
+
+func (s *systemRouter) getDiskUsage(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+ du, err := s.backend.SystemDiskUsage(ctx)
+ if err != nil {
+ return err
+ }
+
+ return httputils.WriteJSON(w, http.StatusOK, du)
+}
+
+func (s *systemRouter) getEvents(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+ if err := httputils.ParseForm(r); err != nil {
+ return err
+ }
+
+ since, err := eventTime(r.Form.Get("since"))
+ if err != nil {
+ return err
+ }
+ until, err := eventTime(r.Form.Get("until"))
+ if err != nil {
+ return err
+ }
+
+ var (
+ timeout <-chan time.Time
+ onlyPastEvents bool
+ )
+ if !until.IsZero() {
+ if until.Before(since) {
+ return errors.NewBadRequestError(fmt.Errorf("`since` time (%s) cannot be after `until` time (%s)", r.Form.Get("since"), r.Form.Get("until")))
+ }
+
+ now := time.Now()
+
+ onlyPastEvents = until.Before(now)
+
+ if !onlyPastEvents {
+ dur := until.Sub(now)
+ timeout = time.NewTimer(dur).C
+ }
+ }
+
+ ef, err := filters.FromParam(r.Form.Get("filters"))
+ if err != nil {
+ return err
+ }
+
+ w.Header().Set("Content-Type", "application/json")
+ output := ioutils.NewWriteFlusher(w)
+ defer output.Close()
+ output.Flush()
+
+ enc := json.NewEncoder(output)
+
+ buffered, l := s.backend.SubscribeToEvents(since, until, ef)
+ defer s.backend.UnsubscribeFromEvents(l)
+
+ for _, ev := range buffered {
+ if err := enc.Encode(ev); err != nil {
+ return err
+ }
+ }
+
+ if onlyPastEvents {
+ return nil
+ }
+
+ for {
+ select {
+ case ev := <-l:
+ jev, ok := ev.(events.Message)
+ if !ok {
+ logrus.Warnf("unexpected event message: %q", ev)
+ continue
+ }
+ if err := enc.Encode(jev); err != nil {
+ return err
+ }
+ case <-timeout:
+ return nil
+ case <-ctx.Done():
+ logrus.Debug("Client context cancelled, stop sending events")
+ return nil
+ }
+ }
+}
+
+func (s *systemRouter) postAuth(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+ var config *types.AuthConfig
+ err := json.NewDecoder(r.Body).Decode(&config)
+ r.Body.Close()
+ if err != nil {
+ return err
+ }
+ status, token, err := s.backend.AuthenticateToRegistry(ctx, config)
+ if err != nil {
+ return err
+ }
+ return httputils.WriteJSON(w, http.StatusOK, &registry.AuthenticateOKBody{
+ Status: status,
+ IdentityToken: token,
+ })
+}
+
+func eventTime(formTime string) (time.Time, error) {
+ t, tNano, err := timetypes.ParseTimestamps(formTime, -1)
+ if err != nil {
+ return time.Time{}, err
+ }
+ if t == -1 {
+ return time.Time{}, nil
+ }
+ return time.Unix(t, tNano), nil
+}
diff --git a/components/engine/api/server/router/volume/backend.go b/components/engine/api/server/router/volume/backend.go
new file mode 100644
index 0000000000..b97cb94785
--- /dev/null
+++ b/components/engine/api/server/router/volume/backend.go
@@ -0,0 +1,19 @@
+package volume
+
+import (
+ 
"golang.org/x/net/context" + + // TODO return types need to be refactored into pkg + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" +) + +// Backend is the methods that need to be implemented to provide +// volume specific functionality +type Backend interface { + Volumes(filter string) ([]*types.Volume, []string, error) + VolumeInspect(name string) (*types.Volume, error) + VolumeCreate(name, driverName string, opts, labels map[string]string) (*types.Volume, error) + VolumeRm(name string, force bool) error + VolumesPrune(ctx context.Context, pruneFilters filters.Args) (*types.VolumesPruneReport, error) +} diff --git a/components/engine/api/server/router/volume/volume.go b/components/engine/api/server/router/volume/volume.go new file mode 100644 index 0000000000..b24c8fee55 --- /dev/null +++ b/components/engine/api/server/router/volume/volume.go @@ -0,0 +1,36 @@ +package volume + +import "github.com/docker/docker/api/server/router" + +// volumeRouter is a router to talk with the volumes controller +type volumeRouter struct { + backend Backend + routes []router.Route +} + +// NewRouter initializes a new volume router +func NewRouter(b Backend) router.Router { + r := &volumeRouter{ + backend: b, + } + r.initRoutes() + return r +} + +// Routes returns the available routes to the volumes controller +func (r *volumeRouter) Routes() []router.Route { + return r.routes +} + +func (r *volumeRouter) initRoutes() { + r.routes = []router.Route{ + // GET + router.NewGetRoute("/volumes", r.getVolumesList), + router.NewGetRoute("/volumes/{name:.*}", r.getVolumeByName), + // POST + router.NewPostRoute("/volumes/create", r.postVolumesCreate), + router.NewPostRoute("/volumes/prune", r.postVolumesPrune, router.WithCancel), + // DELETE + router.NewDeleteRoute("/volumes/{name:.*}", r.deleteVolumes), + } +} diff --git a/components/engine/api/server/router/volume/volume_routes.go b/components/engine/api/server/router/volume/volume_routes.go new file mode 100644 index 0000000000..f0f490119b --- /dev/null +++ b/components/engine/api/server/router/volume/volume_routes.go @@ -0,0 +1,85 @@ +package volume + +import ( + "encoding/json" + "net/http" + + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/api/types/filters" + volumetypes "github.com/docker/docker/api/types/volume" + "golang.org/x/net/context" +) + +func (v *volumeRouter) getVolumesList(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + volumes, warnings, err := v.backend.Volumes(r.Form.Get("filters")) + if err != nil { + return err + } + return httputils.WriteJSON(w, http.StatusOK, &volumetypes.VolumesListOKBody{Volumes: volumes, Warnings: warnings}) +} + +func (v *volumeRouter) getVolumeByName(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + volume, err := v.backend.VolumeInspect(vars["name"]) + if err != nil { + return err + } + return httputils.WriteJSON(w, http.StatusOK, volume) +} + +func (v *volumeRouter) postVolumesCreate(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + if err := httputils.CheckForJSON(r); err != nil { + return err + } + + var req volumetypes.VolumesCreateBody + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + return err + 
}
+
+ volume, err := v.backend.VolumeCreate(req.Name, req.Driver, req.DriverOpts, req.Labels)
+ if err != nil {
+ return err
+ }
+ return httputils.WriteJSON(w, http.StatusCreated, volume)
+}
+
+func (v *volumeRouter) deleteVolumes(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+ if err := httputils.ParseForm(r); err != nil {
+ return err
+ }
+ force := httputils.BoolValue(r, "force")
+ if err := v.backend.VolumeRm(vars["name"], force); err != nil {
+ return err
+ }
+ w.WriteHeader(http.StatusNoContent)
+ return nil
+}
+
+func (v *volumeRouter) postVolumesPrune(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+ if err := httputils.ParseForm(r); err != nil {
+ return err
+ }
+
+ pruneFilters, err := filters.FromParam(r.Form.Get("filters"))
+ if err != nil {
+ return err
+ }
+
+ pruneReport, err := v.backend.VolumesPrune(ctx, pruneFilters)
+ if err != nil {
+ return err
+ }
+ return httputils.WriteJSON(w, http.StatusOK, pruneReport)
+}
diff --git a/components/engine/api/server/router_swapper.go b/components/engine/api/server/router_swapper.go
new file mode 100644
index 0000000000..1ecc7a7f39
--- /dev/null
+++ b/components/engine/api/server/router_swapper.go
@@ -0,0 +1,30 @@
+package server
+
+import (
+ "net/http"
+ "sync"
+
+ "github.com/gorilla/mux"
+)
+
+// routerSwapper is an http.Handler that allows you to swap
+// mux routers.
+type routerSwapper struct {
+ mu sync.Mutex
+ router *mux.Router
+}
+
+// Swap replaces the old router with the new one.
+func (rs *routerSwapper) Swap(newRouter *mux.Router) {
+ rs.mu.Lock()
+ rs.router = newRouter
+ rs.mu.Unlock()
+}
+
+// ServeHTTP makes routerSwapper implement the http.Handler interface.
+func (rs *routerSwapper) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+ rs.mu.Lock()
+ router := rs.router
+ rs.mu.Unlock()
+ router.ServeHTTP(w, r)
+}
diff --git a/components/engine/api/server/server.go b/components/engine/api/server/server.go
new file mode 100644
index 0000000000..d402019113
--- /dev/null
+++ b/components/engine/api/server/server.go
@@ -0,0 +1,208 @@
+package server
+
+import (
+ "crypto/tls"
+ "fmt"
+ "net"
+ "net/http"
+ "strings"
+
+ "github.com/Sirupsen/logrus"
+ "github.com/docker/docker/api/errors"
+ "github.com/docker/docker/api/server/httputils"
+ "github.com/docker/docker/api/server/middleware"
+ "github.com/docker/docker/api/server/router"
+ "github.com/docker/docker/dockerversion"
+ "github.com/gorilla/mux"
+ "golang.org/x/net/context"
+)
+
+// versionMatcher defines a variable matcher to be parsed by the router
+// when a request is about to be served.
+const versionMatcher = "/v{version:[0-9.]+}"
+
+// Config provides the configuration for the API server
+type Config struct {
+ Logging bool
+ EnableCors bool
+ CorsHeaders string
+ Version string
+ SocketGroup string
+ TLSConfig *tls.Config
+}
+
+// Server contains instance details for the server
+type Server struct {
+ cfg *Config
+ servers []*HTTPServer
+ routers []router.Router
+ routerSwapper *routerSwapper
+ middlewares []middleware.Middleware
+}
+
+// New returns a new instance of the server based on the specified configuration.
+// It allocates the resources which will be needed by serveAPI (ports, unix sockets).
+func New(cfg *Config) *Server {
+ return &Server{
+ cfg: cfg,
+ }
+}
+
+// UseMiddleware appends a new middleware to the request chain.
+// This needs to be called before the API routes are configured.
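+//
+// A minimal sketch of the intended wiring (the version string and the
+// routers slice are illustrative; see server_test.go for a concrete use):
+//
+//	srv := New(&Config{Version: "1.30"})
+//	srv.UseMiddleware(middleware.NewVersionMiddleware("1.30", api.DefaultVersion, api.MinVersion))
+//	srv.InitRouter(false, routers...)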
+func (s *Server) UseMiddleware(m middleware.Middleware) {
+ s.middlewares = append(s.middlewares, m)
+}
+
+// Accept adds listeners on which the server accepts connections.
+func (s *Server) Accept(addr string, listeners ...net.Listener) {
+ for _, listener := range listeners {
+ httpServer := &HTTPServer{
+ srv: &http.Server{
+ Addr: addr,
+ },
+ l: listener,
+ }
+ s.servers = append(s.servers, httpServer)
+ }
+}
+
+// Close closes the servers and thus stops receiving requests
+func (s *Server) Close() {
+ for _, srv := range s.servers {
+ if err := srv.Close(); err != nil {
+ logrus.Error(err)
+ }
+ }
+}
+
+// serveAPI loops through all initialized servers and spawns a goroutine
+// calling Serve for each. It also installs the routerSwapper as each server's Handler.
+func (s *Server) serveAPI() error {
+ var chErrors = make(chan error, len(s.servers))
+ for _, srv := range s.servers {
+ srv.srv.Handler = s.routerSwapper
+ go func(srv *HTTPServer) {
+ var err error
+ logrus.Infof("API listen on %s", srv.l.Addr())
+ if err = srv.Serve(); err != nil && strings.Contains(err.Error(), "use of closed network connection") {
+ err = nil
+ }
+ chErrors <- err
+ }(srv)
+ }
+
+ for range s.servers {
+ err := <-chErrors
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// HTTPServer contains an instance of an HTTP server and its listener.
+// srv *http.Server contains the configuration to create an HTTP server and a mux router with all API endpoints.
+// l net.Listener is a TCP or socket listener that dispatches incoming requests to the router.
+type HTTPServer struct {
+ srv *http.Server
+ l net.Listener
+}
+
+// Serve starts listening for inbound requests.
+func (s *HTTPServer) Serve() error {
+ return s.srv.Serve(s.l)
+}
+
+// Close stops the HTTPServer from listening for inbound requests.
+func (s *HTTPServer) Close() error {
+ return s.l.Close()
+}
+
+func (s *Server) makeHTTPHandler(handler httputils.APIFunc) http.HandlerFunc {
+ return func(w http.ResponseWriter, r *http.Request) {
+ // Define the context that we'll pass around to share info
+ // like the docker-request-id.
+ //
+ // The 'context' will be used for global data that should
+ // apply to all requests. Data that is specific to the
+ // immediate function being called should still be passed
+ // as 'args' on the function call.
+ ctx := context.WithValue(context.Background(), dockerversion.UAStringKey, r.Header.Get("User-Agent"))
+ handlerFunc := s.handlerWithGlobalMiddlewares(handler)
+
+ vars := mux.Vars(r)
+ if vars == nil {
+ vars = make(map[string]string)
+ }
+
+ if err := handlerFunc(ctx, w, r, vars); err != nil {
+ statusCode := httputils.GetHTTPErrorStatusCode(err)
+ if statusCode >= 500 {
+ logrus.Errorf("Handler for %s %s returned error: %v", r.Method, r.URL.Path, err)
+ }
+ httputils.MakeErrorHandler(err)(w, r)
+ }
+ }
+}
+
+// InitRouter initializes the list of routers for the server.
+// This method also enables the Go profiler if enableProfiler is true.
+func (s *Server) InitRouter(enableProfiler bool, routers ...router.Router) {
+ s.routers = append(s.routers, routers...)
+
+ m := s.createMux()
+ if enableProfiler {
+ profilerSetup(m)
+ }
+ s.routerSwapper = &routerSwapper{
+ router: m,
+ }
+}
+
+// createMux initializes the main router the server uses.
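+// Each route is registered twice: once under the version prefix (e.g.
+// /v1.30/info) and once under the bare path, so requests that omit the
+// version keep working (a deprecated but still accepted form).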
+func (s *Server) createMux() *mux.Router { + m := mux.NewRouter() + + logrus.Debug("Registering routers") + for _, apiRouter := range s.routers { + for _, r := range apiRouter.Routes() { + f := s.makeHTTPHandler(r.Handler()) + + logrus.Debugf("Registering %s, %s", r.Method(), r.Path()) + m.Path(versionMatcher + r.Path()).Methods(r.Method()).Handler(f) + m.Path(r.Path()).Methods(r.Method()).Handler(f) + } + } + + err := errors.NewRequestNotFoundError(fmt.Errorf("page not found")) + notFoundHandler := httputils.MakeErrorHandler(err) + m.HandleFunc(versionMatcher+"/{path:.*}", notFoundHandler) + m.NotFoundHandler = notFoundHandler + + return m +} + +// Wait blocks the server goroutine until it exits. +// It sends an error message if there is any error during +// the API execution. +func (s *Server) Wait(waitChan chan error) { + if err := s.serveAPI(); err != nil { + logrus.Errorf("ServeAPI error: %v", err) + waitChan <- err + return + } + waitChan <- nil +} + +// DisableProfiler reloads the server mux without adding the profiler routes. +func (s *Server) DisableProfiler() { + s.routerSwapper.Swap(s.createMux()) +} + +// EnableProfiler reloads the server mux adding the profiler routes. +func (s *Server) EnableProfiler() { + m := s.createMux() + profilerSetup(m) + s.routerSwapper.Swap(m) +} diff --git a/components/engine/api/server/server_test.go b/components/engine/api/server/server_test.go new file mode 100644 index 0000000000..272dc81fb6 --- /dev/null +++ b/components/engine/api/server/server_test.go @@ -0,0 +1,46 @@ +package server + +import ( + "net/http" + "net/http/httptest" + "strings" + "testing" + + "github.com/docker/docker/api" + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/api/server/middleware" + + "golang.org/x/net/context" +) + +func TestMiddlewares(t *testing.T) { + cfg := &Config{ + Version: "0.1omega2", + } + srv := &Server{ + cfg: cfg, + } + + srv.UseMiddleware(middleware.NewVersionMiddleware("0.1omega2", api.DefaultVersion, api.MinVersion)) + + req, _ := http.NewRequest("GET", "/containers/json", nil) + resp := httptest.NewRecorder() + ctx := context.Background() + + localHandler := func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if httputils.VersionFromContext(ctx) == "" { + t.Fatal("Expected version, got empty string") + } + + if sv := w.Header().Get("Server"); !strings.Contains(sv, "Docker/0.1omega2") { + t.Fatalf("Expected server version in the header `Docker/0.1omega2`, got %s", sv) + } + + return nil + } + + handlerFunc := srv.handlerWithGlobalMiddlewares(localHandler) + if err := handlerFunc(ctx, resp, req, map[string]string{}); err != nil { + t.Fatal(err) + } +} diff --git a/components/engine/api/swagger-gen.yaml b/components/engine/api/swagger-gen.yaml new file mode 100644 index 0000000000..f07a02737f --- /dev/null +++ b/components/engine/api/swagger-gen.yaml @@ -0,0 +1,12 @@ + +layout: + models: + - name: definition + source: asset:model + target: "{{ joinFilePath .Target .ModelPackage }}" + file_name: "{{ (snakize (pascalize .Name)) }}.go" + operations: + - name: handler + source: asset:serverOperation + target: "{{ joinFilePath .Target .APIPackage .Package }}" + file_name: "{{ (snakize (pascalize .Name)) }}.go" diff --git a/components/engine/api/swagger.yaml b/components/engine/api/swagger.yaml new file mode 100644 index 0000000000..4bce7d6610 --- /dev/null +++ b/components/engine/api/swagger.yaml @@ -0,0 +1,8471 @@ +# A Swagger 2.0 (a.k.a. OpenAPI) definition of the Engine API. 
+#
+# This is used for generating API documentation and the types used by the
+# client/server. See api/README.md for more information.
+#
+# Some style notes:
+# - This file is used by ReDoc, which allows GitHub Flavored Markdown in
+# descriptions.
+# - There is no maximum line length, for ease of editing and pretty diffs.
+# - operationIds are in the format "NounVerb", with a singular noun.
+
+swagger: "2.0"
+schemes:
+ - "http"
+ - "https"
+produces:
+ - "application/json"
+ - "text/plain"
+consumes:
+ - "application/json"
+ - "text/plain"
+basePath: "/v1.30"
+info:
+ title: "Docker Engine API"
+ version: "1.30"
+ x-logo:
+ url: "https://docs.docker.com/images/logo-docker-main.png"
+ description: |
+ The Engine API is an HTTP API served by Docker Engine. It is the API the Docker client uses to communicate with the Engine, so everything the Docker client can do can be done with the API.
+
+ Most of the client's commands map directly to API endpoints (e.g. `docker ps` is `GET /containers/json`). The notable exception is running containers, which consists of several API calls.
+
+ # Errors
+
+ The API uses standard HTTP status codes to indicate the success or failure of the API call. The body of the response will be JSON in the following format:
+
+ ```
+ {
+ "message": "page not found"
+ }
+ ```
+
+ # Versioning
+
+ The API is usually changed in each release of Docker, so API calls are versioned to ensure that clients don't break.
+
+ For Docker Engine 17.06, the API version is 1.30. To lock to this version, you prefix the URL with `/v1.30`. For example, calling `/info` is the same as calling `/v1.30/info`.
+
+ Engine releases in the near future should support this version of the API, so your client will continue to work even if it is talking to a newer Engine.
+
+ In previous versions of Docker, it was possible to access the API without providing a version. This behaviour is now deprecated and will be removed in a future version of Docker.
+
+ The API uses an open schema model, which means the server may add extra properties to responses. Likewise, the server will ignore any extra query parameters and request body properties. When you write clients, you need to ignore additional properties in responses to ensure they do not break when talking to newer Docker daemons.
+
+ This documentation is for version 1.30 of the API, which was introduced with Docker 17.06.
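+
+ For example, a quick check against a local daemon (a hypothetical invocation, assuming the daemon listens on the default Unix socket and a curl build with `--unix-socket` support):
+
+ ```
+ curl --unix-socket /var/run/docker.sock http://localhost/v1.30/version
+ ```
+
+ 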
Use this table to find documentation for previous versions of the API:
+
+ Docker version | API version | Changes
+ ----------------|-------------|---------
+ 17.05.x | [1.29](https://docs.docker.com/engine/api/v1.29/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-29-api-changes)
+ 17.04.x | [1.28](https://docs.docker.com/engine/api/v1.28/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-28-api-changes)
+ 17.03.1 | [1.27](https://docs.docker.com/engine/api/v1.27/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-27-api-changes)
+ 1.13.1 & 17.03.0 | [1.26](https://docs.docker.com/engine/api/v1.26/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-26-api-changes)
+ 1.13.0 | [1.25](https://docs.docker.com/engine/api/v1.25/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-25-api-changes)
+ 1.12.x | [1.24](https://docs.docker.com/engine/api/v1.24/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-24-api-changes)
+ 1.11.x | [1.23](https://docs.docker.com/engine/api/v1.23/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-23-api-changes)
+ 1.10.x | [1.22](https://docs.docker.com/engine/api/v1.22/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-22-api-changes)
+ 1.9.x | [1.21](https://docs.docker.com/engine/api/v1.21/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-21-api-changes)
+ 1.8.x | [1.20](https://docs.docker.com/engine/api/v1.20/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-20-api-changes)
+ 1.7.x | [1.19](https://docs.docker.com/engine/api/v1.19/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-19-api-changes)
+ 1.6.x | [1.18](https://docs.docker.com/engine/api/v1.18/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-18-api-changes)
+
+ # Authentication
+
+ Authentication for registries is handled client side. The client has to send authentication details to various endpoints that need to communicate with registries, such as `POST /images/(name)/push`. These are sent as an `X-Registry-Auth` header containing a Base64-encoded JSON string with the following structure:
+
+ ```
+ {
+ "username": "string",
+ "password": "string",
+ "email": "string",
+ "serveraddress": "string"
+ }
+ ```
+
+ The `serveraddress` is a domain/IP without a protocol. Throughout this structure, double quotes are required.
+
+ If you have already got an identity token from the [`/auth` endpoint](#operation/SystemAuth), you can just pass this instead of credentials:
+
+ ```
+ {
+ "identitytoken": "9cbaf023786cd7..."
+ }
+ ```
+
+# The tags on paths define the menu sections in the ReDoc documentation, so
+# the usage of tags must make sense for that:
+# - They should be singular, not plural.
+# - There should not be too many tags, or the menu becomes unwieldy. For
+# example, it is preferable to add a path to the "System" tag instead of
+# creating a tag with a single path in it.
+# - The order of tags in this list defines the order in the menu.
+tags:
+ # Primary objects
+ - name: "Container"
+ x-displayName: "Containers"
+ description: |
+ Create and manage containers.
+ - name: "Image"
+ x-displayName: "Images"
+ - name: "Network"
+ x-displayName: "Networks"
+ description: |
+ Networks are user-defined networks that containers can be attached to. 
See the [networking documentation](https://docs.docker.com/engine/userguide/networking/) for more information. + - name: "Volume" + x-displayName: "Volumes" + description: | + Create and manage persistent storage that can be attached to containers. + - name: "Exec" + x-displayName: "Exec" + description: | + Run new commands inside running containers. See the [command-line reference](https://docs.docker.com/engine/reference/commandline/exec/) for more information. + + To exec a command in a container, you first need to create an exec instance, then start it. These two API endpoints are wrapped up in a single command-line command, `docker exec`. + # Swarm things + - name: "Swarm" + x-displayName: "Swarm" + description: | + Engines can be clustered together in a swarm. See [the swarm mode documentation](https://docs.docker.com/engine/swarm/) for more information. + - name: "Node" + x-displayName: "Nodes" + description: | + Nodes are instances of the Engine participating in a swarm. Swarm mode must be enabled for these endpoints to work. + - name: "Service" + x-displayName: "Services" + description: | + Services are the definitions of tasks to run on a swarm. Swarm mode must be enabled for these endpoints to work. + - name: "Task" + x-displayName: "Tasks" + description: | + A task is a container running on a swarm. It is the atomic scheduling unit of swarm. Swarm mode must be enabled for these endpoints to work. + - name: "Secret" + x-displayName: "Secrets" + description: | + Secrets are sensitive data that can be used by services. Swarm mode must be enabled for these endpoints to work. + # System things + - name: "Plugin" + x-displayName: "Plugins" + - name: "System" + x-displayName: "System" + +definitions: + Port: + type: "object" + description: "An open port on a container" + required: [PrivatePort, Type] + properties: + IP: + type: "string" + format: "ip-address" + PrivatePort: + type: "integer" + format: "uint16" + x-nullable: false + description: "Port on the container" + PublicPort: + type: "integer" + format: "uint16" + description: "Port exposed on the host" + Type: + type: "string" + x-nullable: false + enum: ["tcp", "udp"] + example: + PrivatePort: 8080 + PublicPort: 80 + Type: "tcp" + + MountPoint: + type: "object" + description: "A mount point inside a container" + properties: + Type: + type: "string" + Name: + type: "string" + Source: + type: "string" + Destination: + type: "string" + Driver: + type: "string" + Mode: + type: "string" + RW: + type: "boolean" + Propagation: + type: "string" + + DeviceMapping: + type: "object" + description: "A device mapping between the host and container" + properties: + PathOnHost: + type: "string" + PathInContainer: + type: "string" + CgroupPermissions: + type: "string" + example: + PathOnHost: "/dev/deviceName" + PathInContainer: "/dev/deviceName" + CgroupPermissions: "mrw" + + ThrottleDevice: + type: "object" + properties: + Path: + description: "Device path" + type: "string" + Rate: + description: "Rate" + type: "integer" + format: "int64" + minimum: 0 + + Mount: + type: "object" + properties: + Target: + description: "Container path." + type: "string" + Source: + description: "Mount source (e.g. a volume name, a host path)." + type: "string" + Type: + description: | + The mount type. Available types: + + - `bind` Mounts a file or directory from the host into the container. Must exist prior to creating the container. + - `volume` Creates a volume with the given name and options (or uses a pre-existing volume with the same name and options). 
These are **not** removed when the container is removed. + - `tmpfs` Create a tmpfs with the given options. The mount source cannot be specified for tmpfs. + type: "string" + enum: + - "bind" + - "volume" + - "tmpfs" + ReadOnly: + description: "Whether the mount should be read-only." + type: "boolean" + Consistency: + description: "The consistency requirement for the mount: `default`, `consistent`, `cached`, or `delegated`." + type: "string" + BindOptions: + description: "Optional configuration for the `bind` type." + type: "object" + properties: + Propagation: + description: "A propagation mode with the value `[r]private`, `[r]shared`, or `[r]slave`." + enum: + - "private" + - "rprivate" + - "shared" + - "rshared" + - "slave" + - "rslave" + VolumeOptions: + description: "Optional configuration for the `volume` type." + type: "object" + properties: + NoCopy: + description: "Populate volume with data from the target." + type: "boolean" + default: false + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + DriverConfig: + description: "Map of driver specific options" + type: "object" + properties: + Name: + description: "Name of the driver to use to create the volume." + type: "string" + Options: + description: "key/value map of driver specific options." + type: "object" + additionalProperties: + type: "string" + TmpfsOptions: + description: "Optional configuration for the `tmpfs` type." + type: "object" + properties: + SizeBytes: + description: "The size for the tmpfs mount in bytes." + type: "integer" + format: "int64" + Mode: + description: "The permission mode for the tmpfs mount in an integer." + type: "integer" + RestartPolicy: + description: | + The behavior to apply when the container exits. The default is not to restart. + + An ever increasing delay (double the previous delay, starting at 100ms) is added before each restart to prevent flooding the server. + type: "object" + properties: + Name: + type: "string" + description: | + - Empty string means not to restart + - `always` Always restart + - `unless-stopped` Restart always except when the user has manually stopped the container + - `on-failure` Restart only when the container exit code is non-zero + enum: + - "" + - "always" + - "unless-stopped" + - "on-failure" + MaximumRetryCount: + type: "integer" + description: "If `on-failure` is used, the number of times to retry before giving up" + default: {} + + Resources: + description: "A container's resources (cgroups config, ulimits, etc)" + type: "object" + properties: + # Applicable to all platforms + CpuShares: + description: "An integer value representing this container's relative CPU weight versus other containers." + type: "integer" + Memory: + description: "Memory limit in bytes." + type: "integer" + default: 0 + # Applicable to UNIX platforms + CgroupParent: + description: "Path to `cgroups` under which the container's `cgroup` is created. If the path is not absolute, the path is considered to be relative to the `cgroups` path of the init process. Cgroups are created if they do not already exist." + type: "string" + BlkioWeight: + description: "Block IO weight (relative weight)." + type: "integer" + minimum: 0 + maximum: 1000 + BlkioWeightDevice: + description: | + Block IO weight (relative device weight) in the form `[{"Path": "device_path", "Weight": weight}]`. 
+ type: "array" + items: + type: "object" + properties: + Path: + type: "string" + Weight: + type: "integer" + minimum: 0 + BlkioDeviceReadBps: + description: | + Limit read rate (bytes per second) from a device, in the form `[{"Path": "device_path", "Rate": rate}]`. + type: "array" + items: + $ref: "#/definitions/ThrottleDevice" + BlkioDeviceWriteBps: + description: | + Limit write rate (bytes per second) to a device, in the form `[{"Path": "device_path", "Rate": rate}]`. + type: "array" + items: + $ref: "#/definitions/ThrottleDevice" + BlkioDeviceReadIOps: + description: | + Limit read rate (IO per second) from a device, in the form `[{"Path": "device_path", "Rate": rate}]`. + type: "array" + items: + $ref: "#/definitions/ThrottleDevice" + BlkioDeviceWriteIOps: + description: | + Limit write rate (IO per second) to a device, in the form `[{"Path": "device_path", "Rate": rate}]`. + type: "array" + items: + $ref: "#/definitions/ThrottleDevice" + CpuPeriod: + description: "The length of a CPU period in microseconds." + type: "integer" + format: "int64" + CpuQuota: + description: "Microseconds of CPU time that the container can get in a CPU period." + type: "integer" + format: "int64" + CpuRealtimePeriod: + description: "The length of a CPU real-time period in microseconds. Set to 0 to allocate no time allocated to real-time tasks." + type: "integer" + format: "int64" + CpuRealtimeRuntime: + description: "The length of a CPU real-time runtime in microseconds. Set to 0 to allocate no time allocated to real-time tasks." + type: "integer" + format: "int64" + CpusetCpus: + description: "CPUs in which to allow execution (e.g., `0-3`, `0,1`)" + type: "string" + CpusetMems: + description: "Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems." + type: "string" + Devices: + description: "A list of devices to add to the container." + type: "array" + items: + $ref: "#/definitions/DeviceMapping" + DeviceCgroupRules: + description: "a list of cgroup rules to apply to the container" + type: "array" + items: + type: "string" + example: "c 13:* rwm" + DiskQuota: + description: "Disk limit (in bytes)." + type: "integer" + format: "int64" + KernelMemory: + description: "Kernel memory limit in bytes." + type: "integer" + format: "int64" + MemoryReservation: + description: "Memory soft limit in bytes." + type: "integer" + format: "int64" + MemorySwap: + description: "Total memory limit (memory + swap). Set as `-1` to enable unlimited swap." + type: "integer" + format: "int64" + MemorySwappiness: + description: "Tune a container's memory swappiness behavior. Accepts an integer between 0 and 100." + type: "integer" + format: "int64" + minimum: 0 + maximum: 100 + NanoCPUs: + description: "CPU quota in units of 10-9 CPUs." + type: "integer" + format: "int64" + OomKillDisable: + description: "Disable OOM Killer for the container." + type: "boolean" + PidsLimit: + description: "Tune a container's pids limit. Set -1 for unlimited." + type: "integer" + format: "int64" + Ulimits: + description: | + A list of resource limits to set in the container. For example: `{"Name": "nofile", "Soft": 1024, "Hard": 2048}`" + type: "array" + items: + type: "object" + properties: + Name: + description: "Name of ulimit" + type: "string" + Soft: + description: "Soft limit" + type: "integer" + Hard: + description: "Hard limit" + type: "integer" + # Applicable to Windows + CpuCount: + description: | + The number of usable CPUs (Windows only). 
+
+ On Windows Server containers, the processor resource controls are mutually exclusive. The order of precedence is `CPUCount` first, then `CPUShares`, and `CPUPercent` last.
+ type: "integer"
+ format: "int64"
+ CpuPercent:
+ description: |
+ The usable percentage of the available CPUs (Windows only).
+
+ On Windows Server containers, the processor resource controls are mutually exclusive. The order of precedence is `CPUCount` first, then `CPUShares`, and `CPUPercent` last.
+ type: "integer"
+ format: "int64"
+ IOMaximumIOps:
+ description: "Maximum IOps for the container system drive (Windows only)"
+ type: "integer"
+ format: "int64"
+ IOMaximumBandwidth:
+ description: "Maximum IO in bytes per second for the container system drive (Windows only)"
+ type: "integer"
+ format: "int64"
+
+ HealthConfig:
+ description: "A test to perform to check that the container is healthy."
+ type: "object"
+ properties:
+ Test:
+ description: |
+ The test to perform. Possible values are:
+
+ - `[]` inherit healthcheck from image or parent image
+ - `["NONE"]` disable healthcheck
+ - `["CMD", args...]` exec arguments directly
+ - `["CMD-SHELL", command]` run command with system's default shell
+ type: "array"
+ items:
+ type: "string"
+ Interval:
+ description: "The time to wait between checks in nanoseconds. It should be 0 or at least 1000000 (1 ms). 0 means inherit."
+ type: "integer"
+ Timeout:
+ description: "The time to wait before considering the check to have hung. It should be 0 or at least 1000000 (1 ms). 0 means inherit."
+ type: "integer"
+ Retries:
+ description: "The number of consecutive failures needed to consider a container as unhealthy. 0 means inherit."
+ type: "integer"
+ StartPeriod:
+ description: "Start period for the container to initialize before starting health-retries countdown in nanoseconds. It should be 0 or at least 1000000 (1 ms). 0 means inherit."
+ type: "integer"
+
+ HostConfig:
+ description: "Container configuration that depends on the host we are running on"
+ allOf:
+ - $ref: "#/definitions/Resources"
+ - type: "object"
+ properties:
+ # Applicable to all platforms
+ Binds:
+ type: "array"
+ description: |
+ A list of volume bindings for this container. Each volume binding is a string in one of these forms:
+
+ - `host-src:container-dest` to bind-mount a host path into the container. Both `host-src`, and `container-dest` must be an _absolute_ path.
+ - `host-src:container-dest:ro` to make the bind-mount read-only inside the container. Both `host-src`, and `container-dest` must be an _absolute_ path.
+ - `volume-name:container-dest` to bind-mount a volume managed by a volume driver into the container. `container-dest` must be an _absolute_ path.
+ - `volume-name:container-dest:ro` to mount the volume read-only inside the container. `container-dest` must be an _absolute_ path.
+ items:
+ type: "string"
+ ContainerIDFile:
+ type: "string"
+ description: "Path to a file where the container ID is written"
+ LogConfig:
+ type: "object"
+ description: "The logging configuration for this container"
+ properties:
+ Type:
+ type: "string"
+ enum:
+ - "json-file"
+ - "syslog"
+ - "journald"
+ - "gelf"
+ - "fluentd"
+ - "awslogs"
+ - "splunk"
+ - "etwlogs"
+ - "none"
+ Config:
+ type: "object"
+ additionalProperties:
+ type: "string"
+ NetworkMode:
+ type: "string"
+ description: "Network mode to use for this container. Supported standard values are: `bridge`, `host`, `none`, and `container:<name|id>`. 
Any other value is taken + as a custom network's name to which this container should connect to." + PortBindings: + type: "object" + description: "A map of exposed container ports and the host port they should map to." + additionalProperties: + type: "object" + properties: + HostIp: + type: "string" + description: "The host IP address" + HostPort: + type: "string" + description: "The host port number, as a string" + RestartPolicy: + $ref: "#/definitions/RestartPolicy" + AutoRemove: + type: "boolean" + description: "Automatically remove the container when the container's process exits. This has no effect if `RestartPolicy` is set." + VolumeDriver: + type: "string" + description: "Driver that this container uses to mount volumes." + VolumesFrom: + type: "array" + description: "A list of volumes to inherit from another container, specified in the form `[:]`." + items: + type: "string" + Mounts: + description: "Specification for mounts to be added to the container." + type: "array" + items: + $ref: "#/definitions/Mount" + + # Applicable to UNIX platforms + CapAdd: + type: "array" + description: "A list of kernel capabilities to add to the container." + items: + type: "string" + CapDrop: + type: "array" + description: "A list of kernel capabilities to drop from the container." + items: + type: "string" + Dns: + type: "array" + description: "A list of DNS servers for the container to use." + items: + type: "string" + DnsOptions: + type: "array" + description: "A list of DNS options." + items: + type: "string" + DnsSearch: + type: "array" + description: "A list of DNS search domains." + items: + type: "string" + ExtraHosts: + type: "array" + description: | + A list of hostnames/IP mappings to add to the container's `/etc/hosts` file. Specified in the form `["hostname:IP"]`. + items: + type: "string" + GroupAdd: + type: "array" + description: "A list of additional groups that the container process will run as." + items: + type: "string" + IpcMode: + type: "string" + description: "IPC namespace to use for the container." + Cgroup: + type: "string" + description: "Cgroup to use for the container." + Links: + type: "array" + description: "A list of links for the container in the form `container_name:alias`." + items: + type: "string" + OomScoreAdj: + type: "integer" + description: "An integer value containing the score given to the container in order to tune OOM killer preferences." + PidMode: + type: "string" + description: | + Set the PID (Process) Namespace mode for the container. It can be either: + + - `"container:"`: joins another container's PID namespace + - `"host"`: use the host's PID namespace inside the container + Privileged: + type: "boolean" + description: "Gives the container full access to the host." + PublishAllPorts: + type: "boolean" + description: "Allocates a random host port for all of a container's exposed ports." + ReadonlyRootfs: + type: "boolean" + description: "Mount the container's root filesystem as read only." + SecurityOpt: + type: "array" + description: "A list of string values to customize labels for MLS + systems, such as SELinux." + items: + type: "string" + StorageOpt: + type: "object" + description: | + Storage driver options for this container, in the form `{"size": "120G"}`. + additionalProperties: + type: "string" + Tmpfs: + type: "object" + description: | + A map of container directories which should be replaced by tmpfs mounts, and their corresponding mount options. For example: `{ "/run": "rw,noexec,nosuid,size=65536k" }`. 
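To make the `HostConfig` schema above concrete, a hedged sketch of creating a container through `POST /containers/create` (defined later in this file). It assumes the Python `requests` package and a daemon whose API is reachable at `http://localhost:2375`; all field values are illustrative:

```python
import requests

DOCKER = "http://localhost:2375"  # assumption: the daemon's API is exposed here

host_config = {
    "Binds": ["/tmp/data:/data:ro"],  # host-src:container-dest:ro (absolute paths)
    "PortBindings": {"80/tcp": [{"HostIp": "0.0.0.0", "HostPort": "8080"}]},
    "Tmpfs": {"/run": "rw,noexec,nosuid,size=65536k"},
    "Memory": 256 * 1024 * 1024,      # bytes
    "RestartPolicy": {"Name": "on-failure", "MaximumRetryCount": 2},
}

resp = requests.post(
    f"{DOCKER}/containers/create",
    params={"name": "demo"},
    json={"Image": "ubuntu", "Cmd": ["sleep", "60"], "HostConfig": host_config},
)
resp.raise_for_status()
print(resp.json()["Id"])
```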
+ + Config: + description: "Configuration for a container that is portable between hosts" + type: "object" + properties: + Hostname: + description: "The hostname to use for the container, as a valid RFC 1123 hostname." + type: "string" + Domainname: + description: "The domain name to use for the container." + type: "string" + User: + description: "The user that commands are run as inside the container." + type: "string" + AttachStdin: + description: "Whether to attach to `stdin`." + type: "boolean" + default: false + AttachStdout: + description: "Whether to attach to `stdout`." + type: "boolean" + default: true + AttachStderr: + description: "Whether to attach to `stderr`." + type: "boolean" + default: true + ExposedPorts: + description: | + An object mapping ports to an empty object in the form: + + `{"<port>/<protocol>": {}}` + type: "object" + additionalProperties: + type: "object" + enum: + - {} + default: {} + Tty: + description: "Attach standard streams to a TTY, including `stdin` if it is not closed." + type: "boolean" + default: false + OpenStdin: + description: "Open `stdin`" + type: "boolean" + default: false + StdinOnce: + description: "Close `stdin` after one attached client disconnects" + type: "boolean" + default: false + Env: + description: | + A list of environment variables to set inside the container in the form `["VAR=value", ...]`. A variable without `=` is removed from the environment, rather than having an empty value. + type: "array" + items: + type: "string" + Cmd: + description: "Command to run specified as a string or an array of strings." + type: + - "array" + - "string" + items: + type: "string" + Healthcheck: + $ref: "#/definitions/HealthConfig" + ArgsEscaped: + description: "Command is already escaped (Windows only)" + type: "boolean" + Image: + description: "The name of the image to use when creating the container" + type: "string" + Volumes: + description: "An object mapping mount point paths inside the container to empty objects." + type: "object" + properties: + additionalProperties: + type: "object" + enum: + - {} + default: {} + WorkingDir: + description: "The working directory for commands to run in." + type: "string" + Entrypoint: + description: | + The entry point for the container as a string or an array of strings. + + If the array consists of exactly one empty string (`[""]`) then the entry point is reset to system default (i.e., the entry point used by docker when there is no `ENTRYPOINT` instruction in the `Dockerfile`). + type: + - "array" + - "string" + items: + type: "string" + NetworkDisabled: + description: "Disable networking for the container." + type: "boolean" + MacAddress: + description: "MAC address of the container." + type: "string" + OnBuild: + description: "`ONBUILD` metadata that was defined in the image's `Dockerfile`." + type: "array" + items: + type: "string" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + StopSignal: + description: "Signal to stop a container as a string or unsigned integer." + type: "string" + default: "SIGTERM" + StopTimeout: + description: "Timeout to stop a container in seconds." + type: "integer" + default: 10 + Shell: + description: "Shell for when `RUN`, `CMD`, and `ENTRYPOINT` use a shell." + type: "array" + items: + type: "string"
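A short sketch of a `Config` payload tying the fields above together; note that the `Healthcheck` durations are nanoseconds. The payload is illustrative, not normative:

```python
# Illustrative Config payload; HealthConfig durations are in nanoseconds.
ONE_SECOND_NS = 1_000_000_000

config = {
    "Image": "ubuntu",
    "Env": ["FOO=bar", "EMPTYME"],      # "EMPTYME" (no '=') removes that variable
    "Cmd": ["sleep", "infinity"],        # array form; a plain string is also accepted
    "Entrypoint": [""],                 # [""] resets the entry point to the default
    "Healthcheck": {
        "Test": ["CMD-SHELL", "true"],  # run via the system's default shell
        "Interval": 5 * ONE_SECOND_NS,
        "Timeout": 2 * ONE_SECOND_NS,
        "Retries": 3,
    },
}
# Interval and Timeout must each be 0 (inherit) or at least 1 ms (1000000 ns).
hc = config["Healthcheck"]
assert all(v == 0 or v >= 1_000_000 for v in (hc["Interval"], hc["Timeout"]))
```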
+ + NetworkConfig: + description: "TODO: check is correct" + type: "object" + properties: + Bridge: + type: "string" + Gateway: + type: "string" + Address: + type: "string" + IPPrefixLen: + type: "integer" + MacAddress: + type: "string" + PortMapping: + type: "string" + Ports: + type: "array" + items: + $ref: "#/definitions/Port" + + GraphDriverData: + description: "Information about a container's graph driver." + type: "object" + required: [Name, Data] + properties: + Name: + type: "string" + x-nullable: false + Data: + type: "object" + x-nullable: false + additionalProperties: + type: "string" + + Image: + type: "object" + required: + - Id + - Parent + - Comment + - Created + - Container + - DockerVersion + - Author + - Architecture + - Os + - Size + - VirtualSize + - GraphDriver + - RootFS + properties: + Id: + type: "string" + x-nullable: false + RepoTags: + type: "array" + items: + type: "string" + RepoDigests: + type: "array" + items: + type: "string" + Parent: + type: "string" + x-nullable: false + Comment: + type: "string" + x-nullable: false + Created: + type: "string" + x-nullable: false + Container: + type: "string" + x-nullable: false + ContainerConfig: + $ref: "#/definitions/Config" + DockerVersion: + type: "string" + x-nullable: false + Author: + type: "string" + x-nullable: false + Config: + $ref: "#/definitions/Config" + Architecture: + type: "string" + x-nullable: false + Os: + type: "string" + x-nullable: false + OsVersion: + type: "string" + Size: + type: "integer" + format: "int64" + x-nullable: false + VirtualSize: + type: "integer" + format: "int64" + x-nullable: false + GraphDriver: + $ref: "#/definitions/GraphDriverData" + RootFS: + type: "object" + required: [Type] + properties: + Type: + type: "string" + x-nullable: false + Layers: + type: "array" + items: + type: "string" + BaseLayer: + type: "string" + + ImageSummary: + type: "object" + required: + - Id + - ParentId + - RepoTags + - RepoDigests + - Created + - Size + - SharedSize + - VirtualSize + - Labels + - Containers + properties: + Id: + type: "string" + x-nullable: false + ParentId: + type: "string" + x-nullable: false + RepoTags: + type: "array" + x-nullable: false + items: + type: "string" + RepoDigests: + type: "array" + x-nullable: false + items: + type: "string" + Created: + type: "integer" + x-nullable: false + Size: + type: "integer" + x-nullable: false + SharedSize: + type: "integer" + x-nullable: false + VirtualSize: + type: "integer" + x-nullable: false + Labels: + type: "object" + x-nullable: false + additionalProperties: + type: "string" + Containers: + x-nullable: false + type: "integer" + + AuthConfig: + type: "object" + properties: + username: + type: "string" +
password: + type: "string" + email: + type: "string" + serveraddress: + type: "string" + example: + username: "hannibal" + password: "xxxx" + serveraddress: "https://index.docker.io/v1/" + + ProcessConfig: + type: "object" + properties: + privileged: + type: "boolean" + user: + type: "string" + tty: + type: "boolean" + entrypoint: + type: "string" + arguments: + type: "array" + items: + type: "string" + + Volume: + type: "object" + required: [Name, Driver, Mountpoint, Labels, Scope, Options] + properties: + Name: + type: "string" + description: "Name of the volume." + x-nullable: false + Driver: + type: "string" + description: "Name of the volume driver used by the volume." + x-nullable: false + Mountpoint: + type: "string" + description: "Mount path of the volume on the host." + x-nullable: false + Status: + type: "object" + description: | + Low-level details about the volume, provided by the volume driver. + Details are returned as a map with key/value pairs: + `{"key":"value","key2":"value2"}`. + + The `Status` field is optional, and is omitted if the volume driver + does not support this feature. + additionalProperties: + type: "object" + Labels: + type: "object" + description: "User-defined key/value metadata." + x-nullable: false + additionalProperties: + type: "string" + Scope: + type: "string" + description: "The level at which the volume exists. Either `global` for cluster-wide, or `local` for machine level." + default: "local" + x-nullable: false + enum: ["local", "global"] + Options: + type: "object" + description: "The driver specific options used when creating the volume." + additionalProperties: + type: "string" + UsageData: + type: "object" + required: [Size, RefCount] + properties: + Size: + type: "integer" + description: "The disk space used by the volume (local driver only)" + default: -1 + x-nullable: false + RefCount: + type: "integer" + default: -1 + description: "The number of containers referencing this volume." 
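The `Volume` schema is easiest to read next to a round trip. A hedged sketch against `POST /volumes/create` and `GET /volumes/{name}`, with the same `localhost:2375` daemon assumption as earlier (the `tardis` name mirrors the example below):

```python
import requests

DOCKER = "http://localhost:2375"  # assumption: the daemon's API is exposed here

# Create a named volume, then read back fields from the Volume schema.
create = requests.post(
    f"{DOCKER}/volumes/create",
    json={"Name": "tardis", "Labels": {"com.example.some-label": "some-value"}},
)
create.raise_for_status()

vol = requests.get(f"{DOCKER}/volumes/tardis").json()
print(vol["Driver"], vol["Mountpoint"], vol["Scope"])  # e.g. local /var/lib/... local
```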
+ x-nullable: false + + example: + Name: "tardis" + Driver: "custom" + Mountpoint: "/var/lib/docker/volumes/tardis" + Status: + hello: "world" + Labels: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + Scope: "local" + + Network: + type: "object" + properties: + Name: + type: "string" + Id: + type: "string" + Created: + type: "string" + format: "dateTime" + Scope: + type: "string" + Driver: + type: "string" + EnableIPv6: + type: "boolean" + IPAM: + $ref: "#/definitions/IPAM" + Internal: + type: "boolean" + Attachable: + type: "boolean" + Ingress: + type: "boolean" + Containers: + type: "object" + additionalProperties: + $ref: "#/definitions/NetworkContainer" + Options: + type: "object" + additionalProperties: + type: "string" + Labels: + type: "object" + additionalProperties: + type: "string" + example: + Name: "net01" + Id: "7d86d31b1478e7cca9ebed7e73aa0fdeec46c5ca29497431d3007d2d9e15ed99" + Created: "2016-10-19T04:33:30.360899459Z" + Scope: "local" + Driver: "bridge" + EnableIPv6: false + IPAM: + Driver: "default" + Config: + - Subnet: "172.19.0.0/16" + Gateway: "172.19.0.1" + Options: + foo: "bar" + Internal: false + Attachable: false + Ingress: false + Containers: + 19a4d5d687db25203351ed79d478946f861258f018fe384f229f2efa4b23513c: + Name: "test" + EndpointID: "628cadb8bcb92de107b2a1e516cbffe463e321f548feb37697cce00ad694f21a" + MacAddress: "02:42:ac:13:00:02" + IPv4Address: "172.19.0.2/16" + IPv6Address: "" + Options: + com.docker.network.bridge.default_bridge: "true" + com.docker.network.bridge.enable_icc: "true" + com.docker.network.bridge.enable_ip_masquerade: "true" + com.docker.network.bridge.host_binding_ipv4: "0.0.0.0" + com.docker.network.bridge.name: "docker0" + com.docker.network.driver.mtu: "1500" + Labels: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + IPAM: + type: "object" + properties: + Driver: + description: "Name of the IPAM driver to use." + type: "string" + default: "default" + Config: + description: "List of IPAM configuration options, specified as a map: `{\"Subnet\": <CIDR>, \"IPRange\": <CIDR>, \"Gateway\": <IP address>, \"AuxAddress\": <device_name:IP address>}`" + type: "array" + items: + type: "object" + additionalProperties: + type: "string" + Options: + description: "Driver-specific options, specified as a map." + type: "array" + items: + type: "object" + additionalProperties: + type: "string" + NetworkContainer: + type: "object" + properties: + Name: + type: "string" + EndpointID: + type: "string" + MacAddress: + type: "string" + IPv4Address: + type: "string" + IPv6Address: + type: "string" + + BuildInfo: + type: "object" + properties: + id: + type: "string" + stream: + type: "string" + error: + type: "string" + errorDetail: + $ref: "#/definitions/ErrorDetail" + status: + type: "string" + progress: + type: "string" + progressDetail: + $ref: "#/definitions/ProgressDetail" + + CreateImageInfo: + type: "object" + properties: + error: + type: "string" + status: + type: "string" + progress: + type: "string" + progressDetail: + $ref: "#/definitions/ProgressDetail" + + PushImageInfo: + type: "object" + properties: + error: + type: "string" + status: + type: "string" + progress: + type: "string" + progressDetail: + $ref: "#/definitions/ProgressDetail" + ErrorDetail: + type: "object" + properties: + code: + type: "integer" + message: + type: "string" + ProgressDetail: + type: "object" + properties: + code: + type: "integer" + message: + type: "integer" + + ErrorResponse: + description: "Represents an error."
+ type: "object" + required: ["message"] + properties: + message: + description: "The error message." + type: "string" + x-nullable: false + example: + message: "Something went wrong." + + IdResponse: + description: "Response to an API call that returns just an Id" + type: "object" + required: ["Id"] + properties: + Id: + description: "The id of the newly created object." + type: "string" + x-nullable: false + + EndpointSettings: + description: "Configuration for a network endpoint." + type: "object" + properties: + IPAMConfig: + description: "IPAM configurations for the endpoint" + type: "object" + properties: + IPv4Address: + type: "string" + IPv6Address: + type: "string" + LinkLocalIPs: + type: "array" + items: + type: "string" + Links: + type: "array" + items: + type: "string" + Aliases: + type: "array" + items: + type: "string" + NetworkID: + type: "string" + EndpointID: + type: "string" + Gateway: + type: "string" + IPAddress: + type: "string" + IPPrefixLen: + type: "integer" + IPv6Gateway: + type: "string" + GlobalIPv6Address: + type: "string" + GlobalIPv6PrefixLen: + type: "integer" + format: "int64" + MacAddress: + type: "string" + + PluginMount: + type: "object" + x-nullable: false + required: [Name, Description, Settable, Source, Destination, Type, Options] + properties: + Name: + type: "string" + x-nullable: false + Description: + type: "string" + x-nullable: false + Settable: + type: "array" + items: + type: "string" + Source: + type: "string" + Destination: + type: "string" + x-nullable: false + Type: + type: "string" + x-nullable: false + Options: + type: "array" + items: + type: "string" + + PluginDevice: + type: "object" + required: [Name, Description, Settable, Path] + x-nullable: false + properties: + Name: + type: "string" + x-nullable: false + Description: + type: "string" + x-nullable: false + Settable: + type: "array" + items: + type: "string" + Path: + type: "string" + + PluginEnv: + type: "object" + x-nullable: false + required: [Name, Description, Settable, Value] + properties: + Name: + x-nullable: false + type: "string" + Description: + x-nullable: false + type: "string" + Settable: + type: "array" + items: + type: "string" + Value: + type: "string" + + PluginInterfaceType: + type: "object" + x-nullable: false + required: [Prefix, Capability, Version] + properties: + Prefix: + type: "string" + x-nullable: false + Capability: + type: "string" + x-nullable: false + Version: + type: "string" + x-nullable: false + + Plugin: + description: "A plugin for the Engine API" + type: "object" + required: [Settings, Enabled, Config, Name] + properties: + Id: + type: "string" + Name: + type: "string" + x-nullable: false + Enabled: + description: "True when the plugin is running. False when the plugin is not running, only installed." + type: "boolean" + x-nullable: false + Settings: + description: "Settings that can be modified by users." + type: "object" + x-nullable: false + required: [Args, Devices, Env, Mounts] + properties: + Mounts: + type: "array" + items: + $ref: "#/definitions/PluginMount" + Env: + type: "array" + items: + type: "string" + Args: + type: "array" + items: + type: "string" + Devices: + type: "array" + items: + $ref: "#/definitions/PluginDevice" + PluginReference: + description: "plugin remote reference used to push/pull the plugin" + type: "string" + x-nullable: false + Config: + description: "The config of a plugin." 
+ type: "object" + x-nullable: false + required: + - Description + - Documentation + - Interface + - Entrypoint + - WorkDir + - Network + - Linux + - PidHost + - PropagatedMount + - IpcHost + - Mounts + - Env + - Args + properties: + DockerVersion: + description: "Docker Version used to create the plugin" + type: "string" + x-nullable: false + Description: + type: "string" + x-nullable: false + Documentation: + type: "string" + x-nullable: false + Interface: + description: "The interface between Docker and the plugin" + x-nullable: false + type: "object" + required: [Types, Socket] + properties: + Types: + type: "array" + items: + $ref: "#/definitions/PluginInterfaceType" + Socket: + type: "string" + x-nullable: false + Entrypoint: + type: "array" + items: + type: "string" + WorkDir: + type: "string" + x-nullable: false + User: + type: "object" + x-nullable: false + properties: + UID: + type: "integer" + format: "uint32" + GID: + type: "integer" + format: "uint32" + Network: + type: "object" + x-nullable: false + required: [Type] + properties: + Type: + x-nullable: false + type: "string" + Linux: + type: "object" + x-nullable: false + required: [Capabilities, AllowAllDevices, Devices] + properties: + Capabilities: + type: "array" + items: + type: "string" + AllowAllDevices: + type: "boolean" + x-nullable: false + Devices: + type: "array" + items: + $ref: "#/definitions/PluginDevice" + PropagatedMount: + type: "string" + x-nullable: false + IpcHost: + type: "boolean" + x-nullable: false + PidHost: + type: "boolean" + x-nullable: false + Mounts: + type: "array" + items: + $ref: "#/definitions/PluginMount" + Env: + type: "array" + items: + $ref: "#/definitions/PluginEnv" + Args: + type: "object" + x-nullable: false + required: [Name, Description, Settable, Value] + properties: + Name: + x-nullable: false + type: "string" + Description: + x-nullable: false + type: "string" + Settable: + type: "array" + items: + type: "string" + Value: + type: "array" + items: + type: "string" + rootfs: + type: "object" + properties: + type: + type: "string" + diff_ids: + type: "array" + items: + type: "string" + example: + Id: "5724e2c8652da337ab2eedd19fc6fc0ec908e4bd907c7421bf6a8dfc70c4c078" + Name: "tiborvass/sample-volume-plugin" + Tag: "latest" + Active: true + Settings: + Env: + - "DEBUG=0" + Args: null + Devices: null + Config: + Description: "A sample volume plugin for Docker" + Documentation: "https://docs.docker.com/engine/extend/plugins/" + Interface: + Types: + - "docker.volumedriver/1.0" + Socket: "plugins.sock" + Entrypoint: + - "/usr/bin/sample-volume-plugin" + - "/data" + WorkDir: "" + User: {} + Network: + Type: "" + Linux: + Capabilities: null + AllowAllDevices: false + Devices: null + Mounts: null + PropagatedMount: "/data" + Env: + - Name: "DEBUG" + Description: "If set, prints debug messages" + Settable: null + Value: "0" + Args: + Name: "args" + Description: "command line arguments" + Settable: null + Value: [] + + ObjectVersion: + description: | + The version number of the object such as node, service, etc. This is needed to avoid conflicting writes. + The client must send the version number along with the modified specification when updating these objects. + This approach ensures safe concurrency and determinism in that the change on the object + may not be applied if the version number has changed from the last read. In other words, + if two update requests specify the same base version, only one of the requests can succeed. 
+ As a result, two separate update requests that happen at the same time will not + unintentionally overwrite each other. + type: "object" + properties: + Index: + type: "integer" + format: "int64" + + NodeSpec: + type: "object" + properties: + Name: + description: "Name for the node." + type: "string" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + Role: + description: "Role of the node." + type: "string" + enum: + - "worker" + - "manager" + Availability: + description: "Availability of the node." + type: "string" + enum: + - "active" + - "pause" + - "drain" + example: + Availability: "active" + Name: "node-name" + Role: "manager" + Labels: + foo: "bar" + Node: + type: "object" + properties: + ID: + type: "string" + Version: + $ref: "#/definitions/ObjectVersion" + CreatedAt: + type: "string" + format: "dateTime" + UpdatedAt: + type: "string" + format: "dateTime" + Spec: + $ref: "#/definitions/NodeSpec" + Description: + type: "object" + properties: + Hostname: + type: "string" + Platform: + type: "object" + properties: + Architecture: + type: "string" + OS: + type: "string" + Resources: + type: "object" + properties: + NanoCPUs: + type: "integer" + format: "int64" + MemoryBytes: + type: "integer" + format: "int64" + Engine: + type: "object" + properties: + EngineVersion: + type: "string" + Labels: + type: "object" + additionalProperties: + type: "string" + Plugins: + type: "array" + items: + type: "object" + properties: + Type: + type: "string" + Name: + type: "string" + TLSInfo: + $ref: "#/definitions/TLSInfo" + example: + ID: "24ifsmvkjbyhk" + Version: + Index: 8 + CreatedAt: "2016-06-07T20:31:11.853781916Z" + UpdatedAt: "2016-06-07T20:31:11.999868824Z" + Spec: + Name: "my-node" + Role: "manager" + Availability: "active" + Labels: + foo: "bar" + Description: + Hostname: "bf3067039e47" + Platform: + Architecture: "x86_64" + OS: "linux" + Resources: + NanoCPUs: 4000000000 + MemoryBytes: 8272408576 + Engine: + EngineVersion: "17.04.0" + Labels: + foo: "bar" + Plugins: + - Type: "Volume" + Name: "local" + - Type: "Network" + Name: "bridge" + - Type: "Network" + Name: "null" + - Type: "Network" + Name: "overlay" + Status: + State: "ready" + Addr: "172.17.0.2" + ManagerStatus: + Leader: true + Reachability: "reachable" + Addr: "172.17.0.2:2377" + TLSInfo: + TrustRoot: | + -----BEGIN CERTIFICATE----- + MIIBajCCARCgAwIBAgIUbYqrLSOSQHoxD8CwG6Bi2PJi9c8wCgYIKoZIzj0EAwIw + EzERMA8GA1UEAxMIc3dhcm0tY2EwHhcNMTcwNDI0MjE0MzAwWhcNMzcwNDE5MjE0 + MzAwWjATMREwDwYDVQQDEwhzd2FybS1jYTBZMBMGByqGSM49AgEGCCqGSM49AwEH + A0IABJk/VyMPYdaqDXJb/VXh5n/1Yuv7iNrxV3Qb3l06XD46seovcDWs3IZNV1lf + 3Skyr0ofcchipoiHkXBODojJydSjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMB + Af8EBTADAQH/MB0GA1UdDgQWBBRUXxuRcnFjDfR/RIAUQab8ZV/n4jAKBggqhkjO + PQQDAgNIADBFAiAy+JTe6Uc3KyLCMiqGl2GyWGQqQDEcO3/YG36x7om65AIhAJvz + pxv6zFeVEkAEEkqIYi0omA9+CjanB/6Bz4n1uw8H + -----END CERTIFICATE----- + CertIssuerSubject: "MBMxETAPBgNVBAMTCHN3YXJtLWNh" + CertIssuerPublicKey: "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEmT9XIw9h1qoNclv9VeHmf/Vi6/uI2vFXdBveXTpcPjqx6i9wNazchk1XWV/dKTKvSh9xyGKmiIeRcE4OiMnJ1A==" + TLSInfo: + description: "Information about the issuer of leaf TLS certificates and the trusted root CA certificate" + type: "object" + properties: + TrustRoot: + description: "The root CA certificate(s) that are used to validate leaf TLS certificates" + type: "string" + CertIssuerSubject: + description: "The base64-url-safe-encoded raw subject bytes of the issuer" + type: "string" +
CertIssuerPublicKey: + description: "The base64-url-safe-encoded raw public key bytes of the issuer" + type: "string" + example: + TrustRoot: | + -----BEGIN CERTIFICATE----- + MIIBajCCARCgAwIBAgIUbYqrLSOSQHoxD8CwG6Bi2PJi9c8wCgYIKoZIzj0EAwIw + EzERMA8GA1UEAxMIc3dhcm0tY2EwHhcNMTcwNDI0MjE0MzAwWhcNMzcwNDE5MjE0 + MzAwWjATMREwDwYDVQQDEwhzd2FybS1jYTBZMBMGByqGSM49AgEGCCqGSM49AwEH + A0IABJk/VyMPYdaqDXJb/VXh5n/1Yuv7iNrxV3Qb3l06XD46seovcDWs3IZNV1lf + 3Skyr0ofcchipoiHkXBODojJydSjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMB + Af8EBTADAQH/MB0GA1UdDgQWBBRUXxuRcnFjDfR/RIAUQab8ZV/n4jAKBggqhkjO + PQQDAgNIADBFAiAy+JTe6Uc3KyLCMiqGl2GyWGQqQDEcO3/YG36x7om65AIhAJvz + pxv6zFeVEkAEEkqIYi0omA9+CjanB/6Bz4n1uw8H + -----END CERTIFICATE----- + CertIssuerSubject: "MBMxETAPBgNVBAMTCHN3YXJtLWNh" + CertIssuerPublicKey: "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEmT9XIw9h1qoNclv9VeHmf/Vi6/uI2vFXdBveXTpcPjqx6i9wNazchk1XWV/dKTKvSh9xyGKmiIeRcE4OiMnJ1A==" + SwarmSpec: + description: "User modifiable swarm configuration." + type: "object" + properties: + Name: + description: "Name of the swarm." + type: "string" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + Orchestration: + description: "Orchestration configuration." + type: "object" + properties: + TaskHistoryRetentionLimit: + description: "The number of historic tasks to keep per instance or node. If negative, never remove completed or failed tasks." + type: "integer" + format: "int64" + Raft: + description: "Raft configuration." + type: "object" + properties: + SnapshotInterval: + description: "The number of log entries between snapshots." + type: "integer" + format: "int64" + KeepOldSnapshots: + description: "The number of snapshots to keep beyond the current snapshot." + type: "integer" + format: "int64" + LogEntriesForSlowFollowers: + description: "The number of log entries to keep around to sync up slow followers after a snapshot is created." + type: "integer" + format: "int64" + ElectionTick: + description: | + The number of ticks that a follower will wait for a message from the leader before becoming a candidate and starting an election. `ElectionTick` must be greater than `HeartbeatTick`. + + A tick currently defaults to one second, so these translate directly to seconds currently, but this is NOT guaranteed. + type: "integer" + HeartbeatTick: + description: | + The number of ticks between heartbeats. Every HeartbeatTick ticks, the leader will send a heartbeat to the followers. + + A tick currently defaults to one second, so these translate directly to seconds currently, but this is NOT guaranteed. + type: "integer" + Dispatcher: + description: "Dispatcher configuration." + type: "object" + properties: + HeartbeatPeriod: + description: "The delay for an agent to send a heartbeat to the dispatcher." + type: "integer" + format: "int64" + CAConfig: + description: "CA configuration." + type: "object" + properties: + NodeCertExpiry: + description: "The duration node certificates are issued for." + type: "integer" + format: "int64" + ExternalCAs: + description: "Configuration for forwarding signing requests to an external certificate authority." + type: "array" + items: + type: "object" + properties: + Protocol: + description: "Protocol for communication with the external CA (currently only `cfssl` is supported)." + type: "string" + enum: + - "cfssl" + default: "cfssl" + URL: + description: "URL where certificate signing requests should be sent." 
+ type: "string" + Options: + description: "An object with key/value pairs that are interpreted as protocol-specific options for the external CA driver." + type: "object" + additionalProperties: + type: "string" + CACert: + description: "The root CA certificate (in PEM format) this external CA uses to issue TLS certificates (assumed to be to the current swarm root CA certificate if not provided)." + type: "string" + SigningCACert: + description: "The desired signing CA certificate for all swarm node TLS leaf certificates, in PEM format." + type: "string" + SigningCAKey: + description: "The desired signing CA key for all swarm node TLS leaf certificates, in PEM format." + type: "string" + ForceRotate: + description: "An integer whose purpose is to force swarm to generate a new signing CA certificate and key, if none have been specified in `SigningCACert` and `SigningCAKey`" + EncryptionConfig: + description: "Parameters related to encryption-at-rest." + type: "object" + properties: + AutoLockManagers: + description: "If set, generate a key and use it to lock data stored on the managers." + type: "boolean" + TaskDefaults: + description: "Defaults for creating tasks in this cluster." + type: "object" + properties: + LogDriver: + description: | + The log driver to use for tasks created in the orchestrator if unspecified by a service. + + Updating this value will only have an affect on new tasks. Old tasks will continue use their previously configured log driver until recreated. + type: "object" + properties: + Name: + type: "string" + Options: + type: "object" + additionalProperties: + type: "string" + example: + Name: "default" + Orchestration: + TaskHistoryRetentionLimit: 10 + Raft: + SnapshotInterval: 10000 + LogEntriesForSlowFollowers: 500 + HeartbeatTick: 1 + ElectionTick: 3 + Dispatcher: + HeartbeatPeriod: 5000000000 + CAConfig: + NodeCertExpiry: 7776000000000000 + JoinTokens: + Worker: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-1awxwuwd3z9j1z3puu7rcgdbx" + Manager: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2" + EncryptionConfig: + AutoLockManagers: false + # The Swarm information for `GET /info`. It is the same as `GET /swarm`, but + # without `JoinTokens`. + ClusterInfo: + type: "object" + properties: + ID: + description: "The ID of the swarm." + type: "string" + Version: + $ref: "#/definitions/ObjectVersion" + CreatedAt: + type: "string" + format: "dateTime" + UpdatedAt: + type: "string" + format: "dateTime" + Spec: + $ref: "#/definitions/SwarmSpec" + TLSInfo: + $ref: "#/definitions/TLSInfo" + RootRotationInProgress: + description: "Whether there is currently a root CA rotation in progress for the swarm" + type: "boolean" + TaskSpec: + description: "User modifiable task configuration." + type: "object" + properties: + ContainerSpec: + type: "object" + properties: + Image: + description: "The image name to use for the container." + type: "string" + Labels: + description: "User-defined key/value data." + type: "object" + additionalProperties: + type: "string" + Command: + description: "The command to be run in the image." + type: "array" + items: + type: "string" + Args: + description: "Arguments to the command." + type: "array" + items: + type: "string" + Hostname: + description: "The hostname to use for the container, as a valid RFC 1123 hostname." + type: "string" + Env: + description: "A list of environment variables in the form `VAR=value`." 
+ type: "array" + items: + type: "string" + Dir: + description: "The working directory for commands to run in." + type: "string" + User: + description: "The user inside the container." + type: "string" + Groups: + type: "array" + description: "A list of additional groups that the container process will run as." + items: + type: "string" + TTY: + description: "Whether a pseudo-TTY should be allocated." + type: "boolean" + OpenStdin: + description: "Open `stdin`" + type: "boolean" + ReadOnly: + description: "Mount the container's root filesystem as read only." + type: "boolean" + Mounts: + description: "Specification for mounts to be added to containers created as part of the service." + type: "array" + items: + $ref: "#/definitions/Mount" + StopSignal: + description: "Signal to stop the container." + type: "string" + StopGracePeriod: + description: "Amount of time to wait for the container to terminate before forcefully killing it." + type: "integer" + format: "int64" + HealthCheck: + $ref: "#/definitions/HealthConfig" + Hosts: + type: "array" + description: | + A list of hostnames/IP mappings to add to the container's `/etc/hosts` file. + The format of extra hosts on swarmkit is specified in: + http://man7.org/linux/man-pages/man5/hosts.5.html + IP_address canonical_hostname [aliases...] + items: + type: "string" + DNSConfig: + description: "Specification for DNS related configurations in resolver configuration file (`resolv.conf`)." + type: "object" + properties: + Nameservers: + description: "The IP addresses of the name servers." + type: "array" + items: + type: "string" + Search: + description: "A search list for host-name lookup." + type: "array" + items: + type: "string" + Options: + description: "A list of internal resolver variables to be modified (e.g., `debug`, `ndots:3`, etc.)." + type: "array" + items: + type: "string" + Secrets: + description: "Secrets contains references to zero or more secrets that will be exposed to the service." + type: "array" + items: + type: "object" + properties: + File: + description: "File represents a specific target that is backed by a file." + type: "object" + properties: + Name: + description: "Name represents the final filename in the filesystem." + type: "string" + UID: + description: "UID represents the file UID." + type: "string" + GID: + description: "GID represents the file GID." + type: "string" + Mode: + description: "Mode represents the FileMode of the file." + type: "integer" + format: "uint32" + SecretID: + description: "SecretID represents the ID of the specific secret that we're referencing." + type: "string" + SecretName: + description: | + SecretName is the name of the secret that this references, but this is just provided for + lookup/display purposes. The secret in the reference will be identified by its ID. + type: "string" + + Resources: + description: "Resource requirements which apply to each individual container created as part of the service." + type: "object" + properties: + Limits: + description: "Define resources limits." + type: "object" + properties: + NanoCPUs: + description: "CPU limit in units of 10-9 CPU shares." + type: "integer" + format: "int64" + MemoryBytes: + description: "Memory limit in Bytes." + type: "integer" + format: "int64" + Reservation: + description: "Define resources reservation." + properties: + NanoCPUs: + description: "CPU reservation in units of 10-9 CPU shares." + type: "integer" + format: "int64" + MemoryBytes: + description: "Memory reservation in Bytes." 
+ type: "integer" + format: "int64" + RestartPolicy: + description: "Specification for the restart policy which applies to containers created as part of this service." + type: "object" + properties: + Condition: + description: "Condition for restart." + type: "string" + enum: + - "none" + - "on-failure" + - "any" + Delay: + description: "Delay between restart attempts." + type: "integer" + format: "int64" + MaxAttempts: + description: "Maximum attempts to restart a given container before giving up (default value is 0, which is ignored)." + type: "integer" + format: "int64" + default: 0 + Window: + description: "Windows is the time window used to evaluate the restart policy (default value is 0, which is unbounded)." + type: "integer" + format: "int64" + default: 0 + Placement: + type: "object" + properties: + Constraints: + description: "An array of constraints." + type: "array" + items: + type: "string" + Preferences: + description: "Preferences provide a way to make the scheduler aware of factors such as topology. They are provided in order from highest to lowest precedence." + type: "array" + items: + type: "object" + properties: + Spread: + type: "object" + properties: + SpreadDescriptor: + description: "label descriptor, such as engine.labels.az" + type: "string" + Platforms: + description: "An array of supported platforms." + type: "array" + items: + type: "object" + properties: + Architecture: + type: "string" + OS: + type: "string" + ForceUpdate: + description: "A counter that triggers an update even if no relevant parameters have been changed." + type: "integer" + Runtime: + description: "Runtime is the type of runtime specified for the task executor." + type: "string" + RuntimeData: + description: "RuntimeData is the payload sent to be used with the runtime for the executor." + type: "array" + Networks: + type: "array" + items: + type: "object" + properties: + Target: + type: "string" + Aliases: + type: "array" + items: + type: "string" + LogDriver: + description: "Specifies the log driver to use for tasks created from this spec. If not present, the default one for the swarm will be used, finally falling back to the engine default if not specified." + type: "object" + properties: + Name: + type: "string" + Options: + type: "object" + additionalProperties: + type: "string" + TaskState: + type: "string" + enum: + - "new" + - "allocated" + - "pending" + - "assigned" + - "accepted" + - "preparing" + - "ready" + - "starting" + - "running" + - "complete" + - "shutdown" + - "failed" + - "rejected" + Task: + type: "object" + properties: + ID: + description: "The ID of the task." + type: "string" + Version: + $ref: "#/definitions/ObjectVersion" + CreatedAt: + type: "string" + format: "dateTime" + UpdatedAt: + type: "string" + format: "dateTime" + Name: + description: "Name of the task." + type: "string" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + Spec: + $ref: "#/definitions/TaskSpec" + ServiceID: + description: "The ID of the service this task is part of." + type: "string" + Slot: + type: "integer" + NodeID: + description: "The ID of the node that this task is on." 
+ type: "string" + Status: + type: "object" + properties: + Timestamp: + type: "string" + format: "dateTime" + State: + $ref: "#/definitions/TaskState" + Message: + type: "string" + Err: + type: "string" + ContainerStatus: + type: "object" + properties: + ContainerID: + type: "string" + PID: + type: "integer" + ExitCode: + type: "integer" + DesiredState: + $ref: "#/definitions/TaskState" + example: + ID: "0kzzo1i0y4jz6027t0k7aezc7" + Version: + Index: 71 + CreatedAt: "2016-06-07T21:07:31.171892745Z" + UpdatedAt: "2016-06-07T21:07:31.376370513Z" + Spec: + ContainerSpec: + Image: "redis" + Resources: + Limits: {} + Reservations: {} + RestartPolicy: + Condition: "any" + MaxAttempts: 0 + Placement: {} + ServiceID: "9mnpnzenvg8p8tdbtq4wvbkcz" + Slot: 1 + NodeID: "60gvrl6tm78dmak4yl7srz94v" + Status: + Timestamp: "2016-06-07T21:07:31.290032978Z" + State: "running" + Message: "started" + ContainerStatus: + ContainerID: "e5d62702a1b48d01c3e02ca1e0212a250801fa8d67caca0b6f35919ebc12f035" + PID: 677 + DesiredState: "running" + NetworksAttachments: + - Network: + ID: "4qvuz4ko70xaltuqbt8956gd1" + Version: + Index: 18 + CreatedAt: "2016-06-07T20:31:11.912919752Z" + UpdatedAt: "2016-06-07T21:07:29.955277358Z" + Spec: + Name: "ingress" + Labels: + com.docker.swarm.internal: "true" + DriverConfiguration: {} + IPAMOptions: + Driver: {} + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + DriverState: + Name: "overlay" + Options: + com.docker.network.driver.overlay.vxlanid_list: "256" + IPAMOptions: + Driver: + Name: "default" + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + Addresses: + - "10.255.0.10/16" + ServiceSpec: + description: "User modifiable configuration for a service." + properties: + Name: + description: "Name of the service." + type: "string" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + TaskTemplate: + $ref: "#/definitions/TaskSpec" + Mode: + description: "Scheduling mode for the service." + type: "object" + properties: + Replicated: + type: "object" + properties: + Replicas: + type: "integer" + format: "int64" + Global: + type: "object" + UpdateConfig: + description: "Specification for the update strategy of the service." + type: "object" + properties: + Parallelism: + description: "Maximum number of tasks to be updated in one iteration (0 means unlimited parallelism)." + type: "integer" + format: "int64" + Delay: + description: "Amount of time between updates, in nanoseconds." + type: "integer" + format: "int64" + FailureAction: + description: "Action to take if an updated task fails to run, or stops running during the update." + type: "string" + enum: + - "continue" + - "pause" + - "rollback" + Monitor: + description: "Amount of time to monitor each updated task for failures, in nanoseconds." + type: "integer" + format: "int64" + MaxFailureRatio: + description: "The fraction of tasks that may fail during an update before the failure action is invoked, specified as a floating point number between 0 and 1." + type: "number" + default: 0 + Order: + description: "The order of operations when rolling out an updated task. Either the old task is shut down before the new task is started, or the new task is started before the old task is shut down." + type: "string" + enum: + - "stop-first" + - "start-first" + RollbackConfig: + description: "Specification for the rollback strategy of the service." 
+ type: "object" + properties: + Parallelism: + description: "Maximum number of tasks to be rolled back in one iteration (0 means unlimited parallelism)." + type: "integer" + format: "int64" + Delay: + description: "Amount of time between rollback iterations, in nanoseconds." + type: "integer" + format: "int64" + FailureAction: + description: "Action to take if an rolled back task fails to run, or stops running during the rollback." + type: "string" + enum: + - "continue" + - "pause" + Monitor: + description: "Amount of time to monitor each rolled back task for failures, in nanoseconds." + type: "integer" + format: "int64" + MaxFailureRatio: + description: "The fraction of tasks that may fail during a rollback before the failure action is invoked, specified as a floating point number between 0 and 1." + type: "number" + default: 0 + Order: + description: "The order of operations when rolling back a task. Either the old task is shut down before the new task is started, or the new task is started before the old task is shut down." + type: "string" + enum: + - "stop-first" + - "start-first" + Networks: + description: "Array of network names or IDs to attach the service to." + type: "array" + items: + type: "object" + properties: + Target: + type: "string" + Aliases: + type: "array" + items: + type: "string" + EndpointSpec: + $ref: "#/definitions/EndpointSpec" + EndpointPortConfig: + type: "object" + properties: + Name: + type: "string" + Protocol: + type: "string" + enum: + - "tcp" + - "udp" + TargetPort: + description: "The port inside the container." + type: "integer" + PublishedPort: + description: "The port on the swarm hosts." + type: "integer" + EndpointSpec: + description: "Properties that can be configured to access and load balance a service." + type: "object" + properties: + Mode: + description: "The mode of resolution to use for internal load balancing + between tasks." + type: "string" + enum: + - "vip" + - "dnsrr" + default: "vip" + Ports: + description: "List of exposed ports that this service is accessible on from the outside. Ports can only be provided if `vip` resolution mode is used." + type: "array" + items: + $ref: "#/definitions/EndpointPortConfig" + Service: + type: "object" + properties: + ID: + type: "string" + Version: + $ref: "#/definitions/ObjectVersion" + CreatedAt: + type: "string" + format: "dateTime" + UpdatedAt: + type: "string" + format: "dateTime" + Spec: + $ref: "#/definitions/ServiceSpec" + Endpoint: + type: "object" + properties: + Spec: + $ref: "#/definitions/EndpointSpec" + Ports: + type: "array" + items: + $ref: "#/definitions/EndpointPortConfig" + VirtualIPs: + type: "array" + items: + type: "object" + properties: + NetworkID: + type: "string" + Addr: + type: "string" + UpdateStatus: + description: "The status of a service update." 
+ type: "object" + properties: + State: + type: "string" + enum: + - "updating" + - "paused" + - "completed" + StartedAt: + type: "string" + format: "dateTime" + CompletedAt: + type: "string" + format: "dateTime" + Message: + type: "string" + example: + ID: "9mnpnzenvg8p8tdbtq4wvbkcz" + Version: + Index: 19 + CreatedAt: "2016-06-07T21:05:51.880065305Z" + UpdatedAt: "2016-06-07T21:07:29.962229872Z" + Spec: + Name: "hopeful_cori" + TaskTemplate: + ContainerSpec: + Image: "redis" + Resources: + Limits: {} + Reservations: {} + RestartPolicy: + Condition: "any" + MaxAttempts: 0 + Placement: {} + ForceUpdate: 0 + Mode: + Replicated: + Replicas: 1 + UpdateConfig: + Parallelism: 1 + Delay: 1000000000 + FailureAction: "pause" + Monitor: 15000000000 + MaxFailureRatio: 0.15 + RollbackConfig: + Parallelism: 1 + Delay: 1000000000 + FailureAction: "pause" + Monitor: 15000000000 + MaxFailureRatio: 0.15 + EndpointSpec: + Mode: "vip" + Ports: + - + Protocol: "tcp" + TargetPort: 6379 + PublishedPort: 30001 + Endpoint: + Spec: + Mode: "vip" + Ports: + - + Protocol: "tcp" + TargetPort: 6379 + PublishedPort: 30001 + Ports: + - + Protocol: "tcp" + TargetPort: 6379 + PublishedPort: 30001 + VirtualIPs: + - + NetworkID: "4qvuz4ko70xaltuqbt8956gd1" + Addr: "10.255.0.2/16" + - + NetworkID: "4qvuz4ko70xaltuqbt8956gd1" + Addr: "10.255.0.3/16" + ImageDeleteResponseItem: + type: "object" + properties: + Untagged: + description: "The image ID of an image that was untagged" + type: "string" + Deleted: + description: "The image ID of an image that was deleted" + type: "string" + ServiceUpdateResponse: + type: "object" + properties: + Warnings: + description: "Optional warning messages" + type: "array" + items: + type: "string" + example: + Warning: "unable to pin image doesnotexist:latest to digest: image library/doesnotexist:latest not found" + ContainerSummary: + type: "array" + items: + type: "object" + properties: + Id: + description: "The ID of this container" + type: "string" + x-go-name: "ID" + Names: + description: "The names that this container has been given" + type: "array" + items: + type: "string" + Image: + description: "The name of the image used when creating this container" + type: "string" + ImageID: + description: "The ID of the image that this container was created from" + type: "string" + Command: + description: "Command to run when starting the container" + type: "string" + Created: + description: "When the container was created" + type: "integer" + format: "int64" + Ports: + description: "The ports exposed by this container" + type: "array" + items: + $ref: "#/definitions/Port" + SizeRw: + description: "The size of files that have been created or changed by this container" + type: "integer" + format: "int64" + SizeRootFs: + description: "The total size of all the files in this container" + type: "integer" + format: "int64" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + State: + description: "The state of this container (e.g. `Exited`)" + type: "string" + Status: + description: "Additional human-readable status of this container (e.g. 
`Exit 0`)" + type: "string" + HostConfig: + type: "object" + properties: + NetworkMode: + type: "string" + NetworkSettings: + description: "A summary of the container's network settings" + type: "object" + properties: + Networks: + type: "object" + additionalProperties: + $ref: "#/definitions/EndpointSettings" + Mounts: + type: "array" + items: + $ref: "#/definitions/Mount" + SecretSpec: + type: "object" + properties: + Name: + description: "User-defined name of the secret." + type: "string" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + Data: + description: "Base64-url-safe-encoded secret data" + type: "array" + items: + type: "string" + Secret: + type: "object" + properties: + ID: + type: "string" + Version: + $ref: "#/definitions/ObjectVersion" + CreatedAt: + type: "string" + format: "dateTime" + UpdatedAt: + type: "string" + format: "dateTime" + Spec: + $ref: "#/definitions/ServiceSpec" +paths: + /containers/json: + get: + summary: "List containers" + operationId: "ContainerList" + produces: + - "application/json" + parameters: + - name: "all" + in: "query" + description: "Return all containers. By default, only running containers are shown" + type: "boolean" + default: false + - name: "limit" + in: "query" + description: "Return this number of most recently created containers, including non-running ones." + type: "integer" + - name: "size" + in: "query" + description: "Return the size of container as fields `SizeRw` and `SizeRootFs`." + type: "boolean" + default: false + - name: "filters" + in: "query" + description: | + Filters to process on the container list, encoded as JSON (a `map[string][]string`). For example, `{"status": ["paused"]}` will only return paused containers. 
+ responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/ContainerSummary" + examples: + application/json: + - Id: "8dfafdbc3a40" + Names: + - "/boring_feynman" + Image: "ubuntu:latest" + ImageID: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82" + Command: "echo 1" + Created: 1367854155 + State: "Exited" + Status: "Exit 0" + Ports: + - PrivatePort: 2222 + PublicPort: 3333 + Type: "tcp" + Labels: + com.example.vendor: "Acme" + com.example.license: "GPL" + com.example.version: "1.0" + SizeRw: 12288 + SizeRootFs: 0 + HostConfig: + NetworkMode: "default" + NetworkSettings: + Networks: + bridge: + NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" + EndpointID: "2cdc4edb1ded3631c81f57966563e5c8525b81121bb3706a9a9a3ae102711f3f" + Gateway: "172.17.0.1" + IPAddress: "172.17.0.2" + IPPrefixLen: 16 + IPv6Gateway: "" + GlobalIPv6Address: "" + GlobalIPv6PrefixLen: 0 + MacAddress: "02:42:ac:11:00:02" + Mounts: + - Name: "fac362...80535" + Source: "/data" + Destination: "/data" + Driver: "local" + Mode: "ro,Z" + RW: false + Propagation: "" + - Id: "9cd87474be90" + Names: + - "/coolName" + Image: "ubuntu:latest" + ImageID: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82" + Command: "echo 222222" + Created: 1367854155 + State: "Exited" + Status: "Exit 0" + Ports: [] + Labels: {} + SizeRw: 12288 + SizeRootFs: 0 + HostConfig: + NetworkMode: "default" + NetworkSettings: + Networks: + bridge: + NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" + EndpointID: "88eaed7b37b38c2a3f0c4bc796494fdf51b270c2d22656412a2ca5d559a64d7a" + Gateway: "172.17.0.1" + IPAddress: "172.17.0.8" + IPPrefixLen: 16 + IPv6Gateway: "" + GlobalIPv6Address: "" + GlobalIPv6PrefixLen: 0 + MacAddress: "02:42:ac:11:00:08" + Mounts: [] + - Id: "3176a2479c92" + Names: + - "/sleepy_dog" + Image: "ubuntu:latest" + ImageID: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82" + Command: "echo 3333333333333333" + Created: 1367854154 + State: "Exited" + Status: "Exit 0" + Ports: [] + Labels: {} + SizeRw: 12288 + SizeRootFs: 0 + HostConfig: + NetworkMode: "default" + NetworkSettings: + Networks: + bridge: + NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" + EndpointID: "8b27c041c30326d59cd6e6f510d4f8d1d570a228466f956edf7815508f78e30d" + Gateway: "172.17.0.1" + IPAddress: "172.17.0.6" + IPPrefixLen: 16 + IPv6Gateway: "" + GlobalIPv6Address: "" + GlobalIPv6PrefixLen: 0 + MacAddress: "02:42:ac:11:00:06" + Mounts: [] + - Id: "4cb07b47f9fb" + Names: + - "/running_cat" + Image: "ubuntu:latest" + ImageID: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82" + Command: "echo 444444444444444444444444444444444" + Created: 1367854152 + State: "Exited" + Status: "Exit 0" + Ports: [] + Labels: {} + SizeRw: 12288 + SizeRootFs: 0 + HostConfig: + NetworkMode: "default" +
NetworkSettings: + Networks: + bridge: + NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" + EndpointID: "d91c7b2f0644403d7ef3095985ea0e2370325cd2332ff3a3225c4247328e66e9" + Gateway: "172.17.0.1" + IPAddress: "172.17.0.5" + IPPrefixLen: 16 + IPv6Gateway: "" + GlobalIPv6Address: "" + GlobalIPv6PrefixLen: 0 + MacAddress: "02:42:ac:11:00:05" + Mounts: [] + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Container"] + /containers/create: + post: + summary: "Create a container" + operationId: "ContainerCreate" + consumes: + - "application/json" + - "application/octet-stream" + produces: + - "application/json" + parameters: + - name: "name" + in: "query" + description: "Assign the specified name to the container. Must match `/?[a-zA-Z0-9_-]+`." + type: "string" + pattern: "/?[a-zA-Z0-9_-]+" + - name: "body" + in: "body" + description: "Container to create" + schema: + allOf: + - $ref: "#/definitions/Config" + - type: "object" + properties: + HostConfig: + $ref: "#/definitions/HostConfig" + NetworkingConfig: + description: "This container's networking configuration." + type: "object" + properties: + EndpointsConfig: + description: "A mapping of network name to endpoint configuration for that network." + type: "object" + additionalProperties: + $ref: "#/definitions/EndpointSettings" + example: + Hostname: "" + Domainname: "" + User: "" + AttachStdin: false + AttachStdout: true + AttachStderr: true + Tty: false + OpenStdin: false + StdinOnce: false + Env: + - "FOO=bar" + - "BAZ=quux" + Cmd: + - "date" + Entrypoint: "" + Image: "ubuntu" + Labels: + com.example.vendor: "Acme" + com.example.license: "GPL" + com.example.version: "1.0" + Volumes: + /volumes/data: {} + WorkingDir: "" + NetworkDisabled: false + MacAddress: "12:34:56:78:9a:bc" + ExposedPorts: + 22/tcp: {} + StopSignal: "SIGTERM" + StopTimeout: 10 + HostConfig: + Binds: + - "/tmp:/tmp" + Links: + - "redis3:redis" + Memory: 0 + MemorySwap: 0 + MemoryReservation: 0 + KernelMemory: 0 + NanoCPUs: 500000 + CpuPercent: 80 + CpuShares: 512 + CpuPeriod: 100000 + CpuRealtimePeriod: 1000000 + CpuRealtimeRuntime: 10000 + CpuQuota: 50000 + CpusetCpus: "0,1" + CpusetMems: "0,1" + MaximumIOps: 0 + MaximumIOBps: 0 + BlkioWeight: 300 + BlkioWeightDevice: + - {} + BlkioDeviceReadBps: + - {} + BlkioDeviceReadIOps: + - {} + BlkioDeviceWriteBps: + - {} + BlkioDeviceWriteIOps: + - {} + MemorySwappiness: 60 + OomKillDisable: false + OomScoreAdj: 500 + PidMode: "" + PidsLimit: -1 + PortBindings: + 22/tcp: + - HostPort: "11022" + PublishAllPorts: false + Privileged: false + ReadonlyRootfs: false + Dns: + - "8.8.8.8" + DnsOptions: + - "" + DnsSearch: + - "" + VolumesFrom: + - "parent" + - "other:ro" + CapAdd: + - "NET_ADMIN" + CapDrop: + - "MKNOD" + GroupAdd: + - "newgroup" + RestartPolicy: + Name: "" + MaximumRetryCount: 0 + AutoRemove: true + NetworkMode: "bridge" + Devices: [] + Ulimits: + - {} + LogConfig: + Type: "json-file" + Config: {} + SecurityOpt: [] + StorageOpt: {} + CgroupParent: "" + VolumeDriver: "" + ShmSize: 67108864 + NetworkingConfig: + EndpointsConfig: + isolated_nw: + IPAMConfig: + IPv4Address: "172.20.30.33" + IPv6Address: "2001:db8:abcd::3033" + LinkLocalIPs: + - "169.254.34.68" + - "fe80::3468" + Links: + - "container_1" + - "container_2" + Aliases: + - "server_x" + - "server_y" + + required: true + responses: + 201: + description: "Container created successfully" + schema: + type: 
"object" + required: [Id, Warnings] + properties: + Id: + description: "The ID of the created container" + type: "string" + x-nullable: false + Warnings: + description: "Warnings encountered when creating the container" + type: "array" + x-nullable: false + items: + type: "string" + examples: + application/json: + Id: "e90e34656806" + Warnings: [] + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 406: + description: "impossible to attach" + schema: + $ref: "#/definitions/ErrorResponse" + 409: + description: "conflict" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Container"] + /containers/{id}/json: + get: + summary: "Inspect a container" + description: "Return low-level information about a container." + operationId: "ContainerInspect" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + type: "object" + properties: + Id: + description: "The ID of the container" + type: "string" + Created: + description: "The time the container was created" + type: "string" + Path: + description: "The path to the command being run" + type: "string" + Args: + description: "The arguments to the command being run" + type: "array" + items: + type: "string" + State: + description: "The state of the container." + type: "object" + properties: + Status: + description: | + The status of the container. For example, `"running"` or `"exited"`. + type: "string" + enum: ["created", "running", "paused", "restarting", "removing", "exited", "dead"] + Running: + description: | + Whether this container is running. + + Note that a running container can be _paused_. The `Running` and `Paused` + booleans are not mutually exclusive: + + When pausing a container (on Linux), the cgroups freezer is used to suspend + all processes in the container. Freezing the process requires the process to + be running. As a result, paused containers are both `Running` _and_ `Paused`. + + Use the `Status` field instead to determin if a container's state is "running". + type: "boolean" + Paused: + description: "Whether this container is paused." + type: "boolean" + Restarting: + description: "Whether this container is restarting." + type: "boolean" + OOMKilled: + description: "Whether this container has been killed because it ran out of memory." + type: "boolean" + Dead: + type: "boolean" + Pid: + description: "The process ID of this container" + type: "integer" + ExitCode: + description: "The last exit code of this container" + type: "integer" + Error: + type: "string" + StartedAt: + description: "The time when this container was last started." + type: "string" + FinishedAt: + description: "The time when this container last exited." 
+ type: "string" + Image: + description: "The container's image" + type: "string" + ResolvConfPath: + type: "string" + HostnamePath: + type: "string" + HostsPath: + type: "string" + LogPath: + type: "string" + Node: + description: "TODO" + type: "object" + Name: + type: "string" + RestartCount: + type: "integer" + Driver: + type: "string" + MountLabel: + type: "string" + ProcessLabel: + type: "string" + AppArmorProfile: + type: "string" + ExecIDs: + type: "string" + HostConfig: + $ref: "#/definitions/HostConfig" + GraphDriver: + $ref: "#/definitions/GraphDriverData" + SizeRw: + description: "The size of files that have been created or changed by this container." + type: "integer" + format: "int64" + SizeRootFs: + description: "The total size of all the files in this container." + type: "integer" + format: "int64" + Mounts: + type: "array" + items: + $ref: "#/definitions/MountPoint" + Config: + $ref: "#/definitions/Config" + NetworkSettings: + $ref: "#/definitions/NetworkConfig" + examples: + application/json: + AppArmorProfile: "" + Args: + - "-c" + - "exit 9" + Config: + AttachStderr: true + AttachStdin: false + AttachStdout: true + Cmd: + - "/bin/sh" + - "-c" + - "exit 9" + Domainname: "" + Env: + - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + Hostname: "ba033ac44011" + Image: "ubuntu" + Labels: + com.example.vendor: "Acme" + com.example.license: "GPL" + com.example.version: "1.0" + MacAddress: "" + NetworkDisabled: false + OpenStdin: false + StdinOnce: false + Tty: false + User: "" + Volumes: + /volumes/data: {} + WorkingDir: "" + StopSignal: "SIGTERM" + StopTimeout: 10 + Created: "2015-01-06T15:47:31.485331387Z" + Driver: "devicemapper" + HostConfig: + MaximumIOps: 0 + MaximumIOBps: 0 + BlkioWeight: 0 + BlkioWeightDevice: + - {} + BlkioDeviceReadBps: + - {} + BlkioDeviceWriteBps: + - {} + BlkioDeviceReadIOps: + - {} + BlkioDeviceWriteIOps: + - {} + ContainerIDFile: "" + CpusetCpus: "" + CpusetMems: "" + CpuPercent: 80 + CpuShares: 0 + CpuPeriod: 100000 + CpuRealtimePeriod: 1000000 + CpuRealtimeRuntime: 10000 + Devices: [] + IpcMode: "" + LxcConf: [] + Memory: 0 + MemorySwap: 0 + MemoryReservation: 0 + KernelMemory: 0 + OomKillDisable: false + OomScoreAdj: 500 + NetworkMode: "bridge" + PidMode: "" + PortBindings: {} + Privileged: false + ReadonlyRootfs: false + PublishAllPorts: false + RestartPolicy: + MaximumRetryCount: 2 + Name: "on-failure" + LogConfig: + Type: "json-file" + Sysctls: + net.ipv4.ip_forward: "1" + Ulimits: + - {} + VolumeDriver: "" + ShmSize: 67108864 + HostnamePath: "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hostname" + HostsPath: "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hosts" + LogPath: "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b-json.log" + Id: "ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39" + Image: "04c5d3b7b0656168630d3ba35d8889bd0e9caafcaeb3004d2bfbc47e7c5d35d2" + MountLabel: "" + Name: "/boring_euclid" + NetworkSettings: + Bridge: "" + SandboxID: "" + HairpinMode: false + LinkLocalIPv6Address: "" + LinkLocalIPv6PrefixLen: 0 + SandboxKey: "" + SecondaryIPAddresses: null + SecondaryIPv6Addresses: null + EndpointID: "" + Gateway: "" + GlobalIPv6Address: "" + GlobalIPv6PrefixLen: 0 + IPAddress: "" + IPPrefixLen: 0 + IPv6Gateway: "" + MacAddress: "" + Networks: + bridge: + NetworkID: 
"7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" + EndpointID: "7587b82f0dada3656fda26588aee72630c6fab1536d36e394b2bfbcf898c971d" + Gateway: "172.17.0.1" + IPAddress: "172.17.0.2" + IPPrefixLen: 16 + IPv6Gateway: "" + GlobalIPv6Address: "" + GlobalIPv6PrefixLen: 0 + MacAddress: "02:42:ac:12:00:02" + Path: "/bin/sh" + ProcessLabel: "" + ResolvConfPath: "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/resolv.conf" + RestartCount: 1 + State: + Error: "" + ExitCode: 9 + FinishedAt: "2015-01-06T15:47:32.080254511Z" + OOMKilled: false + Dead: false + Paused: false + Pid: 0 + Restarting: false + Running: true + StartedAt: "2015-01-06T15:47:32.072697474Z" + Status: "running" + Mounts: + - Name: "fac362...80535" + Source: "/data" + Destination: "/data" + Driver: "local" + Mode: "ro,Z" + RW: false + Propagation: "" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "size" + in: "query" + type: "boolean" + default: false + description: "Return the size of container as fields `SizeRw` and `SizeRootFs`" + tags: ["Container"] + /containers/{id}/top: + get: + summary: "List processes running inside a container" + description: "On Unix systems, this is done by running the `ps` command. This endpoint is not supported on Windows." + operationId: "ContainerTop" + responses: + 200: + description: "no error" + schema: + type: "object" + properties: + Titles: + description: "The ps column titles" + type: "array" + items: + type: "string" + Processes: + description: "Each process running in the container, where each is process is an array of values corresponding to the titles" + type: "array" + items: + type: "array" + items: + type: "string" + examples: + application/json: + Titles: + - "UID" + - "PID" + - "PPID" + - "C" + - "STIME" + - "TTY" + - "TIME" + - "CMD" + Processes: + - + - "root" + - "13642" + - "882" + - "0" + - "17:03" + - "pts/0" + - "00:00:00" + - "/bin/bash" + - + - "root" + - "13735" + - "13642" + - "0" + - "17:06" + - "pts/0" + - "00:00:00" + - "sleep 10" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "ps_args" + in: "query" + description: "The arguments to pass to `ps`. For example, `aux`" + type: "string" + default: "-ef" + tags: ["Container"] + /containers/{id}/logs: + get: + summary: "Get container logs" + description: | + Get `stdout` and `stderr` logs from a container. + + Note: This endpoint works only for containers with the `json-file` or `journald` logging driver. 
+ operationId: "ContainerLogs" + responses: + 101: + description: "logs returned as a stream" + schema: + type: "string" + format: "binary" + 200: + description: "logs returned as a string in response body" + schema: + type: "string" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "follow" + in: "query" + description: | + Return the logs as a stream. + + This will return a `101` HTTP response with a `Connection: upgrade` header, then hijack the HTTP connection to send raw output. For more information about hijacking and the stream format, [see the documentation for the attach endpoint](#operation/ContainerAttach). + type: "boolean" + default: false + - name: "stdout" + in: "query" + description: "Return logs from `stdout`" + type: "boolean" + default: false + - name: "stderr" + in: "query" + description: "Return logs from `stderr`" + type: "boolean" + default: false + - name: "since" + in: "query" + description: "Only return logs since this time, as a UNIX timestamp" + type: "integer" + default: 0 + - name: "timestamps" + in: "query" + description: "Add timestamps to every log line" + type: "boolean" + default: false + - name: "tail" + in: "query" + description: "Only return this number of log lines from the end of the logs. Specify as an integer or `all` to output all log lines." + type: "string" + default: "all" + tags: ["Container"] + /containers/{id}/changes: + get: + summary: "Get changes on a container’s filesystem" + description: | + Returns which files in a container's filesystem have been added, deleted, + or modified. The `Kind` of modification can be one of: + + - `0`: Modified + - `1`: Added + - `2`: Deleted + operationId: "ContainerChanges" + produces: ["application/json"] + responses: + 200: + description: "The list of changes" + schema: + type: "array" + items: + type: "object" + x-go-name: "ContainerChangeResponseItem" + required: [Path, Kind] + properties: + Path: + description: "Path to file that has changed" + type: "string" + x-nullable: false + Kind: + description: "Kind of change" + type: "integer" + format: "uint8" + enum: [0, 1, 2] + x-nullable: false + examples: + application/json: + - Path: "/dev" + Kind: 0 + - Path: "/dev/kmsg" + Kind: 1 + - Path: "/test" + Kind: 1 + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + tags: ["Container"] + /containers/{id}/export: + get: + summary: "Export a container" + description: "Export the contents of a container as a tarball." 
+ operationId: "ContainerExport" + produces: + - "application/octet-stream" + responses: + 200: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + tags: ["Container"] + /containers/{id}/stats: + get: + summary: "Get container stats based on resource usage" + description: | + This endpoint returns a live stream of a container’s resource usage + statistics. + + The `precpu_stats` is the CPU statistic of last read, which is used + for calculating the CPU usage percentage. It is not the same as the + `cpu_stats` field. + + If either `precpu_stats.online_cpus` or `cpu_stats.online_cpus` is + nil then for compatibility with older daemons the length of the + corresponding `cpu_usage.percpu_usage` array should be used. + operationId: "ContainerStats" + produces: ["application/json"] + responses: + 200: + description: "no error" + schema: + type: "object" + examples: + application/json: + read: "2015-01-08T22:57:31.547920715Z" + pids_stats: + current: 3 + networks: + eth0: + rx_bytes: 5338 + rx_dropped: 0 + rx_errors: 0 + rx_packets: 36 + tx_bytes: 648 + tx_dropped: 0 + tx_errors: 0 + tx_packets: 8 + eth5: + rx_bytes: 4641 + rx_dropped: 0 + rx_errors: 0 + rx_packets: 26 + tx_bytes: 690 + tx_dropped: 0 + tx_errors: 0 + tx_packets: 9 + memory_stats: + stats: + total_pgmajfault: 0 + cache: 0 + mapped_file: 0 + total_inactive_file: 0 + pgpgout: 414 + rss: 6537216 + total_mapped_file: 0 + writeback: 0 + unevictable: 0 + pgpgin: 477 + total_unevictable: 0 + pgmajfault: 0 + total_rss: 6537216 + total_rss_huge: 6291456 + total_writeback: 0 + total_inactive_anon: 0 + rss_huge: 6291456 + hierarchical_memory_limit: 67108864 + total_pgfault: 964 + total_active_file: 0 + active_anon: 6537216 + total_active_anon: 6537216 + total_pgpgout: 414 + total_cache: 0 + inactive_anon: 0 + active_file: 0 + pgfault: 964 + inactive_file: 0 + total_pgpgin: 477 + max_usage: 6651904 + usage: 6537216 + failcnt: 0 + limit: 67108864 + blkio_stats: {} + cpu_stats: + cpu_usage: + percpu_usage: + - 8646879 + - 24472255 + - 36438778 + - 30657443 + usage_in_usermode: 50000000 + total_usage: 100215355 + usage_in_kernelmode: 30000000 + system_cpu_usage: 739306590000000 + online_cpus: 4 + throttling_data: + periods: 0 + throttled_periods: 0 + throttled_time: 0 + precpu_stats: + cpu_usage: + percpu_usage: + - 8646879 + - 24350896 + - 36438778 + - 30657443 + usage_in_usermode: 50000000 + total_usage: 100093996 + usage_in_kernelmode: 30000000 + system_cpu_usage: 9492140000000 + online_cpus: 4 + throttling_data: + periods: 0 + throttled_periods: 0 + throttled_time: 0 + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "stream" + in: "query" + description: "Stream the output. If false, the stats will be output once and then it will disconnect." 
+ type: "boolean" + default: true + tags: ["Container"] + /containers/{id}/resize: + post: + summary: "Resize a container TTY" + description: "Resize the TTY for a container. You must restart the container for the resize to take effect." + operationId: "ContainerResize" + consumes: + - "application/octet-stream" + produces: + - "text/plain" + responses: + 200: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "cannot resize container" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "h" + in: "query" + description: "Height of the tty session in characters" + type: "integer" + - name: "w" + in: "query" + description: "Width of the tty session in characters" + type: "integer" + tags: ["Container"] + /containers/{id}/start: + post: + summary: "Start a container" + operationId: "ContainerStart" + responses: + 204: + description: "no error" + 304: + description: "container already started" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "detachKeys" + in: "query" + description: "Override the key sequence for detaching a container. Format is a single character `[a-Z]` or `ctrl-` where `` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`." + type: "string" + tags: ["Container"] + /containers/{id}/stop: + post: + summary: "Stop a container" + operationId: "ContainerStop" + responses: + 204: + description: "no error" + 304: + description: "container already stopped" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "t" + in: "query" + description: "Number of seconds to wait before killing the container" + type: "integer" + tags: ["Container"] + /containers/{id}/restart: + post: + summary: "Restart a container" + operationId: "ContainerRestart" + responses: + 204: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "t" + in: "query" + description: "Number of seconds to wait before killing the container" + type: "integer" + tags: ["Container"] + /containers/{id}/kill: + post: + summary: "Kill a container" + description: "Send a POSIX signal to a container, defaulting to killing to the container." 
+ operationId: "ContainerKill" + responses: + 204: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "signal" + in: "query" + description: "Signal to send to the container as an integer or string (e.g. `SIGINT`)" + type: "string" + default: "SIGKILL" + tags: ["Container"] + /containers/{id}/update: + post: + summary: "Update a container" + description: "Change various configuration options of a container without having to recreate it." + operationId: "ContainerUpdate" + consumes: ["application/json"] + produces: ["application/json"] + responses: + 200: + description: "The container has been updated." + schema: + type: "object" + properties: + Warnings: + type: "array" + items: + type: "string" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "update" + in: "body" + required: true + schema: + allOf: + - $ref: "#/definitions/Resources" + - type: "object" + properties: + RestartPolicy: + $ref: "#/definitions/RestartPolicy" + example: + BlkioWeight: 300 + CpuShares: 512 + CpuPeriod: 100000 + CpuQuota: 50000 + CpuRealtimePeriod: 1000000 + CpuRealtimeRuntime: 10000 + CpusetCpus: "0,1" + CpusetMems: "0" + Memory: 314572800 + MemorySwap: 514288000 + MemoryReservation: 209715200 + KernelMemory: 52428800 + RestartPolicy: + MaximumRetryCount: 4 + Name: "on-failure" + tags: ["Container"] + /containers/{id}/rename: + post: + summary: "Rename a container" + operationId: "ContainerRename" + responses: + 204: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 409: + description: "name already in use" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "name" + in: "query" + required: true + description: "New name for the container" + type: "string" + tags: ["Container"] + /containers/{id}/pause: + post: + summary: "Pause a container" + description: | + Use the cgroups freezer to suspend all processes in a container. + + Traditionally, when suspending a process the `SIGSTOP` signal is used, which is observable by the process being suspended. With the cgroups freezer the process is unaware, and unable to capture, that it is being suspended, and subsequently resumed. 
+ operationId: "ContainerPause" + responses: + 204: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + tags: ["Container"] + /containers/{id}/unpause: + post: + summary: "Unpause a container" + description: "Resume a container which has been paused." + operationId: "ContainerUnpause" + responses: + 204: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + tags: ["Container"] + /containers/{id}/attach: + post: + summary: "Attach to a container" + description: | + Attach to a container to read its output or send it input. You can attach to the same container multiple times and you can reattach to containers that have been detached. + + Either the `stream` or `logs` parameter must be `true` for this endpoint to do anything. + + See [the documentation for the `docker attach` command](https://docs.docker.com/engine/reference/commandline/attach/) for more details. + + ### Hijacking + + This endpoint hijacks the HTTP connection to transport `stdin`, `stdout`, and `stderr` on the same socket. + + This is the response from the daemon for an attach request: + + ``` + HTTP/1.1 200 OK + Content-Type: application/vnd.docker.raw-stream + + [STREAM] + ``` + + After the headers and two new lines, the TCP connection can now be used for raw, bidirectional communication between the client and server. + + To hint potential proxies about connection hijacking, the Docker client can also optionally send connection upgrade headers. + + For example, the client sends this request to upgrade the connection: + + ``` + POST /containers/16253994b7c4/attach?stream=1&stdout=1 HTTP/1.1 + Upgrade: tcp + Connection: Upgrade + ``` + + The Docker daemon will respond with a `101 UPGRADED` response, and will similarly follow with the raw stream: + + ``` + HTTP/1.1 101 UPGRADED + Content-Type: application/vnd.docker.raw-stream + Connection: Upgrade + Upgrade: tcp + + [STREAM] + ``` + + ### Stream format + + When the TTY setting is disabled in [`POST /containers/create`](#operation/ContainerCreate), the stream over the hijacked connected is multiplexed to separate out `stdout` and `stderr`. The stream consists of a series of frames, each containing a header and a payload. + + The header contains the information which the stream writes (`stdout` or `stderr`). It also contains the size of the associated frame encoded in the last four bytes (`uint32`). + + It is encoded on the first eight bytes like this: + + ```go + header := [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4} + ``` + + `STREAM_TYPE` can be: + + - 0: `stdin` (is written on `stdout`) + - 1: `stdout` + - 2: `stderr` + + `SIZE1, SIZE2, SIZE3, SIZE4` are the four bytes of the `uint32` size encoded as big endian. + + Following the header is the payload, which is the specified number of bytes of `STREAM_TYPE`. 
+ + The simplest way to implement this protocol is the following: + + 1. Read 8 bytes. + 2. Choose `stdout` or `stderr` depending on the first byte. + 3. Extract the frame size from the last four bytes. + 4. Read the extracted size and output it on the correct output. + 5. Goto 1. + + ### Stream format when using a TTY + + When the TTY setting is enabled in [`POST /containers/create`](#operation/ContainerCreate), the stream is not multiplexed. The data exchanged over the hijacked connection is simply the raw data from the process PTY and client's `stdin`. + + operationId: "ContainerAttach" + produces: + - "application/vnd.docker.raw-stream" + responses: + 101: + description: "no error, hints proxy about hijacking" + 200: + description: "no error, no upgrade header found" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "detachKeys" + in: "query" + description: "Override the key sequence for detaching a container. Format is a single character `[a-Z]` or `ctrl-<value>` where `<value>` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`." + type: "string" + - name: "logs" + in: "query" + description: | + Replay previous logs from the container. + + This is useful for attaching to a container that has already started, when you want to output everything since the container started. + + If `stream` is also enabled, once all the previous output has been returned, it will seamlessly transition into streaming current output. + type: "boolean" + default: false + - name: "stream" + in: "query" + description: "Stream attached streams from the time the request was made onwards" + type: "boolean" + default: false + - name: "stdin" + in: "query" + description: "Attach to `stdin`" + type: "boolean" + default: false + - name: "stdout" + in: "query" + description: "Attach to `stdout`" + type: "boolean" + default: false + - name: "stderr" + in: "query" + description: "Attach to `stderr`" + type: "boolean" + default: false + tags: ["Container"] + /containers/{id}/attach/ws: + get: + summary: "Attach to a container via a websocket" + operationId: "ContainerAttachWebsocket" + responses: + 101: + description: "no error, hints proxy about hijacking" + 200: + description: "no error, no upgrade header found" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "detachKeys" + in: "query" + description: "Override the key sequence for detaching a container. Format is a single character `[a-Z]` or `ctrl-<value>` where `<value>` is one of: `a-z`, `@`, `^`, `[`, `,`, or `_`."
+ type: "string" + - name: "logs" + in: "query" + description: "Return logs" + type: "boolean" + default: false + - name: "stream" + in: "query" + description: "Return stream" + type: "boolean" + default: false + - name: "stdin" + in: "query" + description: "Attach to `stdin`" + type: "boolean" + default: false + - name: "stdout" + in: "query" + description: "Attach to `stdout`" + type: "boolean" + default: false + - name: "stderr" + in: "query" + description: "Attach to `stderr`" + type: "boolean" + default: false + tags: ["Container"] + /containers/{id}/wait: + post: + summary: "Wait for a container" + description: "Block until a container stops, then returns the exit code." + operationId: "ContainerWait" + produces: ["application/json"] + responses: + 200: + description: "The container has exit." + schema: + type: "object" + required: [StatusCode] + properties: + StatusCode: + description: "Exit code of the container" + type: "integer" + x-nullable: false + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "condition" + in: "query" + description: "Wait until a container state reaches the given condition, either 'not-running' (default), 'next-exit', or 'removed'." + type: "string" + default: "not-running" + tags: ["Container"] + /containers/{id}: + delete: + summary: "Remove a container" + operationId: "ContainerDelete" + responses: + 204: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 409: + description: "conflict" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "You cannot remove a running container: c2ada9df5af8. Stop the container before attempting removal or force remove" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "v" + in: "query" + description: "Remove the volumes associated with the container." + type: "boolean" + default: false + - name: "force" + in: "query" + description: "If the container is running, kill it before removing it." + type: "boolean" + default: false + - name: "link" + in: "query" + description: "Remove the specified link associated with the container." + type: "boolean" + default: false + tags: ["Container"] + /containers/{id}/archive: + head: + summary: "Get information about files in a container" + description: "A response header `X-Docker-Container-Path-Stat` is return containing a base64 - encoded JSON object with some filesystem header information about the path." + operationId: "ContainerArchiveInfo" + responses: + 200: + description: "no error" + headers: + X-Docker-Container-Path-Stat: + type: "string" + description: "TODO" + 400: + description: "Bad parameter" + schema: + allOf: + - $ref: "#/definitions/ErrorResponse" + - type: "object" + properties: + message: + description: "The error message. 
Either \"must specify path parameter\" (path cannot be empty) or \"not a directory\" (path was asserted to be a directory but exists as a file)." + type: "string" + x-nullable: false + 404: + description: "Container or path does not exist" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "path" + in: "query" + required: true + description: "Resource in the container’s filesystem to archive." + type: "string" + tags: ["Container"] + get: + summary: "Get an archive of a filesystem resource in a container" + description: "Get a tar archive of a resource in the filesystem of container id." + operationId: "ContainerArchive" + produces: ["application/x-tar"] + responses: + 200: + description: "no error" + 400: + description: "Bad parameter" + schema: + allOf: + - $ref: "#/definitions/ErrorResponse" + - type: "object" + properties: + message: + description: "The error message. Either \"must specify path parameter\" (path cannot be empty) or \"not a directory\" (path was asserted to be a directory but exists as a file)." + type: "string" + x-nullable: false + 404: + description: "Container or path does not exist" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "path" + in: "query" + required: true + description: "Resource in the container’s filesystem to archive." + type: "string" + tags: ["Container"] + put: + summary: "Extract an archive of files or folders to a directory in a container" + description: "Upload a tar archive to be extracted to a path in the filesystem of container id." + operationId: "PutContainerArchive" + consumes: ["application/x-tar", "application/octet-stream"] + responses: + 200: + description: "The content was extracted successfully" + 400: + description: "Bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 403: + description: "Permission denied, the volume or container rootfs is marked as read-only." + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "No such container or path does not exist inside the container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "path" + in: "query" + required: true + description: "Path to a directory in the container to extract the archive’s contents into. " + type: "string" + - name: "noOverwriteDirNonDir" + in: "query" + description: "If “1”, “true”, or “True” then it will be an error if unpacking the given content would cause an existing directory to be replaced with a non-directory and vice versa." + type: "string" + - name: "inputStream" + in: "body" + required: true + description: "The input stream must be a tar archive compressed with one of the following algorithms: identity (no compression), gzip, bzip2, xz." 
+ schema: + type: "string" + tags: ["Container"] + /containers/prune: + post: + summary: "Delete stopped containers" + produces: + - "application/json" + operationId: "ContainerPrune" + parameters: + - name: "filters" + in: "query" + description: | + Filters to process on the prune list, encoded as JSON (a `map[string][]string`). + + Available filters: + - `until=<timestamp>` Prune containers created before this timestamp. The `<timestamp>` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the daemon machine’s time. + - `label` (`label=<key>`, `label=<key>=<value>`, `label!=<key>`, or `label!=<key>=<value>`) Prune containers with (or without, in case `label!=...` is used) the specified labels. + type: "string" + responses: + 200: + description: "No error" + schema: + type: "object" + properties: + ContainersDeleted: + description: "Container IDs that were deleted" + type: "array" + items: + type: "string" + SpaceReclaimed: + description: "Disk space reclaimed in bytes" + type: "integer" + format: "int64" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Container"] + /images/json: + get: + summary: "List Images" + description: "Returns a list of images on the server. Note that it uses a different, smaller representation of an image than inspecting a single image." + operationId: "ImageList" + produces: + - "application/json" + responses: + 200: + description: "Summary image data for the images matching the query" + schema: + type: "array" + items: + $ref: "#/definitions/ImageSummary" + examples: + application/json: + - Id: "sha256:e216a057b1cb1efc11f8a268f37ef62083e70b1b38323ba252e25ac88904a7e8" + ParentId: "" + RepoTags: + - "ubuntu:12.04" + - "ubuntu:precise" + RepoDigests: + - "ubuntu@sha256:992069aee4016783df6345315302fa59681aae51a8eeb2f889dea59290f21787" + Created: 1474925151 + Size: 103579269 + VirtualSize: 103579269 + SharedSize: 0 + Labels: {} + Containers: 2 + - Id: "sha256:3e314f95dcace0f5e4fd37b10862fe8398e3c60ed36600bc0ca5fda78b087175" + ParentId: "" + RepoTags: + - "ubuntu:12.10" + - "ubuntu:quantal" + RepoDigests: + - "ubuntu@sha256:002fba3e3255af10be97ea26e476692a7ebed0bb074a9ab960b2e7a1526b15d7" + - "ubuntu@sha256:68ea0200f0b90df725d99d823905b04cf844f6039ef60c60bf3e019915017bd3" + Created: 1403128455 + Size: 172064416 + VirtualSize: 172064416 + SharedSize: 0 + Labels: {} + Containers: 5 + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "all" + in: "query" + description: "Show all images. Only images from a final layer (no children) are shown by default." + type: "boolean" + default: false + - name: "filters" + in: "query" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to process on the images list. Available filters: + + - `before`=(`<image-name>[:<tag>]`, `<image id>` or `<image@digest>`) + - `dangling=true` + - `label=key` or `label="key=value"` of an image label + - `reference`=(`<image-name>[:<tag>]`) + - `since`=(`<image-name>[:<tag>]`, `<image id>` or `<image@digest>`) + type: "string" + - name: "digests" + in: "query" + description: "Show digest information as a `RepoDigests` field on each image." + type: "boolean" + default: false + tags: ["Image"] + /build: + post: + summary: "Build an image" + description: | + Build an image from a tar archive with a `Dockerfile` in it. + + The `Dockerfile` specifies how the image is built from the tar archive. It is typically in the archive's root, but can be at a different path or have a different name by specifying the `dockerfile` parameter.
[See the `Dockerfile` reference for more information](https://docs.docker.com/engine/reference/builder/). + + The Docker daemon performs a preliminary validation of the `Dockerfile` before starting the build, and returns an error if the syntax is incorrect. After that, each instruction is run one-by-one until the ID of the new image is output. + + The build is canceled if the client drops the connection by quitting or being killed. + operationId: "ImageBuild" + consumes: + - "application/octet-stream" + produces: + - "application/json" + parameters: + - name: "inputStream" + in: "body" + description: "A tar archive compressed with one of the following algorithms: identity (no compression), gzip, bzip2, xz." + schema: + type: "string" + format: "binary" + - name: "dockerfile" + in: "query" + description: "Path within the build context to the `Dockerfile`. This is ignored if `remote` is specified and points to an external `Dockerfile`." + type: "string" + default: "Dockerfile" + - name: "t" + in: "query" + description: "A name and optional tag to apply to the image in the `name:tag` format. If you omit the tag the default `latest` value is assumed. You can provide several `t` parameters." + type: "string" + - name: "extrahosts" + in: "query" + description: "Extra hosts to add to /etc/hosts" + type: "string" + - name: "remote" + in: "query" + description: "A Git repository URI or HTTP/HTTPS context URI. If the URI points to a single text file, the file’s contents are placed into a file called `Dockerfile` and the image is built from that file. If the URI points to a tarball, the file is downloaded by the daemon and the contents therein used as the context for the build. If the URI points to a tarball and the `dockerfile` parameter is also specified, there must be a file with the corresponding path inside the tarball." + type: "string" + - name: "q" + in: "query" + description: "Suppress verbose build output." + type: "boolean" + default: false + - name: "nocache" + in: "query" + description: "Do not use the cache when building the image." + type: "boolean" + default: false + - name: "cachefrom" + in: "query" + description: "JSON array of images used for build cache resolution." + type: "string" + - name: "pull" + in: "query" + description: "Attempt to pull the image even if an older image exists locally." + type: "string" + - name: "rm" + in: "query" + description: "Remove intermediate containers after a successful build." + type: "boolean" + default: true + - name: "forcerm" + in: "query" + description: "Always remove intermediate containers, even upon failure." + type: "boolean" + default: false + - name: "memory" + in: "query" + description: "Set memory limit for build." + type: "integer" + - name: "memswap" + in: "query" + description: "Total memory (memory + swap). Set as `-1` to disable swap." + type: "integer" + - name: "cpushares" + in: "query" + description: "CPU shares (relative weight)." + type: "integer" + - name: "cpusetcpus" + in: "query" + description: "CPUs in which to allow execution (e.g., `0-3`, `0,1`)." + type: "string" + - name: "cpuperiod" + in: "query" + description: "The length of a CPU period in microseconds." + type: "integer" + - name: "cpuquota" + in: "query" + description: "Microseconds of CPU time that the container can get in a CPU period." + type: "integer" + - name: "buildargs" + in: "query" + description: "JSON map of string pairs for build-time variables. Users pass these values at build-time. 
Docker uses the buildargs as the environment context for commands run via the `Dockerfile` RUN instruction, or for variable expansion in other `Dockerfile` instructions. This is not meant for passing secret values. [Read more about the buildargs instruction.](https://docs.docker.com/engine/reference/builder/#arg)" + type: "string" + - name: "shmsize" + in: "query" + description: "Size of `/dev/shm` in bytes. The size must be greater than 0. If omitted the system uses 64MB." + type: "integer" + - name: "squash" + in: "query" + description: "Squash the resulting images layers into a single layer. *(Experimental release only.)*" + type: "boolean" + - name: "labels" + in: "query" + description: "Arbitrary key/value labels to set on the image, as a JSON map of string pairs." + type: "string" + - name: "networkmode" + in: "query" + description: "Sets the networking mode for the run commands during + build. Supported standard values are: `bridge`, `host`, `none`, and + `container:<name|id>`. Any other value is taken as a custom network's + name to which this container should connect." + type: "string" + - name: "Content-type" + in: "header" + type: "string" + enum: + - "application/x-tar" + default: "application/x-tar" + - name: "X-Registry-Config" + in: "header" + description: | + This is a base64-encoded JSON object with auth configurations for multiple registries that a build may refer to. + + The key is a registry URL, and the value is an auth configuration object, [as described in the authentication section](#section/Authentication). For example: + + ``` + { + "docker.example.com": { + "username": "janedoe", + "password": "hunter2" + }, + "https://index.docker.io/v1/": { + "username": "mobydock", + "password": "conta1n3rize14" + } + } + ``` + + Only the registry domain name (and port if not the default 443) is required. However, for legacy reasons, the Docker Hub registry must be specified with both a `https://` prefix and a `/v1/` suffix even though Docker will prefer to use the v2 registry API. + type: "string" + responses: + 200: + description: "no error" + 400: + description: "Bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Image"] + /images/create: + post: + summary: "Create an image" + description: "Create an image by either pulling it from a registry or importing it." + operationId: "ImageCreate" + consumes: + - "text/plain" + - "application/octet-stream" + produces: + - "application/json" + responses: + 200: + description: "no error" + 404: + description: "repository does not exist or no read access" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "fromImage" + in: "query" + description: "Name of the image to pull. The name may include a tag or digest. This parameter may only be used when pulling an image. The pull is cancelled if the HTTP connection is closed." + type: "string" + - name: "fromSrc" + in: "query" + description: "Source to import. The value may be a URL from which the image can be retrieved or `-` to read the image from the request body. This parameter may only be used when importing an image." + type: "string" + - name: "repo" + in: "query" + description: "Repository name given to an image when it is imported. The repo may include a tag. This parameter may only be used when importing an image."
+ type: "string" + - name: "tag" + in: "query" + description: "Tag or digest. If empty when pulling an image, this causes all tags for the given image to be pulled." + type: "string" + - name: "inputImage" + in: "body" + description: "Image content if the value `-` has been specified in fromSrc query parameter" + schema: + type: "string" + required: false + - name: "X-Registry-Auth" + in: "header" + description: "A base64-encoded auth configuration. [See the authentication section for details.](#section/Authentication)" + type: "string" + tags: ["Image"] + /images/{name}/json: + get: + summary: "Inspect an image" + description: "Return low-level information about an image." + operationId: "ImageInspect" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + $ref: "#/definitions/Image" + examples: + application/json: + Id: "sha256:85f05633ddc1c50679be2b16a0479ab6f7637f8884e0cfe0f4d20e1ebb3d6e7c" + Container: "cb91e48a60d01f1e27028b4fc6819f4f290b3cf12496c8176ec714d0d390984a" + Comment: "" + Os: "linux" + Architecture: "amd64" + Parent: "sha256:91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c" + ContainerConfig: + Tty: false + Hostname: "e611e15f9c9d" + Domainname: "" + AttachStdout: false + PublishService: "" + AttachStdin: false + OpenStdin: false + StdinOnce: false + NetworkDisabled: false + OnBuild: [] + Image: "91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c" + User: "" + WorkingDir: "" + MacAddress: "" + AttachStderr: false + Labels: + com.example.license: "GPL" + com.example.version: "1.0" + com.example.vendor: "Acme" + Env: + - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + Cmd: + - "/bin/sh" + - "-c" + - "#(nop) LABEL com.example.vendor=Acme com.example.license=GPL com.example.version=1.0" + DockerVersion: "1.9.0-dev" + VirtualSize: 188359297 + Size: 0 + Author: "" + Created: "2015-09-10T08:30:53.26995814Z" + GraphDriver: + Name: "aufs" + Data: {} + RepoDigests: + - "localhost:5000/test/busybox/example@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf" + RepoTags: + - "example:1.0" + - "example:latest" + - "example:stable" + Config: + Image: "91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c" + NetworkDisabled: false + OnBuild: [] + StdinOnce: false + PublishService: "" + AttachStdin: false + OpenStdin: false + Domainname: "" + AttachStdout: false + Tty: false + Hostname: "e611e15f9c9d" + Cmd: + - "/bin/bash" + Env: + - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + Labels: + com.example.vendor: "Acme" + com.example.version: "1.0" + com.example.license: "GPL" + MacAddress: "" + AttachStderr: false + WorkingDir: "" + User: "" + RootFS: + Type: "layers" + Layers: + - "sha256:1834950e52ce4d5a88a1bbd131c537f4d0e56d10ff0dd69e66be3b7dfa9df7e6" + - "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef" + 404: + description: "No such image" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such image: someimage (tag: latest)" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or id" + type: "string" + required: true + tags: ["Image"] + /images/{name}/history: + get: + summary: "Get the history of an image" + description: "Return parent layers of an image." 
+ operationId: "ImageHistory" + produces: ["application/json"] + responses: + 200: + description: "List of image layers" + schema: + type: "array" + items: + type: "object" + x-go-name: HistoryResponseItem + required: [Id, Created, CreatedBy, Tags, Size, Comment] + properties: + Id: + type: "string" + x-nullable: false + Created: + type: "integer" + format: "int64" + x-nullable: false + CreatedBy: + type: "string" + x-nullable: false + Tags: + type: "array" + items: + type: "string" + Size: + type: "integer" + format: "int64" + x-nullable: false + Comment: + type: "string" + x-nullable: false + examples: + application/json: + - Id: "3db9c44f45209632d6050b35958829c3a2aa256d81b9a7be45b362ff85c54710" + Created: 1398108230 + CreatedBy: "/bin/sh -c #(nop) ADD file:eb15dbd63394e063b805a3c32ca7bf0266ef64676d5a6fab4801f2e81e2a5148 in /" + Tags: + - "ubuntu:lucid" + - "ubuntu:10.04" + Size: 182964289 + Comment: "" + - Id: "6cfa4d1f33fb861d4d114f43b25abd0ac737509268065cdfd69d544a59c85ab8" + Created: 1398108222 + CreatedBy: "/bin/sh -c #(nop) MAINTAINER Tianon Gravi - mkimage-debootstrap.sh -i iproute,iputils-ping,ubuntu-minimal -t lucid.tar.xz lucid http://archive.ubuntu.com/ubuntu/" + Tags: [] + Size: 0 + Comment: "" + - Id: "511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158" + Created: 1371157430 + CreatedBy: "" + Tags: + - "scratch12:latest" + - "scratch:latest" + Size: 0 + Comment: "Imported from -" + 404: + description: "No such image" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or ID" + type: "string" + required: true + tags: ["Image"] + /images/{name}/push: + post: + summary: "Push an image" + description: | + Push an image to a registry. + + If you wish to push an image on to a private registry, that image must already have a tag which references the registry. For example, `registry.example.com/myimage:latest`. + + The push is cancelled if the HTTP connection is closed. + operationId: "ImagePush" + consumes: + - "application/octet-stream" + responses: + 200: + description: "No error" + 404: + description: "No such image" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or ID." + type: "string" + required: true + - name: "tag" + in: "query" + description: "The tag to associate with the image on the registry." + type: "string" + - name: "X-Registry-Auth" + in: "header" + description: "A base64-encoded auth configuration. [See the authentication section for details.](#section/Authentication)" + type: "string" + required: true + tags: ["Image"] + /images/{name}/tag: + post: + summary: "Tag an image" + description: "Tag an image so that it becomes part of a repository." + operationId: "ImageTag" + responses: + 201: + description: "No error" + 400: + description: "Bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "No such image" + schema: + $ref: "#/definitions/ErrorResponse" + 409: + description: "Conflict" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or ID to tag." + type: "string" + required: true + - name: "repo" + in: "query" + description: "The repository to tag in. 
For example, `someuser/someimage`." + type: "string" + - name: "tag" + in: "query" + description: "The name of the new tag." + type: "string" + tags: ["Image"] + /images/{name}: + delete: + summary: "Remove an image" + description: | + Remove an image, along with any untagged parent images that were + referenced by that image. + + Images can't be removed if they have descendant images, are being + used by a running container or are being used by a build. + operationId: "ImageDelete" + produces: ["application/json"] + responses: + 200: + description: "The image was deleted successfully" + schema: + type: "array" + items: + $ref: "#/definitions/ImageDeleteResponseItem" + examples: + application/json: + - Untagged: "3e2f21a89f" + - Deleted: "3e2f21a89f" + - Deleted: "53b4f83ac9" + 404: + description: "No such image" + schema: + $ref: "#/definitions/ErrorResponse" + 409: + description: "Conflict" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or ID" + type: "string" + required: true + - name: "force" + in: "query" + description: "Remove the image even if it is being used by stopped containers or has other tags" + type: "boolean" + default: false + - name: "noprune" + in: "query" + description: "Do not delete untagged parent images" + type: "boolean" + default: false + tags: ["Image"] + /images/search: + get: + summary: "Search images" + description: "Search for an image on Docker Hub." + operationId: "ImageSearch" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + type: "array" + items: + type: "object" + properties: + description: + type: "string" + is_official: + type: "boolean" + is_automated: + type: "boolean" + name: + type: "string" + star_count: + type: "integer" + examples: + application/json: + - description: "" + is_official: false + is_automated: false + name: "wma55/u1210sshd" + star_count: 0 + - description: "" + is_official: false + is_automated: false + name: "jdswinbank/sshd" + star_count: 0 + - description: "" + is_official: false + is_automated: false + name: "vgauthier/sshd" + star_count: 0 + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "term" + in: "query" + description: "Term to search" + type: "string" + required: true + - name: "limit" + in: "query" + description: "Maximum number of results to return" + type: "integer" + - name: "filters" + in: "query" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to process on the images list. Available filters: + + - `is-automated=(true|false)` + - `is-official=(true|false)` + - `stars=<number>` Matches images that have at least 'number' stars. + type: "string" + tags: ["Image"] + /images/prune: + post: + summary: "Delete unused images" + produces: + - "application/json" + operationId: "ImagePrune" + parameters: + - name: "filters" + in: "query" + description: | + Filters to process on the prune list, encoded as JSON (a `map[string][]string`). Available filters: + + - `dangling=<boolean>` When set to `true` (or `1`), prune only + unused *and* untagged images. When set to `false` + (or `0`), all unused images are pruned. + - `until=<timestamp>` Prune images created before this timestamp. The `<timestamp>` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the daemon machine’s time.
+ - `label` (`label=`, `label==`, `label!=`, or `label!==`) Prune images with (or without, in case `label!=...` is used) the specified labels. + type: "string" + responses: + 200: + description: "No error" + schema: + type: "object" + properties: + ImagesDeleted: + description: "Images that were deleted" + type: "array" + items: + $ref: "#/definitions/ImageDeleteResponseItem" + SpaceReclaimed: + description: "Disk space reclaimed in bytes" + type: "integer" + format: "int64" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Image"] + /auth: + post: + summary: "Check auth configuration" + description: "Validate credentials for a registry and, if available, get an identity token for accessing the registry without password." + operationId: "SystemAuth" + consumes: ["application/json"] + produces: ["application/json"] + responses: + 200: + description: "An identity token was generated successfully." + schema: + type: "object" + required: [Status] + properties: + Status: + description: "The status of the authentication" + type: "string" + x-nullable: false + IdentityToken: + description: "An opaque token used to authenticate a user after a successful login" + type: "string" + x-nullable: false + examples: + application/json: + Status: "Login Succeeded" + IdentityToken: "9cbaf023786cd7..." + 204: + description: "No error" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "authConfig" + in: "body" + description: "Authentication to check" + schema: + $ref: "#/definitions/AuthConfig" + tags: ["System"] + /info: + get: + summary: "Get system information" + operationId: "SystemInfo" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + type: "object" + properties: + Architecture: + type: "string" + Containers: + type: "integer" + ContainersRunning: + type: "integer" + ContainersStopped: + type: "integer" + ContainersPaused: + type: "integer" + CpuCfsPeriod: + type: "boolean" + CpuCfsQuota: + type: "boolean" + Debug: + type: "boolean" + DiscoveryBackend: + type: "string" + DockerRootDir: + type: "string" + Driver: + type: "string" + DriverStatus: + type: "array" + items: + type: "array" + items: + type: "string" + SystemStatus: + type: "array" + items: + type: "array" + items: + type: "string" + Plugins: + type: "object" + properties: + Volume: + type: "array" + items: + type: "string" + Network: + type: "array" + items: + type: "string" + Log: + type: "array" + items: + type: "string" + ExperimentalBuild: + type: "boolean" + HttpProxy: + type: "string" + HttpsProxy: + type: "string" + ID: + type: "string" + IPv4Forwarding: + type: "boolean" + Images: + type: "integer" + IndexServerAddress: + type: "string" + InitPath: + type: "string" + InitSha1: + type: "string" + KernelVersion: + type: "string" + Labels: + type: "array" + items: + type: "string" + MemTotal: + type: "integer" + MemoryLimit: + type: "boolean" + NCPU: + type: "integer" + NEventsListener: + type: "integer" + NFd: + type: "integer" + NGoroutines: + type: "integer" + Name: + type: "string" + NoProxy: + type: "string" + OomKillDisable: + type: "boolean" + OSType: + type: "string" + OomScoreAdj: + type: "integer" + OperatingSystem: + type: "string" + RegistryConfig: + type: "object" + properties: + IndexConfigs: + type: "object" + additionalProperties: + type: "object" + properties: + Mirrors: + type: "array" + items: + type: "string" + Name: + type: "string" + Official: + type: "boolean" + Secure: 
+ type: "boolean" + InsecureRegistryCIDRs: + type: "array" + items: + type: "string" + SwapLimit: + type: "boolean" + SystemTime: + type: "string" + ServerVersion: + type: "string" + examples: + application/json: + Architecture: "x86_64" + ClusterStore: "etcd://localhost:2379" + CgroupDriver: "cgroupfs" + Containers: 11 + ContainersRunning: 7 + ContainersStopped: 3 + ContainersPaused: 1 + CpuCfsPeriod: true + CpuCfsQuota: true + Debug: false + DockerRootDir: "/var/lib/docker" + Driver: "btrfs" + DriverStatus: + - + - "" + ExperimentalBuild: false + HttpProxy: "http://test:test@localhost:8080" + HttpsProxy: "https://test:test@localhost:8080" + ID: "7TRN:IPZB:QYBB:VPBQ:UMPP:KARE:6ZNR:XE6T:7EWV:PKF4:ZOJD:TPYS" + IPv4Forwarding: true + Images: 16 + IndexServerAddress: "https://index.docker.io/v1/" + InitPath: "/usr/bin/docker" + InitSha1: "" + KernelMemory: true + KernelVersion: "3.12.0-1-amd64" + Labels: + - "storage=ssd" + MemTotal: 2099236864 + MemoryLimit: true + NCPU: 1 + NEventsListener: 0 + NFd: 11 + NGoroutines: 21 + Name: "prod-server-42" + NoProxy: "9.81.1.160" + OomKillDisable: true + OSType: "linux" + OperatingSystem: "Boot2Docker" + Plugins: + Volume: + - "local" + Network: + - "null" + - "host" + - "bridge" + RegistryConfig: + IndexConfigs: + docker.io: + Name: "docker.io" + Official: true + Secure: true + InsecureRegistryCIDRs: + - "127.0.0.0/8" + SecurityOptions: + - Key: "Name" + Value: "seccomp" + - Key: "Profile" + Value: "default" + - Key: "Name" + Value: "apparmor" + - Key: "Name" + Value: "selinux" + - Key: "Name" + Value: "userns" + ServerVersion: "1.9.0" + SwapLimit: false + SystemStatus: + - + - "State" + - "Healthy" + SystemTime: "2015-03-10T11:11:23.730591467-07:00" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["System"] + /version: + get: + summary: "Get version" + description: "Returns the version of Docker that is running and various information about the system that Docker is running on." + operationId: "SystemVersion" + produces: ["application/json"] + responses: + 200: + description: "no error" + schema: + type: "object" + properties: + Version: + type: "string" + ApiVersion: + type: "string" + MinAPIVersion: + type: "string" + GitCommit: + type: "string" + GoVersion: + type: "string" + Os: + type: "string" + Arch: + type: "string" + KernelVersion: + type: "string" + Experimental: + type: "boolean" + BuildTime: + type: "string" + examples: + application/json: + Version: "17.04.0" + Os: "linux" + KernelVersion: "3.19.0-23-generic" + GoVersion: "go1.7.5" + GitCommit: "deadbee" + Arch: "amd64" + ApiVersion: "1.27" + MinAPIVersion: "1.12" + BuildTime: "2016-06-14T07:09:13.444803460+00:00" + Experimental: true + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["System"] + /_ping: + get: + summary: "Ping" + description: "This is a dummy endpoint you can use to test if the server is accessible." 
+ operationId: "SystemPing" + produces: ["text/plain"] + responses: + 200: + description: "no error" + schema: + type: "string" + example: "OK" + headers: + API-Version: + type: "string" + description: "Max API Version the server supports" + Docker-Experimental: + type: "boolean" + description: "If the server is running with experimental mode enabled" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["System"] + /commit: + post: + summary: "Create a new image from a container" + operationId: "ImageCommit" + consumes: + - "application/json" + produces: + - "application/json" + responses: + 201: + description: "no error" + schema: + $ref: "#/definitions/IdResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "containerConfig" + in: "body" + description: "The container configuration" + schema: + $ref: "#/definitions/Config" + - name: "container" + in: "query" + description: "The ID or name of the container to commit" + type: "string" + - name: "repo" + in: "query" + description: "Repository name for the created image" + type: "string" + - name: "tag" + in: "query" + description: "Tag name for the created image" + type: "string" + - name: "comment" + in: "query" + description: "Commit message" + type: "string" + - name: "author" + in: "query" + description: "Author of the image (e.g., `John Hannibal Smith <hannibal@a-team.com>`)" + type: "string" + - name: "pause" + in: "query" + description: "Whether to pause the container before committing" + type: "boolean" + default: true + - name: "changes" + in: "query" + description: "`Dockerfile` instructions to apply while committing" + type: "string" + tags: ["Image"] + /events: + get: + summary: "Monitor events" + description: | + Stream real-time events from the server. + + Various objects within Docker report events when something happens to them. 
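Before the event types are enumerated, a minimal sketch of the `/commit` endpoint just defined; the container name, repository, and TCP daemon address are all assumptions:

```python
# ImageCommit: snapshot a (hypothetical) container into an image.
import http.client
import json
from urllib.parse import urlencode

conn = http.client.HTTPConnection("localhost", 2375)  # assumed TCP socket
query = urlencode({"container": "mycontainer", "repo": "myrepo/myimage",
                   "tag": "v1", "comment": "snapshot", "pause": "true"})
body = json.dumps({"Cmd": ["sh"]})  # optional Config overrides for the image
conn.request("POST", "/commit?" + query, body,
             {"Content-Type": "application/json"})
resp = conn.getresponse()
print(resp.status, json.loads(resp.read()))  # 201 -> {"Id": "sha256:..."}
```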
+ + Containers report these events: `attach, commit, copy, create, destroy, detach, die, exec_create, exec_detach, exec_start, export, health_status, kill, oom, pause, rename, resize, restart, start, stop, top, unpause, update` + + Images report these events: `delete, import, load, pull, push, save, tag, untag` + + Volumes report these events: `create, mount, unmount, destroy` + + Networks report these events: `create, connect, disconnect, destroy` + + The Docker daemon reports these events: `reload` + + operationId: "SystemEvents" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + type: "object" + properties: + Type: + description: "The type of object emitting the event" + type: "string" + Action: + description: "The type of event" + type: "string" + Actor: + type: "object" + properties: + ID: + description: "The ID of the object emitting the event" + type: "string" + Attributes: + description: "Various key/value attributes of the object, depending on its type" + type: "object" + additionalProperties: + type: "string" + time: + description: "Timestamp of event" + type: "integer" + timeNano: + description: "Timestamp of event, with nanosecond accuracy" + type: "integer" + format: "int64" + examples: + application/json: + Type: "container" + Action: "create" + Actor: + ID: "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743" + Attributes: + com.example.some-label: "some-label-value" + image: "alpine" + name: "my-container" + time: 1461943101 + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "since" + in: "query" + description: "Show events created since this timestamp then stream new events." + type: "string" + - name: "until" + in: "query" + description: "Show events created until this timestamp then stop streaming." + type: "string" + - name: "filters" + in: "query" + description: | + A JSON encoded value of filters (a `map[string][]string`) to process on the event list. 
Available filters: + + - `container=<string>` container name or ID + - `daemon=<string>` daemon name or ID + - `event=<string>` event type + - `image=<string>` image name or ID + - `label=<string>` image or container label + - `network=<string>` network name or ID + - `plugin=<string>` plugin name or ID + - `type=<string>` object to filter by, one of `container`, `image`, `volume`, `network`, or `daemon` + - `volume=<string>` volume name or ID + type: "string" + tags: ["System"] + /system/df: + get: + summary: "Get data usage information" + operationId: "SystemDataUsage" + responses: + 200: + description: "no error" + schema: + type: "object" + properties: + LayersSize: + type: "integer" + format: "int64" + Images: + type: "array" + items: + $ref: "#/definitions/ImageSummary" + Containers: + type: "array" + items: + $ref: "#/definitions/ContainerSummary" + Volumes: + type: "array" + items: + $ref: "#/definitions/Volume" + example: + LayersSize: 1092588 + Images: + - + Id: "sha256:2b8fd9751c4c0f5dd266fcae00707e67a2545ef34f9a29354585f93dac906749" + ParentId: "" + RepoTags: + - "busybox:latest" + RepoDigests: + - "busybox@sha256:a59906e33509d14c036c8678d687bd4eec81ed7c4b8ce907b888c607f6a1e0e6" + Created: 1466724217 + Size: 1092588 + SharedSize: 0 + VirtualSize: 1092588 + Labels: {} + Containers: 1 + Containers: + - + Id: "e575172ed11dc01bfce087fb27bee502db149e1a0fad7c296ad300bbff178148" + Names: + - "/top" + Image: "busybox" + ImageID: "sha256:2b8fd9751c4c0f5dd266fcae00707e67a2545ef34f9a29354585f93dac906749" + Command: "top" + Created: 1472592424 + Ports: [] + SizeRootFs: 1092588 + Labels: {} + State: "exited" + Status: "Exited (0) 56 minutes ago" + HostConfig: + NetworkMode: "default" + NetworkSettings: + Networks: + bridge: + IPAMConfig: null + Links: null + Aliases: null + NetworkID: "d687bc59335f0e5c9ee8193e5612e8aee000c8c62ea170cfb99c098f95899d92" + EndpointID: "8ed5115aeaad9abb174f68dcf135b49f11daf597678315231a32ca28441dec6a" + Gateway: "172.18.0.1" + IPAddress: "172.18.0.2" + IPPrefixLen: 16 + IPv6Gateway: "" + GlobalIPv6Address: "" + GlobalIPv6PrefixLen: 0 + MacAddress: "02:42:ac:12:00:02" + Mounts: [] + Volumes: + - + Name: "my-volume" + Driver: "local" + Mountpoint: "" + Labels: null + Scope: "" + Options: null + UsageData: + Size: 0 + RefCount: 0 + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["System"] + /images/{name}/get: + get: + summary: "Export an image" + description: | + Get a tarball containing all images and metadata for a repository. + + If `name` is a specific name and tag (e.g. `ubuntu:latest`), then only that image (and its parents) are returned. If `name` is an image ID, similarly only that image (and its parents) are returned, but with the exclusion of the `repositories` file in the tarball, as there were no image names referenced. + + ### Image tarball format + + An image tarball contains one directory per image layer (named using its long ID), each containing these files: + + - `VERSION`: currently `1.0` - the file format version + - `json`: detailed layer information, similar to `docker inspect layer_id` + - `layer.tar`: A tarfile containing the filesystem changes in this layer + + The `layer.tar` file contains `aufs` style `.wh..wh.aufs` files and directories for storing attribute changes and deletions. + + If the tarball defines a repository, the tarball should also include a `repositories` file at the root that contains a list of repository and tag names mapped to layer IDs. 
+ + ```json + { + "hello-world": { + "latest": "565a9d68a73f6706862bfe8409a7f659776d4d60a8d096eb4a3cbce6999cc2a1" + } + } + ``` + operationId: "ImageGet" + produces: + - "application/x-tar" + responses: + 200: + description: "no error" + schema: + type: "string" + format: "binary" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or ID" + type: "string" + required: true + tags: ["Image"] + /images/get: + get: + summary: "Export several images" + description: | + Get a tarball containing all images and metadata for several image repositories. + + For each value of the `names` parameter: if it is a specific name and tag (e.g. `ubuntu:latest`), then only that image (and its parents) are returned; if it is an image ID, similarly only that image (and its parents) are returned and there would be no names referenced in the 'repositories' file for this image ID. + + For details on the format, see [the export image endpoint](#operation/ImageGet). + operationId: "ImageGetAll" + produces: + - "application/x-tar" + responses: + 200: + description: "no error" + schema: + type: "string" + format: "binary" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "names" + in: "query" + description: "Image names to filter by" + type: "array" + items: + type: "string" + tags: ["Image"] + /images/load: + post: + summary: "Import images" + description: | + Load a set of images and tags into a repository. + + For details on the format, see [the export image endpoint](#operation/ImageGet). + operationId: "ImageLoad" + consumes: + - "application/x-tar" + produces: + - "application/json" + responses: + 200: + description: "no error" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "imagesTarball" + in: "body" + description: "Tar archive containing images" + schema: + type: "string" + format: "binary" + - name: "quiet" + in: "query" + description: "Suppress progress details during load." + type: "boolean" + default: false + tags: ["Image"] + /containers/{id}/exec: + post: + summary: "Create an exec instance" + description: "Run a command inside a running container." + operationId: "ContainerExec" + consumes: + - "application/json" + produces: + - "application/json" + responses: + 201: + description: "no error" + schema: + $ref: "#/definitions/IdResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 409: + description: "container is paused" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "execConfig" + in: "body" + description: "Exec configuration" + schema: + type: "object" + properties: + AttachStdin: + type: "boolean" + description: "Attach to `stdin` of the exec command." + AttachStdout: + type: "boolean" + description: "Attach to `stdout` of the exec command." + AttachStderr: + type: "boolean" + description: "Attach to `stderr` of the exec command." + DetachKeys: + type: "string" + description: "Override the key sequence for detaching a container. Format is a single character `[a-Z]` or `ctrl-<value>` where `<value>` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`." + Tty: + type: "boolean" + description: "Allocate a pseudo-TTY." 
+ Env: + description: "A list of environment variables in the form `[\"VAR=value\", ...]`." + type: "array" + items: + type: "string" + Cmd: + type: "array" + description: "Command to run, as a string or array of strings." + items: + type: "string" + Privileged: + type: "boolean" + description: "Runs the exec process with extended privileges." + default: false + User: + type: "string" + description: "The user, and optionally, group to run the exec process inside the container. Format is one of: `user`, `user:group`, `uid`, or `uid:gid`." + example: + AttachStdin: false + AttachStdout: true + AttachStderr: true + DetachKeys: "ctrl-p,ctrl-q" + Tty: false + Cmd: + - "date" + Env: + - "FOO=bar" + - "BAZ=quux" + required: true + - name: "id" + in: "path" + description: "ID or name of container" + type: "string" + required: true + tags: ["Exec"] + /exec/{id}/start: + post: + summary: "Start an exec instance" + description: "Starts a previously set up exec instance. If detach is true, this endpoint returns immediately after starting the command. Otherwise, it sets up an interactive session with the command." + operationId: "ExecStart" + consumes: + - "application/json" + produces: + - "application/vnd.docker.raw-stream" + responses: + 200: + description: "No error" + 404: + description: "No such exec instance" + schema: + $ref: "#/definitions/ErrorResponse" + 409: + description: "Container is stopped or paused" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "execStartConfig" + in: "body" + schema: + type: "object" + properties: + Detach: + type: "boolean" + description: "Detach from the command." + Tty: + type: "boolean" + description: "Allocate a pseudo-TTY." + example: + Detach: false + Tty: false + - name: "id" + in: "path" + description: "Exec instance ID" + required: true + type: "string" + tags: ["Exec"] + /exec/{id}/resize: + post: + summary: "Resize an exec instance" + description: "Resize the TTY session used by an exec instance. This endpoint only works if `tty` was specified as part of creating and starting the exec instance." + operationId: "ExecResize" + responses: + 201: + description: "No error" + 404: + description: "No such exec instance" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Exec instance ID" + required: true + type: "string" + - name: "h" + in: "query" + description: "Height of the TTY session in characters" + type: "integer" + - name: "w" + in: "query" + description: "Width of the TTY session in characters" + type: "integer" + tags: ["Exec"] + /exec/{id}/json: + get: + summary: "Inspect an exec instance" + description: "Return low-level information about an exec instance." + operationId: "ExecInspect" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + type: "object" + properties: + ID: + type: "string" + Running: + type: "boolean" + ExitCode: + type: "integer" + ProcessConfig: + $ref: "#/definitions/ProcessConfig" + OpenStdin: + type: "boolean" + OpenStderr: + type: "boolean" + OpenStdout: + type: "boolean" + ContainerID: + type: "string" + Pid: + type: "integer" + description: "The system process ID for the exec process." 
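Tying the exec endpoints together, a minimal sketch of create, start, and inspect; the container name and TCP daemon address are assumptions, and `Detach` is set so the start call returns immediately instead of hijacking the connection:

```python
# Exec workflow: ContainerExec -> ExecStart (detached) -> ExecInspect.
import http.client
import json

conn = http.client.HTTPConnection("localhost", 2375)
hdrs = {"Content-Type": "application/json"}

conn.request("POST", "/containers/mycontainer/exec",
             json.dumps({"AttachStdout": True, "Cmd": ["date"]}), hdrs)
exec_id = json.loads(conn.getresponse().read())["Id"]  # 201 -> IdResponse

conn.request("POST", "/exec/%s/start" % exec_id,
             json.dumps({"Detach": True, "Tty": False}), hdrs)
conn.getresponse().read()  # 200, empty body when detached

conn.request("GET", "/exec/%s/json" % exec_id)
print(json.loads(conn.getresponse().read()))  # Running / ExitCode / Pid
```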
+ examples: + application/json: + CanRemove: false + ContainerID: "b53ee82b53a40c7dca428523e34f741f3abc51d9f297a14ff874bf761b995126" + DetachKeys: "" + ExitCode: 2 + ID: "f33bbfb39f5b142420f4759b2348913bd4a8d1a6d7fd56499cb41a1bb91d7b3b" + OpenStderr: true + OpenStdin: true + OpenStdout: true + ProcessConfig: + arguments: + - "-c" + - "exit 2" + entrypoint: "sh" + privileged: false + tty: true + user: "1000" + Running: false + Pid: 42000 + 404: + description: "No such exec instance" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Exec instance ID" + required: true + type: "string" + tags: ["Exec"] + + /volumes: + get: + summary: "List volumes" + operationId: "VolumeList" + produces: ["application/json"] + responses: + 200: + description: "Summary volume data that matches the query" + schema: + type: "object" + required: [Volumes, Warnings] + properties: + Volumes: + type: "array" + x-nullable: false + description: "List of volumes" + items: + $ref: "#/definitions/Volume" + Warnings: + type: "array" + x-nullable: false + description: "Warnings that occurred when fetching the list of volumes" + items: + type: "string" + + examples: + application/json: + Volumes: + - Name: "tardis" + Driver: "local" + Mountpoint: "/var/lib/docker/volumes/tardis" + Labels: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + Scope: "local" + Options: + device: "tmpfs" + o: "size=100m,uid=1000" + type: "tmpfs" + Warnings: [] + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + description: | + JSON encoded value of the filters (a `map[string][]string`) to + process on the volumes list. Available filters: + + - `dangling=<boolean>` When set to `true` (or `1`), returns all + volumes that are not in use by a container. When set to `false` + (or `0`), only volumes that are in use by one or more + containers are returned. + - `driver=<volume-driver-name>` Matches volumes based on their driver. + - `label=<key>` or `label=<key>:<value>` Matches volumes based on + the presence of a `label` alone or a `label` and a value. + - `name=<volume-name>` Matches all or part of a volume name. + type: "string" + format: "json" + tags: ["Volume"] + + /volumes/create: + post: + summary: "Create a volume" + operationId: "VolumeCreate" + consumes: ["application/json"] + produces: ["application/json"] + responses: + 201: + description: "The volume was created successfully" + schema: + $ref: "#/definitions/Volume" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "volumeConfig" + in: "body" + required: true + description: "Volume configuration" + schema: + type: "object" + properties: + Name: + description: "The new volume's name. If not specified, Docker generates a name." + type: "string" + x-nullable: false + Driver: + description: "Name of the volume driver to use." + type: "string" + default: "local" + x-nullable: false + DriverOpts: + description: "A mapping of driver options and values. These options are passed directly to the driver and are driver specific." + type: "object" + additionalProperties: + type: "string" + Labels: + description: "User-defined key/value metadata." 
+ type: "object" + additionalProperties: + type: "string" + example: + Name: "tardis" + Labels: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + Driver: "custom" + tags: ["Volume"] + + /volumes/{name}: + get: + summary: "Inspect a volume" + operationId: "VolumeInspect" + produces: ["application/json"] + responses: + 200: + description: "No error" + schema: + $ref: "#/definitions/Volume" + 404: + description: "No such volume" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + required: true + description: "Volume name or ID" + type: "string" + tags: ["Volume"] + + delete: + summary: "Remove a volume" + description: "Instruct the driver to remove the volume." + operationId: "VolumeDelete" + responses: + 204: + description: "The volume was removed" + 404: + description: "No such volume or volume driver" + schema: + $ref: "#/definitions/ErrorResponse" + 409: + description: "Volume is in use and cannot be removed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + required: true + description: "Volume name or ID" + type: "string" + - name: "force" + in: "query" + description: "Force the removal of the volume" + type: "boolean" + default: false + tags: ["Volume"] + /volumes/prune: + post: + summary: "Delete unused volumes" + produces: + - "application/json" + operationId: "VolumePrune" + parameters: + - name: "filters" + in: "query" + description: | + Filters to process on the prune list, encoded as JSON (a `map[string][]string`). + + Available filters: + - `label` (`label=`, `label==`, `label!=`, or `label!==`) Prune volumes with (or without, in case `label!=...` is used) the specified labels. + type: "string" + responses: + 200: + description: "No error" + schema: + type: "object" + properties: + VolumesDeleted: + description: "Volumes that were deleted" + type: "array" + items: + type: "string" + SpaceReclaimed: + description: "Disk space reclaimed in bytes" + type: "integer" + format: "int64" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Volume"] + /networks: + get: + summary: "List networks" + description: | + Returns a list of networks. For details on the format, see [the network inspect endpoint](#operation/NetworkInspect). + + Note that it uses a different, smaller representation of a network than inspecting a single network. For example, + the list of containers attached to the network is not propagated in API versions 1.28 and up. 
+ operationId: "NetworkList" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + type: "array" + items: + $ref: "#/definitions/Network" + examples: + application/json: + - Name: "bridge" + Id: "f2de39df4171b0dc801e8002d1d999b77256983dfc63041c0f34030aa3977566" + Created: "2016-10-19T06:21:00.416543526Z" + Scope: "local" + Driver: "bridge" + EnableIPv6: false + Internal: false + Attachable: false + Ingress: false + IPAM: + Driver: "default" + Config: + - + Subnet: "172.17.0.0/16" + Options: + com.docker.network.bridge.default_bridge: "true" + com.docker.network.bridge.enable_icc: "true" + com.docker.network.bridge.enable_ip_masquerade: "true" + com.docker.network.bridge.host_binding_ipv4: "0.0.0.0" + com.docker.network.bridge.name: "docker0" + com.docker.network.driver.mtu: "1500" + - Name: "none" + Id: "e086a3893b05ab69242d3c44e49483a3bbbd3a26b46baa8f61ab797c1088d794" + Created: "0001-01-01T00:00:00Z" + Scope: "local" + Driver: "null" + EnableIPv6: false + Internal: false + Attachable: false + Ingress: false + IPAM: + Driver: "default" + Config: [] + Containers: {} + Options: {} + - Name: "host" + Id: "13e871235c677f196c4e1ecebb9dc733b9b2d2ab589e30c539efeda84a24215e" + Created: "0001-01-01T00:00:00Z" + Scope: "local" + Driver: "host" + EnableIPv6: false + Internal: false + Attachable: false + Ingress: false + IPAM: + Driver: "default" + Config: [] + Containers: {} + Options: {} + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + description: | + JSON encoded value of the filters (a `map[string][]string`) to process on the networks list. Available filters: + + - `driver=<driver-name>` Matches a network's driver. + - `id=<network-id>` Matches all or part of a network ID. + - `label=<key>` or `label=<key>=<value>` of a network label. + - `name=<network-name>` Matches all or part of a network name. + - `scope=["swarm"|"global"|"local"]` Filters networks by scope (`swarm`, `global`, or `local`). + - `type=["custom"|"builtin"]` Filters networks by type. The `custom` keyword returns all user-defined networks. 
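The `filters` parameter here, as everywhere in this file, is a JSON-encoded `map[string][]string` passed in the query string; a sketch of building one for this list endpoint (TCP daemon address assumed):

```python
# Encode a filters map and list only user-defined bridge networks.
import http.client
import json
from urllib.parse import quote

filters = json.dumps({"driver": ["bridge"], "type": ["custom"]})
conn = http.client.HTTPConnection("localhost", 2375)
conn.request("GET", "/networks?filters=" + quote(filters))
print(json.loads(conn.getresponse().read()))  # array of Network objects
```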
+ type: "string" + tags: ["Network"] + + /networks/{id}: + get: + summary: "Inspect a network" + operationId: "NetworkInspect" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + $ref: "#/definitions/Network" + 404: + description: "Network not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Network ID or name" + required: true + type: "string" + - name: "verbose" + in: "query" + description: "Detailed inspect output for troubleshooting" + type: "boolean" + default: false + tags: ["Network"] + + delete: + summary: "Remove a network" + operationId: "NetworkDelete" + responses: + 204: + description: "No error" + 403: + description: "operation not supported for pre-defined networks" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such network" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Network ID or name" + required: true + type: "string" + tags: ["Network"] + + /networks/create: + post: + summary: "Create a network" + operationId: "NetworkCreate" + consumes: + - "application/json" + produces: + - "application/json" + responses: + 201: + description: "No error" + schema: + type: "object" + properties: + Id: + description: "The ID of the created network." + type: "string" + Warning: + type: "string" + example: + Id: "22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30" + Warning: "" + 403: + description: "operation not supported for pre-defined networks" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "plugin not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "networkConfig" + in: "body" + description: "Network configuration" + required: true + schema: + type: "object" + required: ["Name"] + properties: + Name: + description: "The network's name." + type: "string" + CheckDuplicate: + description: "Check for networks with duplicate names. Since Network is primarily keyed based on a random ID and not on the name, and network name is strictly a user-friendly alias to the network which is uniquely identified using ID, there is no guaranteed way to check for duplicates. CheckDuplicate is there to provide a best effort checking of any networks which has the same name but it is not guaranteed to catch all name collisions." + type: "boolean" + Driver: + description: "Name of the network driver plugin to use." + type: "string" + default: "bridge" + Internal: + description: "Restrict external access to the network." + type: "boolean" + Attachable: + description: "Globally scoped network is manually attachable by regular containers from workers in swarm mode." + type: "boolean" + Ingress: + description: "Ingress network is the network which provides the routing-mesh in swarm mode." + type: "boolean" + IPAM: + description: "Optional custom IP scheme for the network." + $ref: "#/definitions/IPAM" + EnableIPv6: + description: "Enable IPv6 on the network." + type: "boolean" + Options: + description: "Network specific options to be used by the drivers." + type: "object" + additionalProperties: + type: "string" + Labels: + description: "User-defined key/value metadata." 
+ type: "object" + additionalProperties: + type: "string" + example: + Name: "isolated_nw" + CheckDuplicate: false + Driver: "bridge" + EnableIPv6: true + IPAM: + Driver: "default" + Config: + - Subnet: "172.20.0.0/16" + IPRange: "172.20.10.0/24" + Gateway: "172.20.10.11" + - Subnet: "2001:db8:abcd::/64" + Gateway: "2001:db8:abcd::1011" + Options: + foo: "bar" + Internal: true + Attachable: false + Ingress: false + Options: + com.docker.network.bridge.default_bridge: "true" + com.docker.network.bridge.enable_icc: "true" + com.docker.network.bridge.enable_ip_masquerade: "true" + com.docker.network.bridge.host_binding_ipv4: "0.0.0.0" + com.docker.network.bridge.name: "docker0" + com.docker.network.driver.mtu: "1500" + Labels: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + tags: ["Network"] + + /networks/{id}/connect: + post: + summary: "Connect a container to a network" + operationId: "NetworkConnect" + consumes: + - "application/octet-stream" + responses: + 200: + description: "No error" + 403: + description: "Operation not supported for swarm scoped networks" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "Network or container not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Network ID or name" + required: true + type: "string" + - name: "container" + in: "body" + required: true + schema: + type: "object" + properties: + Container: + type: "string" + description: "The ID or name of the container to connect to the network." + EndpointConfig: + $ref: "#/definitions/EndpointSettings" + example: + Container: "3613f73ba0e4" + EndpointConfig: + IPAMConfig: + IPv4Address: "172.24.56.89" + IPv6Address: "2001:db8::5689" + tags: ["Network"] + + /networks/{id}/disconnect: + post: + summary: "Disconnect a container from a network" + operationId: "NetworkDisconnect" + consumes: + - "application/json" + responses: + 200: + description: "No error" + 403: + description: "Operation not supported for swarm scoped networks" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "Network or container not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Network ID or name" + required: true + type: "string" + - name: "container" + in: "body" + required: true + schema: + type: "object" + properties: + Container: + type: "string" + description: "The ID or name of the container to disconnect from the network." + Force: + type: "boolean" + description: "Force the container to disconnect from the network." + tags: ["Network"] + /networks/prune: + post: + summary: "Delete unused networks" + produces: + - "application/json" + operationId: "NetworkPrune" + parameters: + - name: "filters" + in: "query" + description: | + Filters to process on the prune list, encoded as JSON (a `map[string][]string`). + + Available filters: + - `until=` Prune networks created before this timestamp. The `` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the daemon machine’s time. + - `label` (`label=`, `label==`, `label!=`, or `label!==`) Prune networks with (or without, in case `label!=...` is used) the specified labels. 
+ type: "string" + responses: + 200: + description: "No error" + schema: + type: "object" + properties: + NetworksDeleted: + description: "Networks that were deleted" + type: "array" + items: + type: "string" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Network"] + /plugins: + get: + summary: "List plugins" + operationId: "PluginList" + description: "Returns information about installed plugins." + produces: ["application/json"] + responses: + 200: + description: "No error" + schema: + type: "array" + items: + $ref: "#/definitions/Plugin" + example: + - Id: "5724e2c8652da337ab2eedd19fc6fc0ec908e4bd907c7421bf6a8dfc70c4c078" + Name: "tiborvass/sample-volume-plugin" + Tag: "latest" + Active: true + Settings: + Env: + - "DEBUG=0" + Args: null + Devices: null + Config: + Description: "A sample volume plugin for Docker" + Documentation: "https://docs.docker.com/engine/extend/plugins/" + Interface: + Types: + - "docker.volumedriver/1.0" + Socket: "plugins.sock" + Entrypoint: + - "/usr/bin/sample-volume-plugin" + - "/data" + WorkDir: "" + User: {} + Network: + Type: "" + Linux: + Capabilities: null + AllowAllDevices: false + Devices: null + Mounts: null + PropagatedMount: "/data" + Env: + - Name: "DEBUG" + Description: "If set, prints debug messages" + Settable: null + Value: "0" + Args: + Name: "args" + Description: "command line arguments" + Settable: null + Value: [] + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + type: "string" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to process on the plugin list. Available filters: + + - `capability=` + - `enable=|` + tags: ["Plugin"] + + /plugins/privileges: + get: + summary: "Get plugin privileges" + operationId: "GetPluginPrivileges" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + description: "Describes a permission the user has to accept upon installing the plugin." + type: "object" + properties: + Name: + type: "string" + Description: + type: "string" + Value: + type: "array" + items: + type: "string" + example: + - Name: "network" + Description: "" + Value: + - "host" + - Name: "mount" + Description: "" + Value: + - "/data" + - Name: "device" + Description: "" + Value: + - "/dev/cpu_dma_latency" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "remote" + in: "query" + description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted." + required: true + type: "string" + tags: + - "Plugin" + + /plugins/pull: + post: + summary: "Install a plugin" + operationId: "PluginPull" + description: | + Pulls and installs a plugin. After the plugin is installed, it can be enabled using the [`POST /plugins/{name}/enable` endpoint](#operation/PostPluginsEnable). + produces: + - "application/json" + responses: + 204: + description: "no error" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "remote" + in: "query" + description: | + Remote reference for plugin to install. + + The `:latest` tag is optional, and is used as the default if omitted. + required: true + type: "string" + - name: "name" + in: "query" + description: | + Local name for the pulled plugin. + + The `:latest` tag is optional, and is used as the default if omitted. 
+ required: false + type: "string" + - name: "X-Registry-Auth" + in: "header" + description: "A base64-encoded auth configuration to use when pulling a plugin from a registry. [See the authentication section for details.](#section/Authentication)" + type: "string" + - name: "body" + in: "body" + schema: + type: "array" + items: + description: "Describes a permission accepted by the user upon installing the plugin." + type: "object" + properties: + Name: + type: "string" + Description: + type: "string" + Value: + type: "array" + items: + type: "string" + example: + - Name: "network" + Description: "" + Value: + - "host" + - Name: "mount" + Description: "" + Value: + - "/data" + - Name: "device" + Description: "" + Value: + - "/dev/cpu_dma_latency" + tags: ["Plugin"] + /plugins/{name}/json: + get: + summary: "Inspect a plugin" + operationId: "PluginInspect" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Plugin" + 404: + description: "plugin is not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted." + required: true + type: "string" + tags: ["Plugin"] + /plugins/{name}: + delete: + summary: "Remove a plugin" + operationId: "PluginDelete" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Plugin" + 404: + description: "plugin is not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted." + required: true + type: "string" + - name: "force" + in: "query" + description: "Disable the plugin before removing. This may result in issues if the plugin is in use by a container." + type: "boolean" + default: false + tags: ["Plugin"] + /plugins/{name}/enable: + post: + summary: "Enable a plugin" + operationId: "PluginEnable" + responses: + 200: + description: "no error" + 404: + description: "plugin is not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted." + required: true + type: "string" + - name: "timeout" + in: "query" + description: "Set the HTTP client timeout (in seconds)" + type: "integer" + default: 0 + tags: ["Plugin"] + /plugins/{name}/disable: + post: + summary: "Disable a plugin" + operationId: "PluginDisable" + responses: + 200: + description: "no error" + 404: + description: "plugin is not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted." 
+ required: true + type: "string" + tags: ["Plugin"] + /plugins/{name}/upgrade: + post: + summary: "Upgrade a plugin" + operationId: "PluginUpgrade" + responses: + 204: + description: "no error" + 404: + description: "plugin not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted." + required: true + type: "string" + - name: "remote" + in: "query" + description: | + Remote reference to upgrade to. + + The `:latest` tag is optional, and is used as the default if omitted. + required: true + type: "string" + - name: "X-Registry-Auth" + in: "header" + description: "A base64-encoded auth configuration to use when pulling a plugin from a registry. [See the authentication section for details.](#section/Authentication)" + type: "string" + - name: "body" + in: "body" + schema: + type: "array" + items: + description: "Describes a permission accepted by the user upon installing the plugin." + type: "object" + properties: + Name: + type: "string" + Description: + type: "string" + Value: + type: "array" + items: + type: "string" + example: + - Name: "network" + Description: "" + Value: + - "host" + - Name: "mount" + Description: "" + Value: + - "/data" + - Name: "device" + Description: "" + Value: + - "/dev/cpu_dma_latency" + tags: ["Plugin"] + /plugins/create: + post: + summary: "Create a plugin" + operationId: "PluginCreate" + consumes: + - "application/x-tar" + responses: + 204: + description: "no error" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "query" + description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted." + required: true + type: "string" + - name: "tarContext" + in: "body" + description: "Path to tar containing plugin rootfs and manifest" + schema: + type: "string" + format: "binary" + tags: ["Plugin"] + /plugins/{name}/push: + post: + summary: "Push a plugin" + operationId: "PluginPush" + description: | + Push a plugin to the registry. + parameters: + - name: "name" + in: "path" + description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted." + required: true + type: "string" + responses: + 200: + description: "no error" + 404: + description: "plugin not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Plugin"] + /plugins/{name}/set: + post: + summary: "Configure a plugin" + operationId: "PluginSet" + consumes: + - "application/json" + parameters: + - name: "name" + in: "path" + description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted." 
+ required: true + type: "string" + - name: "body" + in: "body" + schema: + type: "array" + items: + type: "string" + example: ["DEBUG=1"] + responses: + 204: + description: "No error" + 404: + description: "Plugin not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Plugin"] + /nodes: + get: + summary: "List nodes" + operationId: "NodeList" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/Node" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + description: | + Filters to process on the nodes list, encoded as JSON (a `map[string][]string`). + + Available filters: + - `id=` + - `label=` + - `membership=`(`accepted`|`pending`)` + - `name=` + - `role=`(`manager`|`worker`)` + type: "string" + tags: ["Node"] + /nodes/{id}: + get: + summary: "Inspect a node" + operationId: "NodeInspect" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Node" + 404: + description: "no such node" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "The ID or name of the node" + type: "string" + required: true + tags: ["Node"] + delete: + summary: "Delete a node" + operationId: "NodeDelete" + responses: + 200: + description: "no error" + 404: + description: "no such node" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "The ID or name of the node" + type: "string" + required: true + - name: "force" + in: "query" + description: "Force remove a node from the swarm" + default: false + type: "boolean" + tags: ["Node"] + /nodes/{id}/update: + post: + summary: "Update a node" + operationId: "NodeUpdate" + responses: + 200: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such node" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "The ID of the node" + type: "string" + required: true + - name: "body" + in: "body" + schema: + $ref: "#/definitions/NodeSpec" + - name: "version" + in: "query" + description: "The version number of the node object being updated. This is required to avoid conflicting writes." + type: "integer" + format: "int64" + required: true + tags: ["Node"] + /swarm: + get: + summary: "Inspect swarm" + operationId: "SwarmInspect" + responses: + 200: + description: "no error" + schema: + allOf: + - $ref: "#/definitions/ClusterInfo" + - type: "object" + properties: + JoinTokens: + description: "The tokens workers and managers need to join the swarm." 
+ type: "object" + properties: + Worker: + description: "The token workers can use to join the swarm." + type: "string" + Manager: + description: "The token managers can use to join the swarm." + type: "string" + example: + CreatedAt: "2016-08-15T16:00:20.349727406Z" + Spec: + Dispatcher: + HeartbeatPeriod: 5000000000 + Orchestration: + TaskHistoryRetentionLimit: 10 + CAConfig: + NodeCertExpiry: 7776000000000000 + Raft: + LogEntriesForSlowFollowers: 500 + HeartbeatTick: 1 + SnapshotInterval: 10000 + ElectionTick: 3 + TaskDefaults: {} + EncryptionConfig: + AutoLockManagers: false + Name: "default" + JoinTokens: + Worker: "SWMTKN-1-1h8aps2yszaiqmz2l3oc5392pgk8e49qhx2aj3nyv0ui0hez2a-6qmn92w6bu3jdvnglku58u11a" + Manager: "SWMTKN-1-1h8aps2yszaiqmz2l3oc5392pgk8e49qhx2aj3nyv0ui0hez2a-8llk83c4wm9lwioey2s316r9l" + ID: "70ilmkj2f6sp2137c753w2nmt" + UpdatedAt: "2016-08-15T16:32:09.623207604Z" + Version: + Index: 51 + RootRotationInProgress: false + TLSInfo: + TrustRoot: | + -----BEGIN CERTIFICATE----- + MIIBajCCARCgAwIBAgIUbYqrLSOSQHoxD8CwG6Bi2PJi9c8wCgYIKoZIzj0EAwIw + EzERMA8GA1UEAxMIc3dhcm0tY2EwHhcNMTcwNDI0MjE0MzAwWhcNMzcwNDE5MjE0 + MzAwWjATMREwDwYDVQQDEwhzd2FybS1jYTBZMBMGByqGSM49AgEGCCqGSM49AwEH + A0IABJk/VyMPYdaqDXJb/VXh5n/1Yuv7iNrxV3Qb3l06XD46seovcDWs3IZNV1lf + 3Skyr0ofcchipoiHkXBODojJydSjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMB + Af8EBTADAQH/MB0GA1UdDgQWBBRUXxuRcnFjDfR/RIAUQab8ZV/n4jAKBggqhkjO + PQQDAgNIADBFAiAy+JTe6Uc3KyLCMiqGl2GyWGQqQDEcO3/YG36x7om65AIhAJvz + pxv6zFeVEkAEEkqIYi0omA9+CjanB/6Bz4n1uw8H + -----END CERTIFICATE----- + CertIssuerSubject: "MBMxETAPBgNVBAMTCHN3YXJtLWNh" + CertIssuerPublicKey: "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEmT9XIw9h1qoNclv9VeHmf/Vi6/uI2vFXdBveXTpcPjqx6i9wNazchk1XWV/dKTKvSh9xyGKmiIeRcE4OiMnJ1A==" + 404: + description: "no such swarm" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Swarm"] + /swarm/init: + post: + summary: "Initialize a new swarm" + operationId: "SwarmInit" + produces: + - "application/json" + - "text/plain" + responses: + 200: + description: "no error" + schema: + description: "The node ID" + type: "string" + example: "7v2t30z9blmxuhnyo6s4cpenp" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is already part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + required: true + schema: + type: "object" + properties: + ListenAddr: + description: "Listen address used for inter-manager communication, as well as determining the networking interface used for the VXLAN Tunnel Endpoint (VTEP). This can either be an address/port combination in the form `192.168.1.1:4567`, or an interface followed by a port number, like `eth0:4567`. If the port number is omitted, the default swarm listening port is used." + type: "string" + AdvertiseAddr: + description: "Externally reachable address advertised to other nodes. This can either be an address/port combination in the form `192.168.1.1:4567`, or an interface followed by a port number, like `eth0:4567`. If the port number is omitted, the port number from the listen address is used. If `AdvertiseAddr` is not specified, it will be automatically detected when possible." 
+ type: "string" + ForceNewCluster: + description: "Force creation of a new swarm." + type: "boolean" + Spec: + $ref: "#/definitions/SwarmSpec" + example: + ListenAddr: "0.0.0.0:2377" + AdvertiseAddr: "192.168.1.1:2377" + ForceNewCluster: false + Spec: + Orchestration: {} + Raft: {} + Dispatcher: {} + CAConfig: {} + EncryptionConfig: + AutoLockManagers: false + tags: ["Swarm"] + /swarm/join: + post: + summary: "Join an existing swarm" + operationId: "SwarmJoin" + responses: + 200: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is already part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + required: true + schema: + type: "object" + properties: + ListenAddr: + description: "Listen address used for inter-manager communication if the node gets promoted to manager, as well as determining the networking interface used for the VXLAN Tunnel Endpoint (VTEP)." + type: "string" + AdvertiseAddr: + description: "Externally reachable address advertised to other nodes. This can either be an address/port combination in the form `192.168.1.1:4567`, or an interface followed by a port number, like `eth0:4567`. If the port number is omitted, the port number from the listen address is used. If `AdvertiseAddr` is not specified, it will be automatically detected when possible." + type: "string" + RemoteAddrs: + description: "Addresses of manager nodes already participating in the swarm." + type: "string" + JoinToken: + description: "Secret token for joining this swarm." + type: "string" + example: + ListenAddr: "0.0.0.0:2377" + AdvertiseAddr: "192.168.1.1:2377" + RemoteAddrs: + - "node1:2377" + JoinToken: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2" + tags: ["Swarm"] + /swarm/leave: + post: + summary: "Leave a swarm" + operationId: "SwarmLeave" + responses: + 200: + description: "no error" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "force" + description: "Force leave swarm, even if this is the last manager or that it will break the cluster." + in: "query" + type: "boolean" + default: false + tags: ["Swarm"] + /swarm/update: + post: + summary: "Update a swarm" + operationId: "SwarmUpdate" + responses: + 200: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + required: true + schema: + $ref: "#/definitions/SwarmSpec" + - name: "version" + in: "query" + description: "The version number of the swarm object being updated. This is required to avoid conflicting writes." + type: "integer" + format: "int64" + required: true + - name: "rotateWorkerToken" + in: "query" + description: "Rotate the worker join token." + type: "boolean" + default: false + - name: "rotateManagerToken" + in: "query" + description: "Rotate the manager join token." + type: "boolean" + default: false + - name: "rotateManagerUnlockKey" + in: "query" + description: "Rotate the manager unlock key." 
+ type: "boolean" + default: false + tags: ["Swarm"] + /swarm/unlockkey: + get: + summary: "Get the unlock key" + operationId: "SwarmUnlockkey" + consumes: + - "application/json" + responses: + 200: + description: "no error" + schema: + type: "object" + properties: + UnlockKey: + description: "The swarm's unlock key." + type: "string" + example: + UnlockKey: "SWMKEY-1-7c37Cc8654o6p38HnroywCi19pllOnGtbdZEgtKxZu8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Swarm"] + /swarm/unlock: + post: + summary: "Unlock a locked manager" + operationId: "SwarmUnlock" + consumes: + - "application/json" + produces: + - "application/json" + parameters: + - name: "body" + in: "body" + required: true + schema: + type: "object" + properties: + UnlockKey: + description: "The swarm's unlock key." + type: "string" + example: + UnlockKey: "SWMKEY-1-7c37Cc8654o6p38HnroywCi19pllOnGtbdZEgtKxZu8" + responses: + 200: + description: "no error" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Swarm"] + /services: + get: + summary: "List services" + operationId: "ServiceList" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/Service" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + type: "string" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to process on the services list. Available filters: + + - `id=` + - `label=` + - `mode=["replicated"|"global"]` + - `name=` + tags: ["Service"] + /services/create: + post: + summary: "Create a service" + operationId: "ServiceCreate" + consumes: + - "application/json" + produces: + - "application/json" + responses: + 201: + description: "no error" + schema: + type: "object" + properties: + ID: + description: "The ID of the created service." 
+ type: "string" + Warning: + description: "Optional warning message" + type: "string" + example: + ID: "ak7w3gjqoa3kuz8xcpnyy0pvl" + Warning: "unable to pin image doesnotexist:latest to digest: image library/doesnotexist:latest not found" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 403: + description: "network is not eligible for services" + schema: + $ref: "#/definitions/ErrorResponse" + 409: + description: "name conflicts with an existing service" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + required: true + schema: + allOf: + - $ref: "#/definitions/ServiceSpec" + - type: "object" + example: + Name: "web" + TaskTemplate: + ContainerSpec: + Image: "nginx:alpine" + Mounts: + - + ReadOnly: true + Source: "web-data" + Target: "/usr/share/nginx/html" + Type: "volume" + VolumeOptions: + DriverConfig: {} + Labels: + com.example.something: "something-value" + Hosts: ["10.10.10.10 host1", "ABCD:EF01:2345:6789:ABCD:EF01:2345:6789 host2"] + User: "33" + DNSConfig: + Nameservers: ["8.8.8.8"] + Search: ["example.org"] + Options: ["timeout:3"] + Secrets: + - + File: + Name: "www.example.org.key" + UID: "33" + GID: "33" + Mode: 384 + SecretID: "fpjqlhnwb19zds35k8wn80lq9" + SecretName: "example_org_domain_key" + LogDriver: + Name: "json-file" + Options: + max-file: "3" + max-size: "10M" + Placement: {} + Resources: + Limits: + MemoryBytes: 104857600 + Reservations: {} + RestartPolicy: + Condition: "on-failure" + Delay: 10000000000 + MaxAttempts: 10 + Mode: + Replicated: + Replicas: 4 + UpdateConfig: + Parallelism: 2 + Delay: 1000000000 + FailureAction: "pause" + Monitor: 15000000000 + MaxFailureRatio: 0.15 + RollbackConfig: + Parallelism: 1 + Delay: 1000000000 + FailureAction: "pause" + Monitor: 15000000000 + MaxFailureRatio: 0.15 + EndpointSpec: + Ports: + - + Protocol: "tcp" + PublishedPort: 8080 + TargetPort: 80 + Labels: + foo: "bar" + - name: "X-Registry-Auth" + in: "header" + description: "A base64-encoded auth configuration for pulling from private registries. [See the authentication section for details.](#section/Authentication)" + type: "string" + tags: ["Service"] + /services/{id}: + get: + summary: "Inspect a service" + operationId: "ServiceInspect" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Service" + 404: + description: "no such service" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "ID or name of service." + required: true + type: "string" + - name: "insertDefaults" + in: "query" + description: "Fill empty fields with default values." 
+ type: "boolean" + default: false + tags: ["Service"] + delete: + summary: "Delete a service" + operationId: "ServiceDelete" + responses: + 200: + description: "no error" + 404: + description: "no such service" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "ID or name of service." + required: true + type: "string" + tags: ["Service"] + /services/{id}/update: + post: + summary: "Update a service" + operationId: "ServiceUpdate" + consumes: ["application/json"] + produces: ["application/json"] + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/ServiceUpdateResponse" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such service" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "ID or name of service." + required: true + type: "string" + - name: "body" + in: "body" + required: true + schema: + allOf: + - $ref: "#/definitions/ServiceSpec" + - type: "object" + example: + Name: "top" + TaskTemplate: + ContainerSpec: + Image: "busybox" + Args: + - "top" + Resources: + Limits: {} + Reservations: {} + RestartPolicy: + Condition: "any" + MaxAttempts: 0 + Placement: {} + ForceUpdate: 0 + Mode: + Replicated: + Replicas: 1 + UpdateConfig: + Parallelism: 2 + Delay: 1000000000 + FailureAction: "pause" + Monitor: 15000000000 + MaxFailureRatio: 0.15 + RollbackConfig: + Parallelism: 1 + Delay: 1000000000 + FailureAction: "pause" + Monitor: 15000000000 + MaxFailureRatio: 0.15 + EndpointSpec: + Mode: "vip" + + - name: "version" + in: "query" + description: "The version number of the service object being updated. This is required to avoid conflicting writes." + required: true + type: "integer" + - name: "registryAuthFrom" + in: "query" + type: "string" + description: "If the X-Registry-Auth header is not specified, this + parameter indicates where to find registry authorization credentials. The + valid values are `spec` and `previous-spec`." + default: "spec" + - name: "rollback" + in: "query" + type: "string" + description: "Set to this parameter to `previous` to cause a + server-side rollback to the previous service spec. The supplied spec will be + ignored in this case." + - name: "X-Registry-Auth" + in: "header" + description: "A base64-encoded auth configuration for pulling from private registries. [See the authentication section for details.](#section/Authentication)" + type: "string" + + tags: ["Service"] + /services/{id}/logs: + get: + summary: "Get service logs" + description: | + Get `stdout` and `stderr` logs from a service. + + **Note**: This endpoint works only for services with the `json-file` or `journald` logging drivers. 
+ operationId: "ServiceLogs" + produces: + - "application/vnd.docker.raw-stream" + - "application/json" + responses: + 101: + description: "logs returned as a stream" + schema: + type: "string" + format: "binary" + 200: + description: "logs returned as a string in response body" + schema: + type: "string" + 404: + description: "no such service" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such service: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the service" + type: "string" + - name: "details" + in: "query" + description: "Show service context and extra details provided to logs." + type: "boolean" + default: false + - name: "follow" + in: "query" + description: | + Return the logs as a stream. + + This will return a `101` HTTP response with a `Connection: upgrade` header, then hijack the HTTP connection to send raw output. For more information about hijacking and the stream format, [see the documentation for the attach endpoint](#operation/ContainerAttach). + type: "boolean" + default: false + - name: "stdout" + in: "query" + description: "Return logs from `stdout`" + type: "boolean" + default: false + - name: "stderr" + in: "query" + description: "Return logs from `stderr`" + type: "boolean" + default: false + - name: "since" + in: "query" + description: "Only return logs since this time, as a UNIX timestamp" + type: "integer" + default: 0 + - name: "timestamps" + in: "query" + description: "Add timestamps to every log line" + type: "boolean" + default: false + - name: "tail" + in: "query" + description: "Only return this number of log lines from the end of the logs. Specify as an integer or `all` to output all log lines." 
+ type: "string" + default: "all" + tags: ["Service"] + /tasks: + get: + summary: "List tasks" + operationId: "TaskList" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/Task" + example: + - ID: "0kzzo1i0y4jz6027t0k7aezc7" + Version: + Index: 71 + CreatedAt: "2016-06-07T21:07:31.171892745Z" + UpdatedAt: "2016-06-07T21:07:31.376370513Z" + Spec: + ContainerSpec: + Image: "redis" + Resources: + Limits: {} + Reservations: {} + RestartPolicy: + Condition: "any" + MaxAttempts: 0 + Placement: {} + ServiceID: "9mnpnzenvg8p8tdbtq4wvbkcz" + Slot: 1 + NodeID: "60gvrl6tm78dmak4yl7srz94v" + Status: + Timestamp: "2016-06-07T21:07:31.290032978Z" + State: "running" + Message: "started" + ContainerStatus: + ContainerID: "e5d62702a1b48d01c3e02ca1e0212a250801fa8d67caca0b6f35919ebc12f035" + PID: 677 + DesiredState: "running" + NetworksAttachments: + - Network: + ID: "4qvuz4ko70xaltuqbt8956gd1" + Version: + Index: 18 + CreatedAt: "2016-06-07T20:31:11.912919752Z" + UpdatedAt: "2016-06-07T21:07:29.955277358Z" + Spec: + Name: "ingress" + Labels: + com.docker.swarm.internal: "true" + DriverConfiguration: {} + IPAMOptions: + Driver: {} + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + DriverState: + Name: "overlay" + Options: + com.docker.network.driver.overlay.vxlanid_list: "256" + IPAMOptions: + Driver: + Name: "default" + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + Addresses: + - "10.255.0.10/16" + - ID: "1yljwbmlr8er2waf8orvqpwms" + Version: + Index: 30 + CreatedAt: "2016-06-07T21:07:30.019104782Z" + UpdatedAt: "2016-06-07T21:07:30.231958098Z" + Name: "hopeful_cori" + Spec: + ContainerSpec: + Image: "redis" + Resources: + Limits: {} + Reservations: {} + RestartPolicy: + Condition: "any" + MaxAttempts: 0 + Placement: {} + ServiceID: "9mnpnzenvg8p8tdbtq4wvbkcz" + Slot: 1 + NodeID: "60gvrl6tm78dmak4yl7srz94v" + Status: + Timestamp: "2016-06-07T21:07:30.202183143Z" + State: "shutdown" + Message: "shutdown" + ContainerStatus: + ContainerID: "1cf8d63d18e79668b0004a4be4c6ee58cddfad2dae29506d8781581d0688a213" + DesiredState: "shutdown" + NetworksAttachments: + - Network: + ID: "4qvuz4ko70xaltuqbt8956gd1" + Version: + Index: 18 + CreatedAt: "2016-06-07T20:31:11.912919752Z" + UpdatedAt: "2016-06-07T21:07:29.955277358Z" + Spec: + Name: "ingress" + Labels: + com.docker.swarm.internal: "true" + DriverConfiguration: {} + IPAMOptions: + Driver: {} + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + DriverState: + Name: "overlay" + Options: + com.docker.network.driver.overlay.vxlanid_list: "256" + IPAMOptions: + Driver: + Name: "default" + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + Addresses: + - "10.255.0.5/16" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + type: "string" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to process on the tasks list. 
Available filters: + + - `desired-state=(running | shutdown | accepted)` + - `id=` + - `label=key` or `label="key=value"` + - `name=` + - `node=` + - `service=` + tags: ["Task"] + /tasks/{id}: + get: + summary: "Inspect a task" + operationId: "TaskInspect" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Task" + 404: + description: "no such task" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "ID of the task" + required: true + type: "string" + tags: ["Task"] + /tasks/{id}/logs: + get: + summary: "Get task logs" + description: | + Get `stdout` and `stderr` logs from a task. + + **Note**: This endpoint works only for services with the `json-file` or `journald` logging drivers. + operationId: "TaskLogs" + produces: + - "application/vnd.docker.raw-stream" + - "application/json" + responses: + 101: + description: "logs returned as a stream" + schema: + type: "string" + format: "binary" + 200: + description: "logs returned as a string in response body" + schema: + type: "string" + 404: + description: "no such task" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such task: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID of the task" + type: "string" + - name: "details" + in: "query" + description: "Show task context and extra details provided to logs." + type: "boolean" + default: false + - name: "follow" + in: "query" + description: | + Return the logs as a stream. + + This will return a `101` HTTP response with a `Connection: upgrade` header, then hijack the HTTP connection to send raw output. For more information about hijacking and the stream format, [see the documentation for the attach endpoint](#operation/ContainerAttach). + type: "boolean" + default: false + - name: "stdout" + in: "query" + description: "Return logs from `stdout`" + type: "boolean" + default: false + - name: "stderr" + in: "query" + description: "Return logs from `stderr`" + type: "boolean" + default: false + - name: "since" + in: "query" + description: "Only return logs since this time, as a UNIX timestamp" + type: "integer" + default: 0 + - name: "timestamps" + in: "query" + description: "Add timestamps to every log line" + type: "boolean" + default: false + - name: "tail" + in: "query" + description: "Only return this number of log lines from the end of the logs. Specify as an integer or `all` to output all log lines." 
+ type: "string" + default: "all" + /secrets: + get: + summary: "List secrets" + operationId: "SecretList" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/Secret" + example: + - ID: "ktnbjxoalbkvbvedmg1urrz8h" + Version: + Index: 11 + CreatedAt: "2016-11-05T01:20:17.327670065Z" + UpdatedAt: "2016-11-05T01:20:17.327670065Z" + Spec: + Name: "app-dev.crt" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + type: "string" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to process on the secrets list. Available filters: + + - `id=` + - `label= or label==value` + - `name=` + - `names=` + tags: ["Secret"] + /secrets/create: + post: + summary: "Create a secret" + operationId: "SecretCreate" + consumes: + - "application/json" + produces: + - "application/json" + responses: + 201: + description: "no error" + schema: + type: "object" + properties: + ID: + description: "The ID of the created secret." + type: "string" + example: + ID: "ktnbjxoalbkvbvedmg1urrz8h" + 409: + description: "name conflicts with an existing object" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + schema: + allOf: + - $ref: "#/definitions/SecretSpec" + - type: "object" + example: + Name: "app-key.crt" + Labels: + foo: "bar" + Data: "VEhJUyBJUyBOT1QgQSBSRUFMIENFUlRJRklDQVRFCg==" + tags: ["Secret"] + /secrets/{id}: + get: + summary: "Inspect a secret" + operationId: "SecretInspect" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Secret" + examples: + application/json: + ID: "ktnbjxoalbkvbvedmg1urrz8h" + Version: + Index: 11 + CreatedAt: "2016-11-05T01:20:17.327670065Z" + UpdatedAt: "2016-11-05T01:20:17.327670065Z" + Spec: + Name: "app-dev.crt" + 404: + description: "secret not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + type: "string" + description: "ID of the secret" + tags: ["Secret"] + delete: + summary: "Delete a secret" + operationId: "SecretDelete" + produces: + - "application/json" + responses: + 204: + description: "no error" + 404: + description: "secret not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + type: "string" + description: "ID of the secret" + tags: ["Secret"] + /secrets/{id}/update: + post: + summary: "Update a Secret" + operationId: "SecretUpdate" + responses: + 200: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such secret" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: 
"#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "The ID or name of the secret" + type: "string" + required: true + - name: "body" + in: "body" + schema: + $ref: "#/definitions/SecretSpec" + description: "The spec of the secret to update. Currently, only the Labels field can be updated. All other fields must remain unchanged from the [SecretInspect endpoint](#operation/SecretInspect) response values." + - name: "version" + in: "query" + description: "The version number of the secret object being updated. This is required to avoid conflicting writes." + type: "integer" + format: "int64" + required: true + tags: ["Secret"] + /distribution/{name}/json: + get: + summary: "Get image information from the registry" + description: "Return image digest and platform information by contacting the registry." + operationId: "DistributionInspect" + produces: + - "application/json" + responses: + 200: + description: "descriptor and platform information" + schema: + type: "object" + x-go-name: DistributionInspect + required: [Descriptor, Platforms] + properties: + Descriptor: + type: "object" + description: "A descriptor struct containing digest, media type, and size" + properties: + MediaType: + type: "string" + Size: + type: "integer" + format: "int64" + Digest: + type: "string" + URLs: + type: "array" + items: + type: "string" + Platforms: + type: "array" + description: "An array containing all platforms supported by the image" + items: + type: "object" + properties: + Architecture: + type: "string" + OS: + type: "string" + OSVersion: + type: "string" + OSFeatures: + type: "array" + items: + type: "string" + Variant: + type: "string" + Features: + type: "array" + items: + type: "string" + examples: + application/json: + Descriptor: + MediaType: "application/vnd.docker.distribution.manifest.v2+json" + Digest: "sha256:c0537ff6a5218ef531ece93d4984efc99bbf3f7497c0a7726c88e2bb7584dc96" + Size: 3987495 + URLs: + - "" + Platforms: + - Architecture: "amd64" + OS: "linux" + OSVersion: "" + OSFeatures: + - "" + Variant: "" + Features: + - "" + 401: + description: "Failed authentication or no image found" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such image: someimage (tag: latest)" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or id" + type: "string" + required: true + tags: ["Distribution"] diff --git a/components/engine/api/templates/server/operation.gotmpl b/components/engine/api/templates/server/operation.gotmpl new file mode 100644 index 0000000000..3ff63ef94c --- /dev/null +++ b/components/engine/api/templates/server/operation.gotmpl @@ -0,0 +1,26 @@ +package {{ .Package }} + +// ---------------------------------------------------------------------------- +// DO NOT EDIT THIS FILE +// This file was generated by `swagger generate operation` +// +// See hack/generate-swagger-api.sh +// ---------------------------------------------------------------------------- + +import ( + "net/http" + + context "golang.org/x/net/context" + + {{ range .DefaultImports }}{{ printf "%q" . }} + {{ end }} + {{ range $key, $value := .Imports }}{{ $key }} {{ printf "%q" $value }} + {{ end }} +) + + +{{ range .ExtraSchemas }} +// {{ .Name }} {{ template "docstring" . }} +// swagger:model {{ .Name }} +{{ template "schema" . 
}} +{{ end }} diff --git a/components/engine/api/types/auth.go b/components/engine/api/types/auth.go new file mode 100644 index 0000000000..056af6b842 --- /dev/null +++ b/components/engine/api/types/auth.go @@ -0,0 +1,22 @@ +package types + +// AuthConfig contains authorization information for connecting to a Registry +type AuthConfig struct { + Username string `json:"username,omitempty"` + Password string `json:"password,omitempty"` + Auth string `json:"auth,omitempty"` + + // Email is an optional value associated with the username. + // This field is deprecated and will be removed in a later + // version of docker. + Email string `json:"email,omitempty"` + + ServerAddress string `json:"serveraddress,omitempty"` + + // IdentityToken is used to authenticate the user and get + // an access token for the registry. + IdentityToken string `json:"identitytoken,omitempty"` + + // RegistryToken is a bearer token to be sent to a registry + RegistryToken string `json:"registrytoken,omitempty"` +} diff --git a/components/engine/api/types/backend/backend.go b/components/engine/api/types/backend/backend.go new file mode 100644 index 0000000000..368ad7b5ac --- /dev/null +++ b/components/engine/api/types/backend/backend.go @@ -0,0 +1,104 @@ +// Package backend includes types to send information to server backends. +package backend + +import ( + "io" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" +) + +// ContainerAttachConfig holds the streams to use when connecting to a container to view logs. +type ContainerAttachConfig struct { + GetStreams func() (io.ReadCloser, io.Writer, io.Writer, error) + UseStdin bool + UseStdout bool + UseStderr bool + Logs bool + Stream bool + DetachKeys string + + // Used to signify that streams are multiplexed and therefore need a StdWriter to encode stdout/stderr messages accordingly. + // TODO @cpuguy83: This shouldn't be needed. It was only added so that http and websocket endpoints can use the same function, and the websocket function was not using a stdwriter prior to this change... + // HOWEVER, the websocket endpoint is using a single stream and SHOULD be encoded with stdout/stderr as is done for HTTP since it is still just a single stream. + // Since such a change is an API change unrelated to the current changeset we'll keep it as is here and change separately. + MuxStreams bool +} + +// LogMessage is a data structure that represents a piece of output produced by a +// container. The Line member is a slice of an array whose contents can be +// changed after a log driver's Log() method returns. +// Changes to this struct need to be reflected in the reset method in +// daemon/logger/logger.go +type LogMessage struct { + Line []byte + Source string + Timestamp time.Time + Attrs LogAttributes + Partial bool + + // Err is an error associated with a message. Completeness of a message + // with Err is not expected, though it may be partially complete (fields may + // be missing, gibberish, or nil) + Err error +} + +// LogAttributes is used to hold the extra attributes available in the log message. +// Primarily used for converting the map type to string and sorting. +type LogAttributes map[string]string + +// LogSelector is a list of services and tasks that should be returned as part +// of a log stream.
It is similar to swarmapi.LogSelector, with the difference +// that the names don't have to be resolved to IDs; this is mostly to avoid +// accidents later where a swarmapi LogSelector might have been incorrectly +// used verbatim (and to avoid the handler having to import swarmapi types) +type LogSelector struct { + Services []string + Tasks []string +} + +// ContainerStatsConfig holds information for configuring the runtime +// behavior of a backend.ContainerStats() call. +type ContainerStatsConfig struct { + Stream bool + OutStream io.Writer + Version string +} + +// ExecInspect holds information about a running process started +// with docker exec. +type ExecInspect struct { + ID string + Running bool + ExitCode *int + ProcessConfig *ExecProcessConfig + OpenStdin bool + OpenStderr bool + OpenStdout bool + CanRemove bool + ContainerID string + DetachKeys []byte + Pid int +} + +// ExecProcessConfig holds information about the exec process +// running on the host. +type ExecProcessConfig struct { + Tty bool `json:"tty"` + Entrypoint string `json:"entrypoint"` + Arguments []string `json:"arguments"` + Privileged *bool `json:"privileged,omitempty"` + User string `json:"user,omitempty"` +} + +// ContainerCommitConfig is a wrapper around +// types.ContainerCommitConfig that also +// transports configuration changes for a container. +type ContainerCommitConfig struct { + types.ContainerCommitConfig + Changes []string + // TODO: ContainerConfig is only used by the dockerfile Builder, so remove it + // once the Builder has been updated to use a different interface + ContainerConfig *container.Config +} diff --git a/components/engine/api/types/backend/build.go b/components/engine/api/types/backend/build.go new file mode 100644 index 0000000000..5de35e6d88 --- /dev/null +++ b/components/engine/api/types/backend/build.go @@ -0,0 +1,31 @@ +package backend + +import ( + "io" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/streamformatter" +) + +// ProgressWriter is a data object to transport progress streams to the client +type ProgressWriter struct { + Output io.Writer + StdoutFormatter io.Writer + StderrFormatter io.Writer + AuxFormatter *streamformatter.AuxFormatter + ProgressReaderFunc func(io.ReadCloser) io.ReadCloser +} + +// BuildConfig is the configuration used by a BuildManager to start a build +type BuildConfig struct { + Source io.ReadCloser + ProgressWriter ProgressWriter + Options *types.ImageBuildOptions +} + +// GetImageAndLayerOptions are the options supported by GetImageAndReleasableLayer +type GetImageAndLayerOptions struct { + ForcePull bool + AuthConfig map[string]types.AuthConfig + Output io.Writer +} diff --git a/components/engine/api/types/blkiodev/blkio.go b/components/engine/api/types/blkiodev/blkio.go new file mode 100644 index 0000000000..931ae10ab1 --- /dev/null +++ b/components/engine/api/types/blkiodev/blkio.go @@ -0,0 +1,23 @@ +package blkiodev + +import "fmt" + +// WeightDevice is a structure that holds device:weight pair +type WeightDevice struct { + Path string + Weight uint16 +} + +func (w *WeightDevice) String() string { + return fmt.Sprintf("%s:%d", w.Path, w.Weight) +} + +// ThrottleDevice is a structure that holds device:rate_per_second pair +type ThrottleDevice struct { + Path string + Rate uint64 +} + +func (t *ThrottleDevice) String() string { + return fmt.Sprintf("%s:%d", t.Path, t.Rate) +} diff --git a/components/engine/api/types/client.go b/components/engine/api/types/client.go new file mode 100644 index 0000000000..0ce2c94303 --- 
/dev/null +++ b/components/engine/api/types/client.go @@ -0,0 +1,384 @@ +package types + +import ( + "bufio" + "io" + "net" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/filters" + "github.com/docker/go-units" +) + +// CheckpointCreateOptions holds parameters to create a checkpoint from a container +type CheckpointCreateOptions struct { + CheckpointID string + CheckpointDir string + Exit bool +} + +// CheckpointListOptions holds parameters to list checkpoints for a container +type CheckpointListOptions struct { + CheckpointDir string +} + +// CheckpointDeleteOptions holds parameters to delete a checkpoint from a container +type CheckpointDeleteOptions struct { + CheckpointID string + CheckpointDir string +} + +// ContainerAttachOptions holds parameters to attach to a container. +type ContainerAttachOptions struct { + Stream bool + Stdin bool + Stdout bool + Stderr bool + DetachKeys string + Logs bool +} + +// ContainerCommitOptions holds parameters to commit changes into a container. +type ContainerCommitOptions struct { + Reference string + Comment string + Author string + Changes []string + Pause bool + Config *container.Config +} + +// ContainerExecInspect holds information returned by exec inspect. +type ContainerExecInspect struct { + ExecID string + ContainerID string + Running bool + ExitCode int + Pid int +} + +// ContainerListOptions holds parameters to list containers with. +type ContainerListOptions struct { + Quiet bool + Size bool + All bool + Latest bool + Since string + Before string + Limit int + Filters filters.Args +} + +// ContainerLogsOptions holds parameters to filter logs with. +type ContainerLogsOptions struct { + ShowStdout bool + ShowStderr bool + Since string + Timestamps bool + Follow bool + Tail string + Details bool +} + +// ContainerRemoveOptions holds parameters to remove containers. +type ContainerRemoveOptions struct { + RemoveVolumes bool + RemoveLinks bool + Force bool +} + +// ContainerStartOptions holds parameters to start containers. +type ContainerStartOptions struct { + CheckpointID string + CheckpointDir string +} + +// CopyToContainerOptions holds information +// about files to copy into a container +type CopyToContainerOptions struct { + AllowOverwriteDirWithFile bool + CopyUIDGID bool +} + +// EventsOptions holds parameters to filter events with. +type EventsOptions struct { + Since string + Until string + Filters filters.Args +} + +// NetworkListOptions holds parameters to filter the list of networks with. +type NetworkListOptions struct { + Filters filters.Args +} + +// HijackedResponse holds connection information for a hijacked request. +type HijackedResponse struct { + Conn net.Conn + Reader *bufio.Reader +} + +// Close closes the hijacked connection and reader. +func (h *HijackedResponse) Close() { + h.Conn.Close() +} + +// CloseWriter is an interface that implements structs +// that close input streams to prevent from writing. +type CloseWriter interface { + CloseWrite() error +} + +// CloseWrite closes a readWriter for writing. +func (h *HijackedResponse) CloseWrite() error { + if conn, ok := h.Conn.(CloseWriter); ok { + return conn.CloseWrite() + } + return nil +} + +// ImageBuildOptions holds the information +// necessary to build images. 
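+// +// A minimal sketch of how a caller might populate a build request; the +// values shown are illustrative assumptions, not defaults: +// +// opts := ImageBuildOptions{ +// Tags: []string{"example/app:latest"}, +// Dockerfile: "Dockerfile", +// NoCache: true, +// } +//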
+type ImageBuildOptions struct { + Tags []string + SuppressOutput bool + RemoteContext string + NoCache bool + Remove bool + ForceRemove bool + PullParent bool + Isolation container.Isolation + CPUSetCPUs string + CPUSetMems string + CPUShares int64 + CPUQuota int64 + CPUPeriod int64 + Memory int64 + MemorySwap int64 + CgroupParent string + NetworkMode string + ShmSize int64 + Dockerfile string + Ulimits []*units.Ulimit + // BuildArgs needs to be a *string instead of just a string so that + // we can tell the difference between "" (empty string) and no value + // at all (nil). See the parsing of buildArgs in + // api/server/router/build/build_routes.go for even more info. + BuildArgs map[string]*string + AuthConfigs map[string]AuthConfig + Context io.Reader + Labels map[string]string + // squash the resulting image's layers to the parent + // preserves the original image and creates a new one from the parent with all + // the changes applied to a single layer + Squash bool + // CacheFrom specifies images that are used for matching cache. Images + // specified here do not need to have a valid parent chain to match cache. + CacheFrom []string + SecurityOpt []string + ExtraHosts []string // List of extra hosts + Target string +} + +// ImageBuildResponse holds information +// returned by a server after building +// an image. +type ImageBuildResponse struct { + Body io.ReadCloser + OSType string +} + +// ImageCreateOptions holds information to create images. +type ImageCreateOptions struct { + RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry +} + +// ImageImportSource holds source information for ImageImport +type ImageImportSource struct { + Source io.Reader // Source is the data to send to the server to create this image from. You must set SourceName to "-" to leverage this. + SourceName string // SourceName is the name of the image to pull. Set to "-" to leverage the Source attribute. +} + +// ImageImportOptions holds information to import images from the client host. +type ImageImportOptions struct { + Tag string // Tag is the name to tag this image with. This attribute is deprecated. + Message string // Message is the message to tag the image with + Changes []string // Changes are the raw changes to apply to this image +} + +// ImageListOptions holds parameters to filter the list of images with. +type ImageListOptions struct { + All bool + Filters filters.Args +} + +// ImageLoadResponse returns information to the client about a load process. +type ImageLoadResponse struct { + // Body must be closed to avoid a resource leak + Body io.ReadCloser + JSON bool +} + +// ImagePullOptions holds information to pull images. +type ImagePullOptions struct { + All bool + RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry + PrivilegeFunc RequestPrivilegeFunc +} + +// RequestPrivilegeFunc is a function interface that +// clients can supply to retry operations after +// getting an authorization error. +// This function returns the registry authentication +// header value in base 64 format, or an error +// if the privilege request fails. +type RequestPrivilegeFunc func() (string, error) + +//ImagePushOptions holds information to push images. +type ImagePushOptions ImagePullOptions + +// ImageRemoveOptions holds parameters to remove images. +type ImageRemoveOptions struct { + Force bool + PruneChildren bool +} + +// ImageSearchOptions holds parameters to search images with. 
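+// +// For illustration, a search capped at 10 results (the limit value is an +// arbitrary example): +// +// opts := ImageSearchOptions{Limit: 10} +//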
+type ImageSearchOptions struct { + RegistryAuth string + PrivilegeFunc RequestPrivilegeFunc + Filters filters.Args + Limit int +} + +// ResizeOptions holds parameters to resize a tty. +// It can be used to resize container ttys and +// exec process ttys too. +type ResizeOptions struct { + Height uint + Width uint +} + +// NodeListOptions holds parameters to list nodes with. +type NodeListOptions struct { + Filters filters.Args +} + +// NodeRemoveOptions holds parameters to remove nodes with. +type NodeRemoveOptions struct { + Force bool +} + +// ServiceCreateOptions contains the options to use when creating a service. +type ServiceCreateOptions struct { + // EncodedRegistryAuth is the encoded registry authorization credentials to + // use when updating the service. + // + // This field follows the format of the X-Registry-Auth header. + EncodedRegistryAuth string + + // QueryRegistry indicates whether the service update requires + // contacting a registry. A registry may be contacted to retrieve + // the image digest and manifest, which in turn can be used to update + // platform or other information about the service. + QueryRegistry bool +} + +// ServiceCreateResponse contains the information returned to a client +// on the creation of a new service. +type ServiceCreateResponse struct { + // ID is the ID of the created service. + ID string + // Warnings is a set of non-fatal warning messages to pass on to the user. + Warnings []string `json:",omitempty"` +} + +// Values for RegistryAuthFrom in ServiceUpdateOptions +const ( + RegistryAuthFromSpec = "spec" + RegistryAuthFromPreviousSpec = "previous-spec" +) + +// ServiceUpdateOptions contains the options to be used for updating services. +type ServiceUpdateOptions struct { + // EncodedRegistryAuth is the encoded registry authorization credentials to + // use when updating the service. + // + // This field follows the format of the X-Registry-Auth header. + EncodedRegistryAuth string + + // TODO(stevvooe): Consider moving the version parameter of ServiceUpdate + // into this field. While it does open API users up to racy writes, most + // users may not need that level of consistency in practice. + + // RegistryAuthFrom specifies where to find the registry authorization + // credentials if they are not given in EncodedRegistryAuth. Valid + // values are "spec" and "previous-spec". + RegistryAuthFrom string + + // Rollback indicates whether a server-side rollback should be + // performed. When this is set, the provided spec will be ignored. + // The valid values are "previous" and "none". An empty value is the + // same as "none". + Rollback string + + // QueryRegistry indicates whether the service update requires + // contacting a registry. A registry may be contacted to retrieve + // the image digest and manifest, which in turn can be used to update + // platform or other information about the service. + QueryRegistry bool +} + +// ServiceListOptions holds parameters to list services with. +type ServiceListOptions struct { + Filters filters.Args +} + +// ServiceInspectOptions holds parameters related to the "service inspect" +// operation. +type ServiceInspectOptions struct { + InsertDefaults bool +} + +// TaskListOptions holds parameters to list tasks with. +type TaskListOptions struct { + Filters filters.Args +} + +// PluginRemoveOptions holds parameters to remove plugins. +type PluginRemoveOptions struct { + Force bool +} + +// PluginEnableOptions holds parameters to enable plugins. 
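+// +// Illustrative use only; treating the timeout as seconds is an assumption +// of this sketch rather than a documented contract: +// +// opts := PluginEnableOptions{Timeout: 30} +//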
+type PluginEnableOptions struct { + Timeout int +} + +// PluginDisableOptions holds parameters to disable plugins. +type PluginDisableOptions struct { + Force bool +} + +// PluginInstallOptions holds parameters to install a plugin. +type PluginInstallOptions struct { + Disabled bool + AcceptAllPermissions bool + RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry + RemoteRef string // RemoteRef is the plugin name on the registry + PrivilegeFunc RequestPrivilegeFunc + AcceptPermissionsFunc func(PluginPrivileges) (bool, error) + Args []string +} + +// SwarmUnlockKeyResponse contains the response for Engine API: +// GET /swarm/unlockkey +type SwarmUnlockKeyResponse struct { + // UnlockKey is the unlock key in ASCII-armored format. + UnlockKey string +} + +// PluginCreateOptions hold all options to plugin create. +type PluginCreateOptions struct { + RepoName string +} diff --git a/components/engine/api/types/configs.go b/components/engine/api/types/configs.go new file mode 100644 index 0000000000..20c19f2132 --- /dev/null +++ b/components/engine/api/types/configs.go @@ -0,0 +1,69 @@ +package types + +import ( + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/network" +) + +// configs holds structs used for internal communication between the +// frontend (such as an http server) and the backend (such as the +// docker daemon). + +// ContainerCreateConfig is the parameter set to ContainerCreate() +type ContainerCreateConfig struct { + Name string + Config *container.Config + HostConfig *container.HostConfig + NetworkingConfig *network.NetworkingConfig + AdjustCPUShares bool +} + +// ContainerRmConfig holds arguments for the container remove +// operation. This struct is used to tell the backend what operations +// to perform. +type ContainerRmConfig struct { + ForceRemove, RemoveVolume, RemoveLink bool +} + +// ContainerCommitConfig contains build configs for commit operation, +// and is used when making a commit with the current state of the container. +type ContainerCommitConfig struct { + Pause bool + Repo string + Tag string + Author string + Comment string + // merge container config into commit config before commit + MergeConfigs bool + Config *container.Config +} + +// ExecConfig is a small subset of the Config struct that holds the configuration +// for the exec feature of docker. +type ExecConfig struct { + User string // User that will run the command + Privileged bool // Is the container in privileged mode + Tty bool // Attach standard streams to a tty. + AttachStdin bool // Attach the standard input, makes possible user interaction + AttachStderr bool // Attach the standard error + AttachStdout bool // Attach the standard output + Detach bool // Execute in detach mode + DetachKeys string // Escape keys for detach + Env []string // Environment variables + Cmd []string // Execution commands and args +} + +// PluginRmConfig holds arguments for plugin remove. +type PluginRmConfig struct { + ForceRemove bool +} + +// PluginEnableConfig holds arguments for plugin enable +type PluginEnableConfig struct { + Timeout int +} + +// PluginDisableConfig holds arguments for plugin disable. 
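+// +// For example (forcing disable is shown purely for illustration): +// +// cfg := PluginDisableConfig{ForceDisable: true} +//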
+type PluginDisableConfig struct { + ForceDisable bool +} diff --git a/components/engine/api/types/container/config.go b/components/engine/api/types/container/config.go new file mode 100644 index 0000000000..55a03fc981 --- /dev/null +++ b/components/engine/api/types/container/config.go @@ -0,0 +1,69 @@ +package container + +import ( + "time" + + "github.com/docker/docker/api/types/strslice" + "github.com/docker/go-connections/nat" +) + +// MinimumDuration puts a minimum on user configured duration. +// This is to prevent API error on time unit. For example, API may +// set 3 as healthcheck interval with intention of 3 seconds, but +// Docker interprets it as 3 nanoseconds. +const MinimumDuration = 1 * time.Millisecond + +// HealthConfig holds configuration settings for the HEALTHCHECK feature. +type HealthConfig struct { + // Test is the test to perform to check that the container is healthy. + // An empty slice means to inherit the default. + // The options are: + // {} : inherit healthcheck + // {"NONE"} : disable healthcheck + // {"CMD", args...} : exec arguments directly + // {"CMD-SHELL", command} : run command with system's default shell + Test []string `json:",omitempty"` + + // Zero means to inherit. Durations are expressed as integer nanoseconds. + Interval time.Duration `json:",omitempty"` // Interval is the time to wait between checks. + Timeout time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung. + StartPeriod time.Duration `json:",omitempty"` // The start period for the container to initialize before the retries starts to count down. + + // Retries is the number of consecutive failures needed to consider a container as unhealthy. + // Zero means inherit. + Retries int `json:",omitempty"` +} + +// Config contains the configuration data about a container. +// It should hold only portable information about the container. +// Here, "portable" means "independent from the host we are running on". +// Non-portable information *should* appear in HostConfig. +// All fields added to this struct must be marked `omitempty` to keep getting +// predictable hashes from the old `v1Compatibility` configuration. +type Config struct { + Hostname string // Hostname + Domainname string // Domainname + User string // User that will run the command(s) inside the container, also support user:group + AttachStdin bool // Attach the standard input, makes possible user interaction + AttachStdout bool // Attach the standard output + AttachStderr bool // Attach the standard error + ExposedPorts nat.PortSet `json:",omitempty"` // List of exposed ports + Tty bool // Attach standard streams to a tty, including stdin if it is not closed. + OpenStdin bool // Open stdin + StdinOnce bool // If true, close stdin after the 1 attached client disconnects. + Env []string // List of environment variable to set in the container + Cmd strslice.StrSlice // Command to run when starting the container + Healthcheck *HealthConfig `json:",omitempty"` // Healthcheck describes how to check the container is healthy + ArgsEscaped bool `json:",omitempty"` // True if command is already escaped (Windows specific) + Image string // Name of the image as it was passed by the operator (e.g. 
could be symbolic) + Volumes map[string]struct{} // List of volumes (mounts) used for the container + WorkingDir string // Current directory (PWD) in which the command will be launched + Entrypoint strslice.StrSlice // Entrypoint to run when starting the container + NetworkDisabled bool `json:",omitempty"` // Is network disabled + MacAddress string `json:",omitempty"` // Mac Address of the container + OnBuild []string // ONBUILD metadata that were defined in the image Dockerfile + Labels map[string]string // List of labels set on this container + StopSignal string `json:",omitempty"` // Signal to stop a container + StopTimeout *int `json:",omitempty"` // Timeout (in seconds) to stop a container + Shell strslice.StrSlice `json:",omitempty"` // Shell for shell-form of RUN, CMD, ENTRYPOINT +} diff --git a/components/engine/api/types/container/container_changes.go b/components/engine/api/types/container/container_changes.go new file mode 100644 index 0000000000..767945a532 --- /dev/null +++ b/components/engine/api/types/container/container_changes.go @@ -0,0 +1,21 @@ +package container + +// ---------------------------------------------------------------------------- +// DO NOT EDIT THIS FILE +// This file was generated by `swagger generate operation` +// +// See hack/generate-swagger-api.sh +// ---------------------------------------------------------------------------- + +// ContainerChangeResponseItem container change response item +// swagger:model ContainerChangeResponseItem +type ContainerChangeResponseItem struct { + + // Kind of change + // Required: true + Kind uint8 `json:"Kind"` + + // Path to file that has changed + // Required: true + Path string `json:"Path"` +} diff --git a/components/engine/api/types/container/container_create.go b/components/engine/api/types/container/container_create.go new file mode 100644 index 0000000000..c95023b814 --- /dev/null +++ b/components/engine/api/types/container/container_create.go @@ -0,0 +1,21 @@ +package container + +// ---------------------------------------------------------------------------- +// DO NOT EDIT THIS FILE +// This file was generated by `swagger generate operation` +// +// See hack/generate-swagger-api.sh +// ---------------------------------------------------------------------------- + +// ContainerCreateCreatedBody container create created body +// swagger:model ContainerCreateCreatedBody +type ContainerCreateCreatedBody struct { + + // The ID of the created container + // Required: true + ID string `json:"Id"` + + // Warnings encountered when creating the container + // Required: true + Warnings []string `json:"Warnings"` +} diff --git a/components/engine/api/types/container/container_top.go b/components/engine/api/types/container/container_top.go new file mode 100644 index 0000000000..78bc37ee5e --- /dev/null +++ b/components/engine/api/types/container/container_top.go @@ -0,0 +1,21 @@ +package container + +// ---------------------------------------------------------------------------- +// DO NOT EDIT THIS FILE +// This file was generated by `swagger generate operation` +// +// See hack/generate-swagger-api.sh +// ---------------------------------------------------------------------------- + +// ContainerTopOKBody container top o k body +// swagger:model ContainerTopOKBody +type ContainerTopOKBody struct { + + // Each process running in the container, where each process is an array of values corresponding to the titles + // Required: true + Processes [][]string `json:"Processes"` + + // The ps column titles + // Required: true
+ Titles []string `json:"Titles"` +} diff --git a/components/engine/api/types/container/container_update.go b/components/engine/api/types/container/container_update.go new file mode 100644 index 0000000000..2339366fbd --- /dev/null +++ b/components/engine/api/types/container/container_update.go @@ -0,0 +1,17 @@ +package container + +// ---------------------------------------------------------------------------- +// DO NOT EDIT THIS FILE +// This file was generated by `swagger generate operation` +// +// See hack/generate-swagger-api.sh +// ---------------------------------------------------------------------------- + +// ContainerUpdateOKBody container update o k body +// swagger:model ContainerUpdateOKBody +type ContainerUpdateOKBody struct { + + // warnings + // Required: true + Warnings []string `json:"Warnings"` +} diff --git a/components/engine/api/types/container/container_wait.go b/components/engine/api/types/container/container_wait.go new file mode 100644 index 0000000000..77ecdbaf7a --- /dev/null +++ b/components/engine/api/types/container/container_wait.go @@ -0,0 +1,17 @@ +package container + +// ---------------------------------------------------------------------------- +// DO NOT EDIT THIS FILE +// This file was generated by `swagger generate operation` +// +// See hack/generate-swagger-api.sh +// ---------------------------------------------------------------------------- + +// ContainerWaitOKBody container wait o k body +// swagger:model ContainerWaitOKBody +type ContainerWaitOKBody struct { + + // Exit code of the container + // Required: true + StatusCode int64 `json:"StatusCode"` +} diff --git a/components/engine/api/types/container/host_config.go b/components/engine/api/types/container/host_config.go new file mode 100644 index 0000000000..9fea9eb04b --- /dev/null +++ b/components/engine/api/types/container/host_config.go @@ -0,0 +1,380 @@ +package container + +import ( + "strings" + + "github.com/docker/docker/api/types/blkiodev" + "github.com/docker/docker/api/types/mount" + "github.com/docker/docker/api/types/strslice" + "github.com/docker/go-connections/nat" + "github.com/docker/go-units" +) + +// Isolation represents the isolation technology of a container. The supported +// values are platform specific. +type Isolation string + +// IsDefault indicates the default isolation technology of a container. On Linux this +// is the native driver. On Windows, this is a Windows Server Container. +func (i Isolation) IsDefault() bool { + return strings.ToLower(string(i)) == "default" || string(i) == "" +} + +// IpcMode represents the container ipc stack. +type IpcMode string + +// IsPrivate indicates whether the container uses its private ipc stack. +func (n IpcMode) IsPrivate() bool { + return !(n.IsHost() || n.IsContainer()) +} + +// IsHost indicates whether the container uses the host's ipc stack. +func (n IpcMode) IsHost() bool { + return n == "host" +} + +// IsContainer indicates whether the container uses a container's ipc stack. +func (n IpcMode) IsContainer() bool { + parts := strings.SplitN(string(n), ":", 2) + return len(parts) > 1 && parts[0] == "container" +} + +// Valid indicates whether the ipc stack is valid. +func (n IpcMode) Valid() bool { + parts := strings.Split(string(n), ":") + switch mode := parts[0]; mode { + case "", "host": + case "container": + if len(parts) != 2 || parts[1] == "" { + return false + } + default: + return false + } + return true +} + +// Container returns the name of the container whose ipc stack is going to be used.
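+// For example, with the "container:" form (the ID is illustrative): +// +// IpcMode("container:abc123").Container() // "abc123" +// IpcMode("host").Container() // "" +//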
+func (n IpcMode) Container() string { + parts := strings.SplitN(string(n), ":", 2) + if len(parts) > 1 { + return parts[1] + } + return "" +} + +// NetworkMode represents the container network stack. +type NetworkMode string + +// IsNone indicates whether the container isn't using a network stack. +func (n NetworkMode) IsNone() bool { + return n == "none" +} + +// IsDefault indicates whether the container uses the default network stack. +func (n NetworkMode) IsDefault() bool { + return n == "default" +} + +// IsPrivate indicates whether the container uses its private network stack. +func (n NetworkMode) IsPrivate() bool { + return !(n.IsHost() || n.IsContainer()) +} + +// IsContainer indicates whether the container uses a container network stack. +func (n NetworkMode) IsContainer() bool { + parts := strings.SplitN(string(n), ":", 2) + return len(parts) > 1 && parts[0] == "container" +} + +// ConnectedContainer is the id of the container whose network this container is connected to. +func (n NetworkMode) ConnectedContainer() string { + parts := strings.SplitN(string(n), ":", 2) + if len(parts) > 1 { + return parts[1] + } + return "" +} + +// UserDefined indicates a user-created network +func (n NetworkMode) UserDefined() string { + if n.IsUserDefined() { + return string(n) + } + return "" +} + +// UsernsMode represents userns mode in the container. +type UsernsMode string + +// IsHost indicates whether the container uses the host's userns. +func (n UsernsMode) IsHost() bool { + return n == "host" +} + +// IsPrivate indicates whether the container uses a private userns. +func (n UsernsMode) IsPrivate() bool { + return !(n.IsHost()) +} + +// Valid indicates whether the userns is valid. +func (n UsernsMode) Valid() bool { + parts := strings.Split(string(n), ":") + switch mode := parts[0]; mode { + case "", "host": + default: + return false + } + return true +} + +// CgroupSpec represents the cgroup to use for the container. +type CgroupSpec string + +// IsContainer indicates whether the container is using another container's cgroup +func (c CgroupSpec) IsContainer() bool { + parts := strings.SplitN(string(c), ":", 2) + return len(parts) > 1 && parts[0] == "container" +} + +// Valid indicates whether the cgroup spec is valid. +func (c CgroupSpec) Valid() bool { + return c.IsContainer() || c == "" +} + +// Container returns the name of the container whose cgroup will be used. +func (c CgroupSpec) Container() string { + parts := strings.SplitN(string(c), ":", 2) + if len(parts) > 1 { + return parts[1] + } + return "" +} + +// UTSMode represents the UTS namespace of the container. +type UTSMode string + +// IsPrivate indicates whether the container uses its private UTS namespace. +func (n UTSMode) IsPrivate() bool { + return !(n.IsHost()) +} + +// IsHost indicates whether the container uses the host's UTS namespace. +func (n UTSMode) IsHost() bool { + return n == "host" +} + +// Valid indicates whether the UTS namespace is valid. +func (n UTSMode) Valid() bool { + parts := strings.Split(string(n), ":") + switch mode := parts[0]; mode { + case "", "host": + default: + return false + } + return true +} + +// PidMode represents the pid namespace of the container. +type PidMode string + +// IsPrivate indicates whether the container uses its own new pid namespace. +func (n PidMode) IsPrivate() bool { + return !(n.IsHost() || n.IsContainer()) +} + +// IsHost indicates whether the container uses the host's pid namespace.
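+// +// For example (the container name is illustrative): +// +// PidMode("host").IsHost() // true +// PidMode("container:web").IsHost() // false +//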
+func (n PidMode) IsHost() bool { + return n == "host" +} + +// IsContainer indicates whether the container uses a container's pid namespace. +func (n PidMode) IsContainer() bool { + parts := strings.SplitN(string(n), ":", 2) + return len(parts) > 1 && parts[0] == "container" +} + +// Valid indicates whether the pid namespace is valid. +func (n PidMode) Valid() bool { + parts := strings.Split(string(n), ":") + switch mode := parts[0]; mode { + case "", "host": + case "container": + if len(parts) != 2 || parts[1] == "" { + return false + } + default: + return false + } + return true +} + +// Container returns the name of the container whose pid namespace is going to be used. +func (n PidMode) Container() string { + parts := strings.SplitN(string(n), ":", 2) + if len(parts) > 1 { + return parts[1] + } + return "" +} + +// DeviceMapping represents the device mapping between the host and the container. +type DeviceMapping struct { + PathOnHost string + PathInContainer string + CgroupPermissions string +} + +// RestartPolicy represents the restart policies of the container. +type RestartPolicy struct { + Name string + MaximumRetryCount int +} + +// IsNone indicates whether the container has the "no" restart policy. +// This means the container will not automatically restart when exiting. +func (rp *RestartPolicy) IsNone() bool { + return rp.Name == "no" || rp.Name == "" +} + +// IsAlways indicates whether the container has the "always" restart policy. +// This means the container will automatically restart regardless of the exit status. +func (rp *RestartPolicy) IsAlways() bool { + return rp.Name == "always" +} + +// IsOnFailure indicates whether the container has the "on-failure" restart policy. +// This means the container will automatically restart if it exits with a non-zero exit status. +func (rp *RestartPolicy) IsOnFailure() bool { + return rp.Name == "on-failure" +} + +// IsUnlessStopped indicates whether the container has the +// "unless-stopped" restart policy. This means the container will +// automatically restart unless the user has put it into a stopped state. +func (rp *RestartPolicy) IsUnlessStopped() bool { + return rp.Name == "unless-stopped" +} + +// IsSame compares two RestartPolicy values to see if they are the same +func (rp *RestartPolicy) IsSame(tp *RestartPolicy) bool { + return rp.Name == tp.Name && rp.MaximumRetryCount == tp.MaximumRetryCount +} + +// LogMode is a type to define the available modes for logging. +// These modes affect how logs are handled when log messages start piling up. +type LogMode string + +// Available logging modes +const ( + LogModeUnset = "" + LogModeBlocking LogMode = "blocking" + LogModeNonBlock LogMode = "non-blocking" +) + +// LogConfig represents the logging configuration of the container. +type LogConfig struct { + Type string + Config map[string]string +} + +// Resources contains container's resources (cgroups config, ulimits...) +type Resources struct { + // Applicable to all platforms + CPUShares int64 `json:"CpuShares"` // CPU shares (relative weight vs. other containers) + Memory int64 // Memory limit (in bytes) + NanoCPUs int64 `json:"NanoCpus"` // CPU quota in units of 10^-9 CPUs. + + // Applicable to UNIX platforms + CgroupParent string // Parent cgroup. + BlkioWeight uint16 // Block IO weight (relative weight vs.
+	BlkioWeightDevice    []*blkiodev.WeightDevice
+	BlkioDeviceReadBps   []*blkiodev.ThrottleDevice
+	BlkioDeviceWriteBps  []*blkiodev.ThrottleDevice
+	BlkioDeviceReadIOps  []*blkiodev.ThrottleDevice
+	BlkioDeviceWriteIOps []*blkiodev.ThrottleDevice
+	CPUPeriod            int64           `json:"CpuPeriod"`          // CPU CFS (Completely Fair Scheduler) period
+	CPUQuota             int64           `json:"CpuQuota"`           // CPU CFS (Completely Fair Scheduler) quota
+	CPURealtimePeriod    int64           `json:"CpuRealtimePeriod"`  // CPU real-time period
+	CPURealtimeRuntime   int64           `json:"CpuRealtimeRuntime"` // CPU real-time runtime
+	CpusetCpus           string          // CPUs in which to allow execution (e.g. `0-2`, `0,1`)
+	CpusetMems           string          // Memory nodes (MEMs) in which to allow execution (e.g. `0-2`, `0,1`)
+	Devices              []DeviceMapping // List of devices to map inside the container
+	DeviceCgroupRules    []string        // List of rules to be added to the device cgroup
+	DiskQuota            int64           // Disk limit (in bytes)
+	KernelMemory         int64           // Kernel memory limit (in bytes)
+	MemoryReservation    int64           // Memory soft limit (in bytes)
+	MemorySwap           int64           // Total memory usage (memory + swap); set `-1` to enable unlimited swap
+	MemorySwappiness     *int64          // Tuning container memory swappiness behaviour
+	OomKillDisable       *bool           // Whether to disable the OOM Killer or not
+	PidsLimit            int64           // Setting the pids limit for a container
+	Ulimits              []*units.Ulimit // List of ulimits to be set in the container
+
+	// Applicable to Windows
+	CPUCount           int64  `json:"CpuCount"`   // CPU count
+	CPUPercent         int64  `json:"CpuPercent"` // CPU percent
+	IOMaximumIOps      uint64 // Maximum IOps for the container system drive
+	IOMaximumBandwidth uint64 // Maximum IO in bytes per second for the container system drive
+}
+
+// UpdateConfig holds the mutable attributes of a Container.
+// Those attributes can be updated at runtime.
+type UpdateConfig struct {
+	// Contains container's resources (cgroups, ulimits)
+	Resources
+	RestartPolicy RestartPolicy
+}
+
+// HostConfig is the non-portable Config structure of a container.
+// Here, "non-portable" means "dependent on the host we are running on".
+// Portable information *should* appear in Config.
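+//
+// A minimal sketch of how a host config might be populated from a client
+// package (the field values are hypothetical):
+//
+//	hc := container.HostConfig{
+//		NetworkMode:   "bridge",
+//		RestartPolicy: container.RestartPolicy{Name: "on-failure", MaximumRetryCount: 3},
+//		ShmSize:       64 * 1024 * 1024, // 64 MiB
+//	}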
+type HostConfig struct {
+	// Applicable to all platforms
+	Binds           []string      // List of volume bindings for this container
+	ContainerIDFile string        // File (path) where the containerId is written
+	LogConfig       LogConfig     // Configuration of the logs for this container
+	NetworkMode     NetworkMode   // Network mode to use for the container
+	PortBindings    nat.PortMap   // Port mapping between the exposed port (container) and the host
+	RestartPolicy   RestartPolicy // Restart policy to be used for the container
+	AutoRemove      bool          // Automatically remove the container when it exits
+	VolumeDriver    string        // Name of the volume driver used to mount volumes
+	VolumesFrom     []string      // List of volumes to take from other containers
+
+	// Applicable to UNIX platforms
+	CapAdd          strslice.StrSlice // List of kernel capabilities to add to the container
+	CapDrop         strslice.StrSlice // List of kernel capabilities to remove from the container
+	DNS             []string          `json:"Dns"`        // List of DNS servers to look up
+	DNSOptions      []string          `json:"DnsOptions"` // List of DNS options to use
+	DNSSearch       []string          `json:"DnsSearch"`  // List of DNS search domains to use
+	ExtraHosts      []string          // List of extra hosts
+	GroupAdd        []string          // List of additional groups that the container process will run as
+	IpcMode         IpcMode           // IPC namespace to use for the container
+	Cgroup          CgroupSpec        // Cgroup to use for the container
+	Links           []string          // List of links (in the name:alias form)
+	OomScoreAdj     int               // Container preference for OOM-killing
+	PidMode         PidMode           // PID namespace to use for the container
+	Privileged      bool              // Is the container in privileged mode
+	PublishAllPorts bool              // Whether Docker should publish all exposed ports for the container
+	ReadonlyRootfs  bool              // Whether the container's root filesystem is mounted read-only
+	SecurityOpt     []string          // List of string values to customize labels for MLS systems, such as SELinux.
+	StorageOpt      map[string]string `json:",omitempty"` // Storage driver options per container.
+	Tmpfs           map[string]string `json:",omitempty"` // List of tmpfs (mounts) used for the container
+	UTSMode         UTSMode           // UTS namespace to use for the container
+	UsernsMode      UsernsMode        // The user namespace to use for the container
+	ShmSize         int64             // Total shm memory usage
+	Sysctls         map[string]string `json:",omitempty"` // List of namespaced sysctls used for the container
+	Runtime         string            `json:",omitempty"` // Runtime to use with this container
+
+	// Applicable to Windows
+	ConsoleSize [2]uint   // Initial console size (height,width)
+	Isolation   Isolation // Isolation technology of the container (e.g. default, hyperv)
+
+	// Contains container's resources (cgroups, ulimits)
+	Resources
+
+	// Mounts specs used by the container
+	Mounts []mount.Mount `json:",omitempty"`
+
+	// Run a custom init inside the container; if null, use the daemon's configured settings
+	Init *bool `json:",omitempty"`
+}
diff --git a/components/engine/api/types/container/hostconfig_unix.go b/components/engine/api/types/container/hostconfig_unix.go
new file mode 100644
index 0000000000..2d664d1c96
--- /dev/null
+++ b/components/engine/api/types/container/hostconfig_unix.go
@@ -0,0 +1,41 @@
+// +build !windows
+
+package container
+
+// IsValid indicates if an isolation technology is valid
+func (i Isolation) IsValid() bool {
+	return i.IsDefault()
+}
+
+// NetworkName returns the name of the network stack.
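+//
+// For example (illustrative values):
+//
+//	NetworkMode("host").NetworkName()          // "host"
+//	NetworkMode("container:web").NetworkName() // "container"
+//	NetworkMode("my-net").NetworkName()        // "my-net" (user-defined)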
+func (n NetworkMode) NetworkName() string {
+	if n.IsBridge() {
+		return "bridge"
+	} else if n.IsHost() {
+		return "host"
+	} else if n.IsContainer() {
+		return "container"
+	} else if n.IsNone() {
+		return "none"
+	} else if n.IsDefault() {
+		return "default"
+	} else if n.IsUserDefined() {
+		return n.UserDefined()
+	}
+	return ""
+}
+
+// IsBridge indicates whether the container uses the bridge network stack.
+func (n NetworkMode) IsBridge() bool {
+	return n == "bridge"
+}
+
+// IsHost indicates whether the container uses the host network stack.
+func (n NetworkMode) IsHost() bool {
+	return n == "host"
+}
+
+// IsUserDefined indicates whether the container uses a user-created network.
+func (n NetworkMode) IsUserDefined() bool {
+	return !n.IsDefault() && !n.IsBridge() && !n.IsHost() && !n.IsNone() && !n.IsContainer()
+}
diff --git a/components/engine/api/types/container/hostconfig_windows.go b/components/engine/api/types/container/hostconfig_windows.go
new file mode 100644
index 0000000000..469923f7e9
--- /dev/null
+++ b/components/engine/api/types/container/hostconfig_windows.go
@@ -0,0 +1,54 @@
+package container
+
+import (
+	"strings"
+)
+
+// IsBridge indicates whether the container uses the bridge network stack;
+// on Windows this stack is given the name NAT.
+func (n NetworkMode) IsBridge() bool {
+	return n == "nat"
+}
+
+// IsHost indicates whether the container uses the host network stack.
+// It always returns false, as this is not supported on Windows.
+func (n NetworkMode) IsHost() bool {
+	return false
+}
+
+// IsUserDefined indicates whether the container uses a user-created network.
+func (n NetworkMode) IsUserDefined() bool {
+	return !n.IsDefault() && !n.IsNone() && !n.IsBridge() && !n.IsContainer()
+}
+
+// IsHyperV indicates the use of a Hyper-V partition for isolation
+func (i Isolation) IsHyperV() bool {
+	return strings.ToLower(string(i)) == "hyperv"
+}
+
+// IsProcess indicates the use of process isolation
+func (i Isolation) IsProcess() bool {
+	return strings.ToLower(string(i)) == "process"
+}
+
+// IsValid indicates if an isolation technology is valid
+func (i Isolation) IsValid() bool {
+	return i.IsDefault() || i.IsHyperV() || i.IsProcess()
+}
+
+// NetworkName returns the name of the network stack.
+func (n NetworkMode) NetworkName() string {
+	if n.IsDefault() {
+		return "default"
+	} else if n.IsBridge() {
+		return "nat"
+	} else if n.IsNone() {
+		return "none"
+	} else if n.IsContainer() {
+		return "container"
+	} else if n.IsUserDefined() {
+		return n.UserDefined()
+	}
+
+	return ""
+}
diff --git a/components/engine/api/types/container/waitcondition.go b/components/engine/api/types/container/waitcondition.go
new file mode 100644
index 0000000000..64820fe358
--- /dev/null
+++ b/components/engine/api/types/container/waitcondition.go
@@ -0,0 +1,22 @@
+package container
+
+// WaitCondition is a type used to specify a container state for which
+// to wait.
+type WaitCondition string
+
+// Possible WaitCondition values.
+//
+// WaitConditionNotRunning (default) is used to wait for any of the non-running
+// states: "created", "exited", "dead", "removing", or "removed".
+//
+// WaitConditionNextExit is used to wait for the next time the state changes
+// to a non-running state. If the state is currently "created" or "exited",
+// this would cause Wait() to block until either the container runs and exits
+// or is removed.
+//
+// WaitConditionRemoved is used to wait for the container to be removed.
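+//
+// A hedged usage sketch, assuming the Go client's ContainerWait accepts one
+// of these conditions (cli, ctx, and containerID are placeholders):
+//
+//	statusCh, errCh := cli.ContainerWait(ctx, containerID, container.WaitConditionNotRunning)
+//	select {
+//	case err := <-errCh:
+//		// transport or daemon error
+//	case status := <-statusCh:
+//		// status carries the container's exit code
+//	}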
+const (
+	WaitConditionNotRunning WaitCondition = "not-running"
+	WaitConditionNextExit   WaitCondition = "next-exit"
+	WaitConditionRemoved    WaitCondition = "removed"
+)
diff --git a/components/engine/api/types/error_response.go b/components/engine/api/types/error_response.go
new file mode 100644
index 0000000000..dc942d9d9e
--- /dev/null
+++ b/components/engine/api/types/error_response.go
@@ -0,0 +1,13 @@
+package types
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+// ErrorResponse Represents an error.
+// swagger:model ErrorResponse
+type ErrorResponse struct {
+
+	// The error message.
+	// Required: true
+	Message string `json:"message"`
+}
diff --git a/components/engine/api/types/events/events.go b/components/engine/api/types/events/events.go
new file mode 100644
index 0000000000..5f5f540346
--- /dev/null
+++ b/components/engine/api/types/events/events.go
@@ -0,0 +1,50 @@
+package events
+
+const (
+	// ContainerEventType is the event type that containers generate
+	ContainerEventType = "container"
+	// DaemonEventType is the event type that the daemon generates
+	DaemonEventType = "daemon"
+	// ImageEventType is the event type that images generate
+	ImageEventType = "image"
+	// NetworkEventType is the event type that networks generate
+	NetworkEventType = "network"
+	// PluginEventType is the event type that plugins generate
+	PluginEventType = "plugin"
+	// VolumeEventType is the event type that volumes generate
+	VolumeEventType = "volume"
+	// ServiceEventType is the event type that services generate
+	ServiceEventType = "service"
+	// NodeEventType is the event type that nodes generate
+	NodeEventType = "node"
+	// SecretEventType is the event type that secrets generate
+	SecretEventType = "secret"
+)
+
+// Actor describes something that generates events,
+// like a container, or a network, or a volume.
+// It has a defined name and a set of attributes.
+// A container's attributes are its labels; other actors
+// can generate these attributes from other properties.
+type Actor struct {
+	ID         string
+	Attributes map[string]string
+}
+
+// Message represents the information an event contains
+type Message struct {
+	// Deprecated information from JSONMessage.
+	// With data only in container events.
+	Status string `json:"status,omitempty"`
+	ID     string `json:"id,omitempty"`
+	From   string `json:"from,omitempty"`
+
+	Type   string
+	Action string
+	Actor  Actor
+	// Engine events are local scope. Cluster events are swarm scope.
+	Scope string `json:"scope,omitempty"`
+
+	Time     int64 `json:"time,omitempty"`
+	TimeNano int64 `json:"timeNano,omitempty"`
+}
diff --git a/components/engine/api/types/filters/parse.go b/components/engine/api/types/filters/parse.go
new file mode 100644
index 0000000000..beec3d4940
--- /dev/null
+++ b/components/engine/api/types/filters/parse.go
@@ -0,0 +1,310 @@
+// Package filters provides helper functions to parse and handle command-line
+// filters, used for example in the docker ps or docker images commands.
+package filters
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"regexp"
+	"strings"
+
+	"github.com/docker/docker/api/types/versions"
+)
+
+// Args stores filter arguments as map key:{map key: bool}.
+// It contains an aggregation of the map of arguments (which are in the form
+// of -f 'key=value') based on the key, and stores values for the same key
+// in a map with string keys and boolean values.
+// e.g. given -f 'label=label1=1' -f 'label=label2=2' -f 'image.name=ubuntu'
+// the args will be {"image.name":{"ubuntu":true},"label":{"label1=1":true,"label2=2":true}}
+type Args struct {
+	fields map[string]map[string]bool
+}
+
+// NewArgs initializes a new Args struct.
+func NewArgs() Args {
+	return Args{fields: map[string]map[string]bool{}}
+}
+
+// ParseFlag parses the argument to the filter flag. Like
+//
+// `docker ps -f 'created=today' -f 'image.name=ubuntu*'`
+//
+// If the prev map is provided, then it is appended to, and returned. By default
+// a new map is created.
+func ParseFlag(arg string, prev Args) (Args, error) {
+	filters := prev
+	if len(arg) == 0 {
+		return filters, nil
+	}
+
+	if !strings.Contains(arg, "=") {
+		return filters, ErrBadFormat
+	}
+
+	f := strings.SplitN(arg, "=", 2)
+
+	name := strings.ToLower(strings.TrimSpace(f[0]))
+	value := strings.TrimSpace(f[1])
+
+	filters.Add(name, value)
+
+	return filters, nil
+}
+
+// ErrBadFormat is an error returned in case of bad format for a filter.
+var ErrBadFormat = errors.New("bad format of filter (expected name=value)")
+
+// ToParam packs the Args into a string for easy transport from client to server.
+func ToParam(a Args) (string, error) {
+	// this way we don't URL encode {}, just empty space
+	if a.Len() == 0 {
+		return "", nil
+	}
+
+	buf, err := json.Marshal(a.fields)
+	if err != nil {
+		return "", err
+	}
+	return string(buf), nil
+}
+
+// ToParamWithVersion packs the Args into a string for easy transport from client to server.
+// The generated string will depend on the specified version (corresponding to the API version).
+func ToParamWithVersion(version string, a Args) (string, error) {
+	// this way we don't URL encode {}, just empty space
+	if a.Len() == 0 {
+		return "", nil
+	}
+
+	// for daemons older than v1.10, filter must be of the form map[string][]string
+	var buf []byte
+	var err error
+	if version != "" && versions.LessThan(version, "1.22") {
+		buf, err = json.Marshal(convertArgsToSlice(a.fields))
+	} else {
+		buf, err = json.Marshal(a.fields)
+	}
+	if err != nil {
+		return "", err
+	}
+	return string(buf), nil
+}
+
+// FromParam unpacks the filter Args.
+func FromParam(p string) (Args, error) {
+	if len(p) == 0 {
+		return NewArgs(), nil
+	}
+
+	r := strings.NewReader(p)
+	d := json.NewDecoder(r)
+
+	m := map[string]map[string]bool{}
+	if err := d.Decode(&m); err != nil {
+		r.Seek(0, 0)
+
+		// Allow parsing old arguments in slice format.
+		// Because other libraries might be sending them in this format.
+		deprecated := map[string][]string{}
+		if deprecatedErr := d.Decode(&deprecated); deprecatedErr == nil {
+			m = deprecatedArgs(deprecated)
+		} else {
+			return NewArgs(), err
+		}
+	}
+	return Args{m}, nil
+}
+
+// Get returns the list of values associated with a field.
+// It returns a slice of strings to keep backwards compatibility with old code.
+func (filters Args) Get(field string) []string {
+	values := filters.fields[field]
+	if values == nil {
+		return make([]string, 0)
+	}
+	slice := make([]string, 0, len(values))
+	for key := range values {
+		slice = append(slice, key)
+	}
+	return slice
+}
+
+// Add adds a new value to a filter field.
+func (filters Args) Add(name, value string) {
+	if _, ok := filters.fields[name]; ok {
+		filters.fields[name][value] = true
+	} else {
+		filters.fields[name] = map[string]bool{value: true}
+	}
+}
+
+// Del removes a value from a filter field.
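+//
+// For example (illustrative values):
+//
+//	f := NewArgs()
+//	f.Add("status", "running")
+//	f.Add("status", "paused")
+//	f.Del("status", "paused")
+//	f.Get("status") // []string{"running"}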
+func (filters Args) Del(name, value string) {
+	if _, ok := filters.fields[name]; ok {
+		delete(filters.fields[name], value)
+		if len(filters.fields[name]) == 0 {
+			delete(filters.fields, name)
+		}
+	}
+}
+
+// Len returns the number of fields in the arguments.
+func (filters Args) Len() int {
+	return len(filters.fields)
+}
+
+// MatchKVList returns true if the values for the specified field match the ones
+// from the sources.
+// e.g. given Args are {'label': {'label1=1','label2=2'}, 'image.name': {'ubuntu'}},
+// field is 'label' and sources are {'label1': '1', 'label2': '2'}
+// it returns true.
+func (filters Args) MatchKVList(field string, sources map[string]string) bool {
+	fieldValues := filters.fields[field]
+
+	// do not filter if there is no filter set or cannot determine filter
+	if len(fieldValues) == 0 {
+		return true
+	}
+
+	if len(sources) == 0 {
+		return false
+	}
+
+	for name2match := range fieldValues {
+		testKV := strings.SplitN(name2match, "=", 2)
+
+		v, ok := sources[testKV[0]]
+		if !ok {
+			return false
+		}
+		if len(testKV) == 2 && testKV[1] != v {
+			return false
+		}
+	}
+
+	return true
+}
+
+// Match returns true if the values for the specified field match the source string.
+// e.g. given Args are {'label': {'label1=1','label2=2'}, 'image.name': {'ubuntu'}},
+// field is 'image.name' and source is 'ubuntu'
+// it returns true.
+func (filters Args) Match(field, source string) bool {
+	if filters.ExactMatch(field, source) {
+		return true
+	}
+
+	fieldValues := filters.fields[field]
+	for name2match := range fieldValues {
+		match, err := regexp.MatchString(name2match, source)
+		if err != nil {
+			continue
+		}
+		if match {
+			return true
+		}
+	}
+	return false
+}
+
+// ExactMatch returns true if the source matches exactly one of the filters.
+func (filters Args) ExactMatch(field, source string) bool {
+	fieldValues, ok := filters.fields[field]
+	// do not filter if there is no filter set or cannot determine filter
+	if !ok || len(fieldValues) == 0 {
+		return true
+	}
+
+	// try to match full name value to avoid O(N) regular expression matching
+	return fieldValues[source]
+}
+
+// UniqueExactMatch returns true if there is only one filter and the source matches exactly this one.
+func (filters Args) UniqueExactMatch(field, source string) bool {
+	fieldValues := filters.fields[field]
+	// do not filter if there is no filter set or cannot determine filter
+	if len(fieldValues) == 0 {
+		return true
+	}
+	if len(filters.fields[field]) != 1 {
+		return false
+	}
+
+	// try to match full name value to avoid O(N) regular expression matching
+	return fieldValues[source]
+}
+
+// FuzzyMatch returns true if the source matches exactly one of the filters,
+// or the source has one of the filters as a prefix.
+func (filters Args) FuzzyMatch(field, source string) bool {
+	if filters.ExactMatch(field, source) {
+		return true
+	}
+
+	fieldValues := filters.fields[field]
+	for prefix := range fieldValues {
+		if strings.HasPrefix(source, prefix) {
+			return true
+		}
+	}
+	return false
+}
+
+// Include returns true if the name of the field to filter is in the filters.
+func (filters Args) Include(field string) bool {
+	_, ok := filters.fields[field]
+	return ok
+}
+
+// Validate ensures that all the fields in the filter are valid.
+// It returns an error as soon as it finds an invalid field.
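+//
+// For example (illustrative values):
+//
+//	accepted := map[string]bool{"status": true, "label": true}
+//	f := NewArgs()
+//	f.Add("status", "running")
+//	f.Validate(accepted) // nil
+//	f.Add("bogus", "x")
+//	f.Validate(accepted) // error: Invalid filter 'bogus'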
+func (filters Args) Validate(accepted map[string]bool) error { + for name := range filters.fields { + if !accepted[name] { + return fmt.Errorf("Invalid filter '%s'", name) + } + } + return nil +} + +// WalkValues iterates over the list of filtered values for a field. +// It stops the iteration if it finds an error and it returns that error. +func (filters Args) WalkValues(field string, op func(value string) error) error { + if _, ok := filters.fields[field]; !ok { + return nil + } + for v := range filters.fields[field] { + if err := op(v); err != nil { + return err + } + } + return nil +} + +func deprecatedArgs(d map[string][]string) map[string]map[string]bool { + m := map[string]map[string]bool{} + for k, v := range d { + values := map[string]bool{} + for _, vv := range v { + values[vv] = true + } + m[k] = values + } + return m +} + +func convertArgsToSlice(f map[string]map[string]bool) map[string][]string { + m := map[string][]string{} + for k, v := range f { + values := []string{} + for kk := range v { + if v[kk] { + values = append(values, kk) + } + } + m[k] = values + } + return m +} diff --git a/components/engine/api/types/filters/parse_test.go b/components/engine/api/types/filters/parse_test.go new file mode 100644 index 0000000000..8198f89d7e --- /dev/null +++ b/components/engine/api/types/filters/parse_test.go @@ -0,0 +1,417 @@ +package filters + +import ( + "errors" + "testing" +) + +func TestParseArgs(t *testing.T) { + // equivalent of `docker ps -f 'created=today' -f 'image.name=ubuntu*' -f 'image.name=*untu'` + flagArgs := []string{ + "created=today", + "image.name=ubuntu*", + "image.name=*untu", + } + var ( + args = NewArgs() + err error + ) + for i := range flagArgs { + args, err = ParseFlag(flagArgs[i], args) + if err != nil { + t.Errorf("failed to parse %s: %s", flagArgs[i], err) + } + } + if len(args.Get("created")) != 1 { + t.Error("failed to set this arg") + } + if len(args.Get("image.name")) != 2 { + t.Error("the args should have collapsed") + } +} + +func TestParseArgsEdgeCase(t *testing.T) { + var filters Args + args, err := ParseFlag("", filters) + if err != nil { + t.Fatal(err) + } + if args.Len() != 0 { + t.Fatalf("Expected an empty Args (map), got %v", args) + } + if args, err = ParseFlag("anything", args); err == nil || err != ErrBadFormat { + t.Fatalf("Expected ErrBadFormat, got %v", err) + } +} + +func TestToParam(t *testing.T) { + fields := map[string]map[string]bool{ + "created": {"today": true}, + "image.name": {"ubuntu*": true, "*untu": true}, + } + a := Args{fields: fields} + + _, err := ToParam(a) + if err != nil { + t.Errorf("failed to marshal the filters: %s", err) + } +} + +func TestToParamWithVersion(t *testing.T) { + fields := map[string]map[string]bool{ + "created": {"today": true}, + "image.name": {"ubuntu*": true, "*untu": true}, + } + a := Args{fields: fields} + + str1, err := ToParamWithVersion("1.21", a) + if err != nil { + t.Errorf("failed to marshal the filters with version < 1.22: %s", err) + } + str2, err := ToParamWithVersion("1.22", a) + if err != nil { + t.Errorf("failed to marshal the filters with version >= 1.22: %s", err) + } + if str1 != `{"created":["today"],"image.name":["*untu","ubuntu*"]}` && + str1 != `{"created":["today"],"image.name":["ubuntu*","*untu"]}` { + t.Errorf("incorrectly marshaled the filters: %s", str1) + } + if str2 != `{"created":{"today":true},"image.name":{"*untu":true,"ubuntu*":true}}` && + str2 != `{"created":{"today":true},"image.name":{"ubuntu*":true,"*untu":true}}` { + t.Errorf("incorrectly marshaled the 
filters: %s", str2) + } +} + +func TestFromParam(t *testing.T) { + invalids := []string{ + "anything", + "['a','list']", + "{'key': 'value'}", + `{"key": "value"}`, + } + valid := map[*Args][]string{ + &Args{fields: map[string]map[string]bool{"key": {"value": true}}}: { + `{"key": ["value"]}`, + `{"key": {"value": true}}`, + }, + &Args{fields: map[string]map[string]bool{"key": {"value1": true, "value2": true}}}: { + `{"key": ["value1", "value2"]}`, + `{"key": {"value1": true, "value2": true}}`, + }, + &Args{fields: map[string]map[string]bool{"key1": {"value1": true}, "key2": {"value2": true}}}: { + `{"key1": ["value1"], "key2": ["value2"]}`, + `{"key1": {"value1": true}, "key2": {"value2": true}}`, + }, + } + + for _, invalid := range invalids { + if _, err := FromParam(invalid); err == nil { + t.Fatalf("Expected an error with %v, got nothing", invalid) + } + } + + for expectedArgs, matchers := range valid { + for _, json := range matchers { + args, err := FromParam(json) + if err != nil { + t.Fatal(err) + } + if args.Len() != expectedArgs.Len() { + t.Fatalf("Expected %v, go %v", expectedArgs, args) + } + for key, expectedValues := range expectedArgs.fields { + values := args.Get(key) + + if len(values) != len(expectedValues) { + t.Fatalf("Expected %v, go %v", expectedArgs, args) + } + + for _, v := range values { + if !expectedValues[v] { + t.Fatalf("Expected %v, go %v", expectedArgs, args) + } + } + } + } + } +} + +func TestEmpty(t *testing.T) { + a := Args{} + v, err := ToParam(a) + if err != nil { + t.Errorf("failed to marshal the filters: %s", err) + } + v1, err := FromParam(v) + if err != nil { + t.Errorf("%s", err) + } + if a.Len() != v1.Len() { + t.Error("these should both be empty sets") + } +} + +func TestArgsMatchKVListEmptySources(t *testing.T) { + args := NewArgs() + if !args.MatchKVList("created", map[string]string{}) { + t.Fatalf("Expected true for (%v,created), got true", args) + } + + args = Args{map[string]map[string]bool{"created": {"today": true}}} + if args.MatchKVList("created", map[string]string{}) { + t.Fatalf("Expected false for (%v,created), got true", args) + } +} + +func TestArgsMatchKVList(t *testing.T) { + // Not empty sources + sources := map[string]string{ + "key1": "value1", + "key2": "value2", + "key3": "value3", + } + + matches := map[*Args]string{ + &Args{}: "field", + &Args{map[string]map[string]bool{ + "created": map[string]bool{"today": true}, + "labels": map[string]bool{"key1": true}}, + }: "labels", + &Args{map[string]map[string]bool{ + "created": map[string]bool{"today": true}, + "labels": map[string]bool{"key1=value1": true}}, + }: "labels", + } + + for args, field := range matches { + if args.MatchKVList(field, sources) != true { + t.Fatalf("Expected true for %v on %v, got false", sources, args) + } + } + + differs := map[*Args]string{ + &Args{map[string]map[string]bool{ + "created": map[string]bool{"today": true}}, + }: "created", + &Args{map[string]map[string]bool{ + "created": map[string]bool{"today": true}, + "labels": map[string]bool{"key4": true}}, + }: "labels", + &Args{map[string]map[string]bool{ + "created": map[string]bool{"today": true}, + "labels": map[string]bool{"key1=value3": true}}, + }: "labels", + } + + for args, field := range differs { + if args.MatchKVList(field, sources) != false { + t.Fatalf("Expected false for %v on %v, got true", sources, args) + } + } +} + +func TestArgsMatch(t *testing.T) { + source := "today" + + matches := map[*Args]string{ + &Args{}: "field", + &Args{map[string]map[string]bool{ + "created": 
map[string]bool{"today": true}}, + }: "today", + &Args{map[string]map[string]bool{ + "created": map[string]bool{"to*": true}}, + }: "created", + &Args{map[string]map[string]bool{ + "created": map[string]bool{"to(.*)": true}}, + }: "created", + &Args{map[string]map[string]bool{ + "created": map[string]bool{"tod": true}}, + }: "created", + &Args{map[string]map[string]bool{ + "created": map[string]bool{"anyting": true, "to*": true}}, + }: "created", + } + + for args, field := range matches { + if args.Match(field, source) != true { + t.Fatalf("Expected true for %v on %v, got false", source, args) + } + } + + differs := map[*Args]string{ + &Args{map[string]map[string]bool{ + "created": map[string]bool{"tomorrow": true}}, + }: "created", + &Args{map[string]map[string]bool{ + "created": map[string]bool{"to(day": true}}, + }: "created", + &Args{map[string]map[string]bool{ + "created": map[string]bool{"tom(.*)": true}}, + }: "created", + &Args{map[string]map[string]bool{ + "created": map[string]bool{"tom": true}}, + }: "created", + &Args{map[string]map[string]bool{ + "created": map[string]bool{"today1": true}, + "labels": map[string]bool{"today": true}}, + }: "created", + } + + for args, field := range differs { + if args.Match(field, source) != false { + t.Fatalf("Expected false for %v on %v, got true", source, args) + } + } +} + +func TestAdd(t *testing.T) { + f := NewArgs() + f.Add("status", "running") + v := f.fields["status"] + if len(v) != 1 || !v["running"] { + t.Fatalf("Expected to include a running status, got %v", v) + } + + f.Add("status", "paused") + if len(v) != 2 || !v["paused"] { + t.Fatalf("Expected to include a paused status, got %v", v) + } +} + +func TestDel(t *testing.T) { + f := NewArgs() + f.Add("status", "running") + f.Del("status", "running") + v := f.fields["status"] + if v["running"] { + t.Fatal("Expected to not include a running status filter, got true") + } +} + +func TestLen(t *testing.T) { + f := NewArgs() + if f.Len() != 0 { + t.Fatal("Expected to not include any field") + } + f.Add("status", "running") + if f.Len() != 1 { + t.Fatal("Expected to include one field") + } +} + +func TestExactMatch(t *testing.T) { + f := NewArgs() + + if !f.ExactMatch("status", "running") { + t.Fatal("Expected to match `running` when there are no filters, got false") + } + + f.Add("status", "running") + f.Add("status", "pause*") + + if !f.ExactMatch("status", "running") { + t.Fatal("Expected to match `running` with one of the filters, got false") + } + + if f.ExactMatch("status", "paused") { + t.Fatal("Expected to not match `paused` with one of the filters, got true") + } +} + +func TestOnlyOneExactMatch(t *testing.T) { + f := NewArgs() + + if !f.UniqueExactMatch("status", "running") { + t.Fatal("Expected to match `running` when there are no filters, got false") + } + + f.Add("status", "running") + + if !f.UniqueExactMatch("status", "running") { + t.Fatal("Expected to match `running` with one of the filters, got false") + } + + if f.UniqueExactMatch("status", "paused") { + t.Fatal("Expected to not match `paused` with one of the filters, got true") + } + + f.Add("status", "pause") + if f.UniqueExactMatch("status", "running") { + t.Fatal("Expected to not match only `running` with two filters, got true") + } +} + +func TestInclude(t *testing.T) { + f := NewArgs() + if f.Include("status") { + t.Fatal("Expected to not include a status key, got true") + } + f.Add("status", "running") + if !f.Include("status") { + t.Fatal("Expected to include a status key, got false") + } +} + +func 
TestValidate(t *testing.T) { + f := NewArgs() + f.Add("status", "running") + + valid := map[string]bool{ + "status": true, + "dangling": true, + } + + if err := f.Validate(valid); err != nil { + t.Fatal(err) + } + + f.Add("bogus", "running") + if err := f.Validate(valid); err == nil { + t.Fatal("Expected to return an error, got nil") + } +} + +func TestWalkValues(t *testing.T) { + f := NewArgs() + f.Add("status", "running") + f.Add("status", "paused") + + f.WalkValues("status", func(value string) error { + if value != "running" && value != "paused" { + t.Fatalf("Unexpected value %s", value) + } + return nil + }) + + err := f.WalkValues("status", func(value string) error { + return errors.New("return") + }) + if err == nil { + t.Fatal("Expected to get an error, got nil") + } + + err = f.WalkValues("foo", func(value string) error { + return errors.New("return") + }) + if err != nil { + t.Fatalf("Expected to not iterate when the field doesn't exist, got %v", err) + } +} + +func TestFuzzyMatch(t *testing.T) { + f := NewArgs() + f.Add("container", "foo") + + cases := map[string]bool{ + "foo": true, + "foobar": true, + "barfoo": false, + "bar": false, + } + for source, match := range cases { + got := f.FuzzyMatch("container", source) + if got != match { + t.Fatalf("Expected %v, got %v: %s", match, got, source) + } + } +} diff --git a/components/engine/api/types/graph_driver_data.go b/components/engine/api/types/graph_driver_data.go new file mode 100644 index 0000000000..4d9bf1c62c --- /dev/null +++ b/components/engine/api/types/graph_driver_data.go @@ -0,0 +1,17 @@ +package types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// GraphDriverData Information about a container's graph driver. +// swagger:model GraphDriverData +type GraphDriverData struct { + + // data + // Required: true + Data map[string]string `json:"Data"` + + // name + // Required: true + Name string `json:"Name"` +} diff --git a/components/engine/api/types/id_response.go b/components/engine/api/types/id_response.go new file mode 100644 index 0000000000..7592d2f8b1 --- /dev/null +++ b/components/engine/api/types/id_response.go @@ -0,0 +1,13 @@ +package types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// IDResponse Response to an API call that returns just an Id +// swagger:model IdResponse +type IDResponse struct { + + // The id of the newly created object. 
+ // Required: true + ID string `json:"Id"` +} diff --git a/components/engine/api/types/image/image_history.go b/components/engine/api/types/image/image_history.go new file mode 100644 index 0000000000..0dd30c729a --- /dev/null +++ b/components/engine/api/types/image/image_history.go @@ -0,0 +1,37 @@ +package image + +// ---------------------------------------------------------------------------- +// DO NOT EDIT THIS FILE +// This file was generated by `swagger generate operation` +// +// See hack/generate-swagger-api.sh +// ---------------------------------------------------------------------------- + +// HistoryResponseItem history response item +// swagger:model HistoryResponseItem +type HistoryResponseItem struct { + + // comment + // Required: true + Comment string `json:"Comment"` + + // created + // Required: true + Created int64 `json:"Created"` + + // created by + // Required: true + CreatedBy string `json:"CreatedBy"` + + // Id + // Required: true + ID string `json:"Id"` + + // size + // Required: true + Size int64 `json:"Size"` + + // tags + // Required: true + Tags []string `json:"Tags"` +} diff --git a/components/engine/api/types/image_delete_response_item.go b/components/engine/api/types/image_delete_response_item.go new file mode 100644 index 0000000000..b9a65a0d8e --- /dev/null +++ b/components/engine/api/types/image_delete_response_item.go @@ -0,0 +1,15 @@ +package types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// ImageDeleteResponseItem image delete response item +// swagger:model ImageDeleteResponseItem +type ImageDeleteResponseItem struct { + + // The image ID of an image that was deleted + Deleted string `json:"Deleted,omitempty"` + + // The image ID of an image that was untagged + Untagged string `json:"Untagged,omitempty"` +} diff --git a/components/engine/api/types/image_summary.go b/components/engine/api/types/image_summary.go new file mode 100644 index 0000000000..e145b3dcfc --- /dev/null +++ b/components/engine/api/types/image_summary.go @@ -0,0 +1,49 @@ +package types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// ImageSummary image summary +// swagger:model ImageSummary +type ImageSummary struct { + + // containers + // Required: true + Containers int64 `json:"Containers"` + + // created + // Required: true + Created int64 `json:"Created"` + + // Id + // Required: true + ID string `json:"Id"` + + // labels + // Required: true + Labels map[string]string `json:"Labels"` + + // parent Id + // Required: true + ParentID string `json:"ParentId"` + + // repo digests + // Required: true + RepoDigests []string `json:"RepoDigests"` + + // repo tags + // Required: true + RepoTags []string `json:"RepoTags"` + + // shared size + // Required: true + SharedSize int64 `json:"SharedSize"` + + // size + // Required: true + Size int64 `json:"Size"` + + // virtual size + // Required: true + VirtualSize int64 `json:"VirtualSize"` +} diff --git a/components/engine/api/types/mount/mount.go b/components/engine/api/types/mount/mount.go new file mode 100644 index 0000000000..2744f85d6d --- /dev/null +++ b/components/engine/api/types/mount/mount.go @@ -0,0 +1,128 @@ +package mount + +import ( + "os" +) + +// Type represents the type of a mount. 
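+//
+// A minimal sketch of a bind-mount spec using these types (the paths are
+// hypothetical):
+//
+//	m := mount.Mount{
+//		Type:     mount.TypeBind,
+//		Source:   "/host/data", // host path for bind mounts
+//		Target:   "/data",
+//		ReadOnly: true,
+//	}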
+type Type string + +// Type constants +const ( + // TypeBind is the type for mounting host dir + TypeBind Type = "bind" + // TypeVolume is the type for remote storage volumes + TypeVolume Type = "volume" + // TypeTmpfs is the type for mounting tmpfs + TypeTmpfs Type = "tmpfs" +) + +// Mount represents a mount (volume). +type Mount struct { + Type Type `json:",omitempty"` + // Source specifies the name of the mount. Depending on mount type, this + // may be a volume name or a host path, or even ignored. + // Source is not supported for tmpfs (must be an empty value) + Source string `json:",omitempty"` + Target string `json:",omitempty"` + ReadOnly bool `json:",omitempty"` + Consistency Consistency `json:",omitempty"` + + BindOptions *BindOptions `json:",omitempty"` + VolumeOptions *VolumeOptions `json:",omitempty"` + TmpfsOptions *TmpfsOptions `json:",omitempty"` +} + +// Propagation represents the propagation of a mount. +type Propagation string + +const ( + // PropagationRPrivate RPRIVATE + PropagationRPrivate Propagation = "rprivate" + // PropagationPrivate PRIVATE + PropagationPrivate Propagation = "private" + // PropagationRShared RSHARED + PropagationRShared Propagation = "rshared" + // PropagationShared SHARED + PropagationShared Propagation = "shared" + // PropagationRSlave RSLAVE + PropagationRSlave Propagation = "rslave" + // PropagationSlave SLAVE + PropagationSlave Propagation = "slave" +) + +// Propagations is the list of all valid mount propagations +var Propagations = []Propagation{ + PropagationRPrivate, + PropagationPrivate, + PropagationRShared, + PropagationShared, + PropagationRSlave, + PropagationSlave, +} + +// Consistency represents the consistency requirements of a mount. +type Consistency string + +const ( + // ConsistencyFull guarantees bind-mount-like consistency + ConsistencyFull Consistency = "consistent" + // ConsistencyCached mounts can cache read data and FS structure + ConsistencyCached Consistency = "cached" + // ConsistencyDelegated mounts can cache read and written data and structure + ConsistencyDelegated Consistency = "delegated" + // ConsistencyDefault provides "consistent" behavior unless overridden + ConsistencyDefault Consistency = "default" +) + +// BindOptions defines options specific to mounts of type "bind". +type BindOptions struct { + Propagation Propagation `json:",omitempty"` +} + +// VolumeOptions represents the options for a mount of type volume. +type VolumeOptions struct { + NoCopy bool `json:",omitempty"` + Labels map[string]string `json:",omitempty"` + DriverConfig *Driver `json:",omitempty"` +} + +// Driver represents a volume driver. +type Driver struct { + Name string `json:",omitempty"` + Options map[string]string `json:",omitempty"` +} + +// TmpfsOptions defines options specific to mounts of type "tmpfs". +type TmpfsOptions struct { + // Size sets the size of the tmpfs, in bytes. + // + // This will be converted to an operating system specific value + // depending on the host. For example, on linux, it will be converted to + // use a 'k', 'm' or 'g' syntax. BSD, though not widely supported with + // docker, uses a straight byte value. + // + // Percentages are not supported. + SizeBytes int64 `json:",omitempty"` + // Mode of the tmpfs upon creation + Mode os.FileMode `json:",omitempty"` + + // TODO(stevvooe): There are several more tmpfs flags, specified in the + // daemon, that are accepted. Only the most basic are added for now. 
+	//
+	// From docker/docker/pkg/mount/flags.go:
+	//
+	// var validFlags = map[string]bool{
+	// 	"":          true,
+	// 	"size":      true, X
+	// 	"mode":      true, X
+	// 	"uid":       true,
+	// 	"gid":       true,
+	// 	"nr_inodes": true,
+	// 	"nr_blocks": true,
+	// 	"mpol":      true,
+	// }
+	//
+	// Some of these may be straightforward to add, but others, such as
+	// uid/gid have implications in a clustered system.
+}
diff --git a/components/engine/api/types/network/network.go b/components/engine/api/types/network/network.go
new file mode 100644
index 0000000000..7c7dbacc85
--- /dev/null
+++ b/components/engine/api/types/network/network.go
@@ -0,0 +1,108 @@
+package network
+
+// Address represents an IP address
+type Address struct {
+	Addr      string
+	PrefixLen int
+}
+
+// IPAM represents IP Address Management
+type IPAM struct {
+	Driver  string
+	Options map[string]string // Per-network IPAM driver options
+	Config  []IPAMConfig
+}
+
+// IPAMConfig represents IPAM configurations
+type IPAMConfig struct {
+	Subnet     string            `json:",omitempty"`
+	IPRange    string            `json:",omitempty"`
+	Gateway    string            `json:",omitempty"`
+	AuxAddress map[string]string `json:"AuxiliaryAddresses,omitempty"`
+}
+
+// EndpointIPAMConfig represents IPAM configurations for the endpoint
+type EndpointIPAMConfig struct {
+	IPv4Address  string   `json:",omitempty"`
+	IPv6Address  string   `json:",omitempty"`
+	LinkLocalIPs []string `json:",omitempty"`
+}
+
+// Copy makes a copy of the endpoint IPAM config
+func (cfg *EndpointIPAMConfig) Copy() *EndpointIPAMConfig {
+	cfgCopy := *cfg
+	cfgCopy.LinkLocalIPs = make([]string, 0, len(cfg.LinkLocalIPs))
+	cfgCopy.LinkLocalIPs = append(cfgCopy.LinkLocalIPs, cfg.LinkLocalIPs...)
+	return &cfgCopy
+}
+
+// PeerInfo represents one peer of an overlay network
+type PeerInfo struct {
+	Name string
+	IP   string
+}
+
+// EndpointSettings stores the network endpoint details
+type EndpointSettings struct {
+	// Configurations
+	IPAMConfig *EndpointIPAMConfig
+	Links      []string
+	Aliases    []string
+	// Operational data
+	NetworkID           string
+	EndpointID          string
+	Gateway             string
+	IPAddress           string
+	IPPrefixLen         int
+	IPv6Gateway         string
+	GlobalIPv6Address   string
+	GlobalIPv6PrefixLen int
+	MacAddress          string
+	DriverOpts          map[string]string
+}
+
+// Task carries the information about one backend task
+type Task struct {
+	Name       string
+	EndpointID string
+	EndpointIP string
+	Info       map[string]string
+}
+
+// ServiceInfo represents service parameters with the list of service's tasks
+type ServiceInfo struct {
+	VIP          string
+	Ports        []string
+	LocalLBIndex int
+	Tasks        []Task
+}
+
+// Copy makes a deep copy of `EndpointSettings`
+func (es *EndpointSettings) Copy() *EndpointSettings {
+	epCopy := *es
+	if es.IPAMConfig != nil {
+		epCopy.IPAMConfig = es.IPAMConfig.Copy()
+	}
+
+	if es.Links != nil {
+		links := make([]string, 0, len(es.Links))
+		epCopy.Links = append(links, es.Links...)
+	}
+
+	if es.Aliases != nil {
+		aliases := make([]string, 0, len(es.Aliases))
+		epCopy.Aliases = append(aliases, es.Aliases...)
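+		// Note: epCopy := *es copies map headers, so map-typed fields such
+		// as DriverOpts still share storage with the original; only
+		// IPAMConfig, Links, and Aliases are deep-copied here.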
+ } + return &epCopy +} + +// NetworkingConfig represents the container's networking configuration for each of its interfaces +// Carries the networking configs specified in the `docker run` and `docker network connect` commands +type NetworkingConfig struct { + EndpointsConfig map[string]*EndpointSettings // Endpoint configs for each connecting network +} + +// ConfigReference specifies the source which provides a network's configuration +type ConfigReference struct { + Network string +} diff --git a/components/engine/api/types/plugin.go b/components/engine/api/types/plugin.go new file mode 100644 index 0000000000..ed3c2c26e4 --- /dev/null +++ b/components/engine/api/types/plugin.go @@ -0,0 +1,200 @@ +package types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// Plugin A plugin for the Engine API +// swagger:model Plugin +type Plugin struct { + + // config + // Required: true + Config PluginConfig `json:"Config"` + + // True when the plugin is running. False when the plugin is not running, only installed. + // Required: true + Enabled bool `json:"Enabled"` + + // Id + ID string `json:"Id,omitempty"` + + // name + // Required: true + Name string `json:"Name"` + + // plugin remote reference used to push/pull the plugin + PluginReference string `json:"PluginReference,omitempty"` + + // settings + // Required: true + Settings PluginSettings `json:"Settings"` +} + +// PluginConfig The config of a plugin. +// swagger:model PluginConfig +type PluginConfig struct { + + // args + // Required: true + Args PluginConfigArgs `json:"Args"` + + // description + // Required: true + Description string `json:"Description"` + + // Docker Version used to create the plugin + DockerVersion string `json:"DockerVersion,omitempty"` + + // documentation + // Required: true + Documentation string `json:"Documentation"` + + // entrypoint + // Required: true + Entrypoint []string `json:"Entrypoint"` + + // env + // Required: true + Env []PluginEnv `json:"Env"` + + // interface + // Required: true + Interface PluginConfigInterface `json:"Interface"` + + // ipc host + // Required: true + IpcHost bool `json:"IpcHost"` + + // linux + // Required: true + Linux PluginConfigLinux `json:"Linux"` + + // mounts + // Required: true + Mounts []PluginMount `json:"Mounts"` + + // network + // Required: true + Network PluginConfigNetwork `json:"Network"` + + // pid host + // Required: true + PidHost bool `json:"PidHost"` + + // propagated mount + // Required: true + PropagatedMount string `json:"PropagatedMount"` + + // user + User PluginConfigUser `json:"User,omitempty"` + + // work dir + // Required: true + WorkDir string `json:"WorkDir"` + + // rootfs + Rootfs *PluginConfigRootfs `json:"rootfs,omitempty"` +} + +// PluginConfigArgs plugin config args +// swagger:model PluginConfigArgs +type PluginConfigArgs struct { + + // description + // Required: true + Description string `json:"Description"` + + // name + // Required: true + Name string `json:"Name"` + + // settable + // Required: true + Settable []string `json:"Settable"` + + // value + // Required: true + Value []string `json:"Value"` +} + +// PluginConfigInterface The interface between Docker and the plugin +// swagger:model PluginConfigInterface +type PluginConfigInterface struct { + + // socket + // Required: true + Socket string `json:"Socket"` + + // types + // Required: true + Types []PluginInterfaceType `json:"Types"` +} + +// PluginConfigLinux plugin config linux +// 
swagger:model PluginConfigLinux +type PluginConfigLinux struct { + + // allow all devices + // Required: true + AllowAllDevices bool `json:"AllowAllDevices"` + + // capabilities + // Required: true + Capabilities []string `json:"Capabilities"` + + // devices + // Required: true + Devices []PluginDevice `json:"Devices"` +} + +// PluginConfigNetwork plugin config network +// swagger:model PluginConfigNetwork +type PluginConfigNetwork struct { + + // type + // Required: true + Type string `json:"Type"` +} + +// PluginConfigRootfs plugin config rootfs +// swagger:model PluginConfigRootfs +type PluginConfigRootfs struct { + + // diff ids + DiffIds []string `json:"diff_ids"` + + // type + Type string `json:"type,omitempty"` +} + +// PluginConfigUser plugin config user +// swagger:model PluginConfigUser +type PluginConfigUser struct { + + // g ID + GID uint32 `json:"GID,omitempty"` + + // UID + UID uint32 `json:"UID,omitempty"` +} + +// PluginSettings Settings that can be modified by users. +// swagger:model PluginSettings +type PluginSettings struct { + + // args + // Required: true + Args []string `json:"Args"` + + // devices + // Required: true + Devices []PluginDevice `json:"Devices"` + + // env + // Required: true + Env []string `json:"Env"` + + // mounts + // Required: true + Mounts []PluginMount `json:"Mounts"` +} diff --git a/components/engine/api/types/plugin_device.go b/components/engine/api/types/plugin_device.go new file mode 100644 index 0000000000..5699010675 --- /dev/null +++ b/components/engine/api/types/plugin_device.go @@ -0,0 +1,25 @@ +package types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// PluginDevice plugin device +// swagger:model PluginDevice +type PluginDevice struct { + + // description + // Required: true + Description string `json:"Description"` + + // name + // Required: true + Name string `json:"Name"` + + // path + // Required: true + Path *string `json:"Path"` + + // settable + // Required: true + Settable []string `json:"Settable"` +} diff --git a/components/engine/api/types/plugin_env.go b/components/engine/api/types/plugin_env.go new file mode 100644 index 0000000000..32962dc2eb --- /dev/null +++ b/components/engine/api/types/plugin_env.go @@ -0,0 +1,25 @@ +package types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// PluginEnv plugin env +// swagger:model PluginEnv +type PluginEnv struct { + + // description + // Required: true + Description string `json:"Description"` + + // name + // Required: true + Name string `json:"Name"` + + // settable + // Required: true + Settable []string `json:"Settable"` + + // value + // Required: true + Value *string `json:"Value"` +} diff --git a/components/engine/api/types/plugin_interface_type.go b/components/engine/api/types/plugin_interface_type.go new file mode 100644 index 0000000000..c82f204e87 --- /dev/null +++ b/components/engine/api/types/plugin_interface_type.go @@ -0,0 +1,21 @@ +package types + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +// PluginInterfaceType plugin interface type +// swagger:model PluginInterfaceType +type PluginInterfaceType struct { + + // capability + // Required: true + Capability string `json:"Capability"` + + // prefix + // Required: true + Prefix string `json:"Prefix"` + + // version + // Required: true + Version string `json:"Version"` +} diff --git a/components/engine/api/types/plugin_mount.go b/components/engine/api/types/plugin_mount.go new file mode 100644 index 0000000000..5c031cf8b5 --- /dev/null +++ b/components/engine/api/types/plugin_mount.go @@ -0,0 +1,37 @@ +package types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// PluginMount plugin mount +// swagger:model PluginMount +type PluginMount struct { + + // description + // Required: true + Description string `json:"Description"` + + // destination + // Required: true + Destination string `json:"Destination"` + + // name + // Required: true + Name string `json:"Name"` + + // options + // Required: true + Options []string `json:"Options"` + + // settable + // Required: true + Settable []string `json:"Settable"` + + // source + // Required: true + Source *string `json:"Source"` + + // type + // Required: true + Type string `json:"Type"` +} diff --git a/components/engine/api/types/plugin_responses.go b/components/engine/api/types/plugin_responses.go new file mode 100644 index 0000000000..1c6461f2d9 --- /dev/null +++ b/components/engine/api/types/plugin_responses.go @@ -0,0 +1,79 @@ +package types + +import ( + "encoding/json" + "fmt" + "sort" +) + +// PluginsListResponse contains the response for the Engine API +type PluginsListResponse []*Plugin + +const ( + authzDriver = "AuthzDriver" + graphDriver = "GraphDriver" + ipamDriver = "IpamDriver" + networkDriver = "NetworkDriver" + volumeDriver = "VolumeDriver" +) + +// UnmarshalJSON implements json.Unmarshaler for PluginInterfaceType +func (t *PluginInterfaceType) UnmarshalJSON(p []byte) error { + versionIndex := len(p) + prefixIndex := 0 + if len(p) < 2 || p[0] != '"' || p[len(p)-1] != '"' { + return fmt.Errorf("%q is not a plugin interface type", p) + } + p = p[1 : len(p)-1] +loop: + for i, b := range p { + switch b { + case '.': + prefixIndex = i + case '/': + versionIndex = i + break loop + } + } + t.Prefix = string(p[:prefixIndex]) + t.Capability = string(p[prefixIndex+1 : versionIndex]) + if versionIndex < len(p) { + t.Version = string(p[versionIndex+1:]) + } + return nil +} + +// MarshalJSON implements json.Marshaler for PluginInterfaceType +func (t *PluginInterfaceType) MarshalJSON() ([]byte, error) { + return json.Marshal(t.String()) +} + +// String implements fmt.Stringer for PluginInterfaceType +func (t PluginInterfaceType) String() string { + return fmt.Sprintf("%s.%s/%s", t.Prefix, t.Capability, t.Version) +} + +// PluginPrivilege describes a permission the user has to accept +// upon installing a plugin. 
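+//
+// For example, an install flow might surface a privilege such as (an
+// illustrative value, not taken from this change):
+//
+//	PluginPrivilege{
+//		Name:        "network",
+//		Description: "permissions to access a network",
+//		Value:       []string{"host"},
+//	}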
+type PluginPrivilege struct { + Name string + Description string + Value []string +} + +// PluginPrivileges is a list of PluginPrivilege +type PluginPrivileges []PluginPrivilege + +func (s PluginPrivileges) Len() int { + return len(s) +} + +func (s PluginPrivileges) Less(i, j int) bool { + return s[i].Name < s[j].Name +} + +func (s PluginPrivileges) Swap(i, j int) { + sort.Strings(s[i].Value) + sort.Strings(s[j].Value) + s[i], s[j] = s[j], s[i] +} diff --git a/components/engine/api/types/plugins/logdriver/entry.pb.go b/components/engine/api/types/plugins/logdriver/entry.pb.go new file mode 100644 index 0000000000..5d7d8b4c41 --- /dev/null +++ b/components/engine/api/types/plugins/logdriver/entry.pb.go @@ -0,0 +1,449 @@ +// Code generated by protoc-gen-gogo. +// source: entry.proto +// DO NOT EDIT! + +/* + Package logdriver is a generated protocol buffer package. + + It is generated from these files: + entry.proto + + It has these top-level messages: + LogEntry +*/ +package logdriver + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +type LogEntry struct { + Source string `protobuf:"bytes,1,opt,name=source,proto3" json:"source,omitempty"` + TimeNano int64 `protobuf:"varint,2,opt,name=time_nano,json=timeNano,proto3" json:"time_nano,omitempty"` + Line []byte `protobuf:"bytes,3,opt,name=line,proto3" json:"line,omitempty"` + Partial bool `protobuf:"varint,4,opt,name=partial,proto3" json:"partial,omitempty"` +} + +func (m *LogEntry) Reset() { *m = LogEntry{} } +func (m *LogEntry) String() string { return proto.CompactTextString(m) } +func (*LogEntry) ProtoMessage() {} +func (*LogEntry) Descriptor() ([]byte, []int) { return fileDescriptorEntry, []int{0} } + +func (m *LogEntry) GetSource() string { + if m != nil { + return m.Source + } + return "" +} + +func (m *LogEntry) GetTimeNano() int64 { + if m != nil { + return m.TimeNano + } + return 0 +} + +func (m *LogEntry) GetLine() []byte { + if m != nil { + return m.Line + } + return nil +} + +func (m *LogEntry) GetPartial() bool { + if m != nil { + return m.Partial + } + return false +} + +func init() { + proto.RegisterType((*LogEntry)(nil), "LogEntry") +} +func (m *LogEntry) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LogEntry) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Source) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintEntry(dAtA, i, uint64(len(m.Source))) + i += copy(dAtA[i:], m.Source) + } + if m.TimeNano != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintEntry(dAtA, i, uint64(m.TimeNano)) + } + if len(m.Line) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintEntry(dAtA, i, uint64(len(m.Line))) + i += copy(dAtA[i:], m.Line) + } + if m.Partial { + dAtA[i] = 0x20 + i++ + if m.Partial { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + return i, nil +} + +func encodeFixed64Entry(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = 
uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Entry(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintEntry(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *LogEntry) Size() (n int) { + var l int + _ = l + l = len(m.Source) + if l > 0 { + n += 1 + l + sovEntry(uint64(l)) + } + if m.TimeNano != 0 { + n += 1 + sovEntry(uint64(m.TimeNano)) + } + l = len(m.Line) + if l > 0 { + n += 1 + l + sovEntry(uint64(l)) + } + if m.Partial { + n += 2 + } + return n +} + +func sovEntry(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozEntry(x uint64) (n int) { + return sovEntry(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *LogEntry) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEntry + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LogEntry: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LogEntry: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEntry + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEntry + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Source = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeNano", wireType) + } + m.TimeNano = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEntry + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TimeNano |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Line", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEntry + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthEntry + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Line = append(m.Line[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Line == nil { + m.Line = []byte{} + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Partial", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEntry + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Partial = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipEntry(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthEntry + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipEntry(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEntry + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEntry + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEntry + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthEntry + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEntry + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipEntry(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthEntry = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowEntry = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("entry.proto", fileDescriptorEntry) } + +var fileDescriptorEntry = []byte{ + // 149 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0x4e, 0xcd, 0x2b, 0x29, + 0xaa, 0xd4, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x57, 0xca, 0xe5, 0xe2, 0xf0, 0xc9, 0x4f, 0x77, 0x05, + 0x89, 0x08, 0x89, 0x71, 0xb1, 0x15, 0xe7, 0x97, 0x16, 0x25, 0xa7, 0x4a, 0x30, 0x2a, 0x30, 0x6a, + 0x70, 0x06, 0x41, 0x79, 0x42, 0xd2, 0x5c, 0x9c, 0x25, 0x99, 0xb9, 0xa9, 0xf1, 0x79, 0x89, 0x79, + 0xf9, 0x12, 0x4c, 0x0a, 0x8c, 0x1a, 0xcc, 0x41, 0x1c, 0x20, 0x01, 0xbf, 0xc4, 0xbc, 0x7c, 0x21, + 0x21, 0x2e, 0x96, 0x9c, 0xcc, 0xbc, 0x54, 0x09, 0x66, 0x05, 0x46, 0x0d, 0x9e, 0x20, 0x30, 0x5b, + 0x48, 0x82, 0x8b, 0xbd, 0x20, 0xb1, 0xa8, 0x24, 0x33, 0x31, 0x47, 0x82, 0x45, 0x81, 0x51, 0x83, + 0x23, 0x08, 0xc6, 0x75, 
0xe2, 0x39, 0xf1, 0x48, 0x8e, 0xf1, 0xc2, 0x23, 0x39, 0xc6, 0x07, 0x8f, + 0xe4, 0x18, 0x93, 0xd8, 0xc0, 0x6e, 0x30, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0x2d, 0x24, 0x5a, + 0xd4, 0x92, 0x00, 0x00, 0x00, +} diff --git a/components/engine/api/types/plugins/logdriver/entry.proto b/components/engine/api/types/plugins/logdriver/entry.proto new file mode 100644 index 0000000000..a4e96ea5f4 --- /dev/null +++ b/components/engine/api/types/plugins/logdriver/entry.proto @@ -0,0 +1,8 @@ +syntax = "proto3"; + +message LogEntry { + string source = 1; + int64 time_nano = 2; + bytes line = 3; + bool partial = 4; +} diff --git a/components/engine/api/types/plugins/logdriver/gen.go b/components/engine/api/types/plugins/logdriver/gen.go new file mode 100644 index 0000000000..068f987324 --- /dev/null +++ b/components/engine/api/types/plugins/logdriver/gen.go @@ -0,0 +1,3 @@ +//go:generate protoc --gogofast_out=import_path=github.com/docker/docker/api/types/plugins/logdriver:. entry.proto + +package logdriver diff --git a/components/engine/api/types/plugins/logdriver/io.go b/components/engine/api/types/plugins/logdriver/io.go new file mode 100644 index 0000000000..8c1ed49dd6 --- /dev/null +++ b/components/engine/api/types/plugins/logdriver/io.go @@ -0,0 +1,87 @@ +package logdriver + +import ( + "encoding/binary" + "io" +) + +const binaryEncodeLen = 4 + +// LogEntryEncoder encodes a LogEntry to a protobuf stream +// The stream should look like: +// +// [uint32 binary encoded message size][protobuf message] +// +// To decode an entry, read the first 4 bytes to get the size of the entry, +// then read `size` bytes from the stream. +type LogEntryEncoder interface { + Encode(*LogEntry) error +} + +// NewLogEntryEncoder creates a protobuf stream encoder for log entries. +// This is used to write out log entries to a stream. +func NewLogEntryEncoder(w io.Writer) LogEntryEncoder { + return &logEntryEncoder{ + w: w, + buf: make([]byte, 1024), + } +} + +type logEntryEncoder struct { + buf []byte + w io.Writer +} + +func (e *logEntryEncoder) Encode(l *LogEntry) error { + n := l.Size() + + total := n + binaryEncodeLen + if total > len(e.buf) { + e.buf = make([]byte, total) + } + binary.BigEndian.PutUint32(e.buf, uint32(n)) + + if _, err := l.MarshalTo(e.buf[binaryEncodeLen:]); err != nil { + return err + } + _, err := e.w.Write(e.buf[:total]) + return err +} + +// LogEntryDecoder decodes log entries from a stream +// It is expected that the wire format is as defined by LogEntryEncoder. +type LogEntryDecoder interface { + Decode(*LogEntry) error +} + +// NewLogEntryDecoder creates a new stream decoder for log entries +func NewLogEntryDecoder(r io.Reader) LogEntryDecoder { + return &logEntryDecoder{ + lenBuf: make([]byte, binaryEncodeLen), + buf: make([]byte, 1024), + r: r, + } +} + +type logEntryDecoder struct { + r io.Reader + lenBuf []byte + buf []byte +} + +func (d *logEntryDecoder) Decode(l *LogEntry) error { + _, err := io.ReadFull(d.r, d.lenBuf) + if err != nil { + return err + } + + size := int(binary.BigEndian.Uint32(d.lenBuf)) + if len(d.buf) < size { + d.buf = make([]byte, size) + } + + if _, err := io.ReadFull(d.r, d.buf[:size]); err != nil { + return err + } + return l.Unmarshal(d.buf[:size]) +} diff --git a/components/engine/api/types/port.go b/components/engine/api/types/port.go new file mode 100644 index 0000000000..ad52d46d56 --- /dev/null +++ b/components/engine/api/types/port.go @@ -0,0 +1,23 @@ +package types + +// This file was generated by the swagger tool. 
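// A minimal sketch (not part of this diff) of the framing defined in io.go
// above: each entry is a 4-byte big-endian length followed by the marshaled
// protobuf message. The import path matches the gen.go directive; the values
// are illustrative.
package main

import (
	"bytes"
	"fmt"

	"github.com/docker/docker/api/types/plugins/logdriver"
)

func main() {
	var buf bytes.Buffer

	enc := logdriver.NewLogEntryEncoder(&buf)
	if err := enc.Encode(&logdriver.LogEntry{Source: "stdout", Line: []byte("hello")}); err != nil {
		panic(err)
	}

	var entry logdriver.LogEntry
	if err := logdriver.NewLogEntryDecoder(&buf).Decode(&entry); err != nil {
		panic(err)
	}
	fmt.Printf("%s: %s\n", entry.Source, entry.Line) // stdout: hello
}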
+// Editing this file might prove futile when you re-run the swagger generate command + +// Port An open port on a container +// swagger:model Port +type Port struct { + + // IP + IP string `json:"IP,omitempty"` + + // Port on the container + // Required: true + PrivatePort uint16 `json:"PrivatePort"` + + // Port exposed on the host + PublicPort uint16 `json:"PublicPort,omitempty"` + + // type + // Required: true + Type string `json:"Type"` +} diff --git a/components/engine/api/types/registry/authenticate.go b/components/engine/api/types/registry/authenticate.go new file mode 100644 index 0000000000..42cac4430a --- /dev/null +++ b/components/engine/api/types/registry/authenticate.go @@ -0,0 +1,21 @@ +package registry + +// ---------------------------------------------------------------------------- +// DO NOT EDIT THIS FILE +// This file was generated by `swagger generate operation` +// +// See hack/generate-swagger-api.sh +// ---------------------------------------------------------------------------- + +// AuthenticateOKBody authenticate o k body +// swagger:model AuthenticateOKBody +type AuthenticateOKBody struct { + + // An opaque token used to authenticate a user after a successful login + // Required: true + IdentityToken string `json:"IdentityToken"` + + // The status of the authentication + // Required: true + Status string `json:"Status"` +} diff --git a/components/engine/api/types/registry/registry.go b/components/engine/api/types/registry/registry.go new file mode 100644 index 0000000000..b98a943a13 --- /dev/null +++ b/components/engine/api/types/registry/registry.go @@ -0,0 +1,119 @@ +package registry + +import ( + "encoding/json" + "net" + + "github.com/opencontainers/image-spec/specs-go/v1" +) + +// ServiceConfig stores daemon registry services configuration. 
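// A quick sketch (not part of the generated file) of the JSON shape the Port
// model above marshals to; the values are illustrative.
package main

import (
	"encoding/json"
	"fmt"

	"github.com/docker/docker/api/types"
)

func main() {
	p := types.Port{IP: "0.0.0.0", PrivatePort: 80, PublicPort: 8080, Type: "tcp"}
	b, err := json.Marshal(p)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // {"IP":"0.0.0.0","PrivatePort":80,"PublicPort":8080,"Type":"tcp"}
}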
+type ServiceConfig struct { + AllowNondistributableArtifactsCIDRs []*NetIPNet + AllowNondistributableArtifactsHostnames []string + InsecureRegistryCIDRs []*NetIPNet `json:"InsecureRegistryCIDRs"` + IndexConfigs map[string]*IndexInfo `json:"IndexConfigs"` + Mirrors []string +} + +// NetIPNet is the net.IPNet type, which can be marshalled and +// unmarshalled to JSON +type NetIPNet net.IPNet + +// String returns the CIDR notation of ipnet +func (ipnet *NetIPNet) String() string { + return (*net.IPNet)(ipnet).String() +} + +// MarshalJSON returns the JSON representation of the IPNet +func (ipnet *NetIPNet) MarshalJSON() ([]byte, error) { + return json.Marshal((*net.IPNet)(ipnet).String()) +} + +// UnmarshalJSON sets the IPNet from a byte array of JSON +func (ipnet *NetIPNet) UnmarshalJSON(b []byte) (err error) { + var ipnetStr string + if err = json.Unmarshal(b, &ipnetStr); err == nil { + var cidr *net.IPNet + if _, cidr, err = net.ParseCIDR(ipnetStr); err == nil { + *ipnet = NetIPNet(*cidr) + } + } + return +} + +// IndexInfo contains information about a registry +// +// RepositoryInfo Examples: +// { +// "Index" : { +// "Name" : "docker.io", +// "Mirrors" : ["https://registry-2.docker.io/v1/", "https://registry-3.docker.io/v1/"], +// "Secure" : true, +// "Official" : true, +// }, +// "RemoteName" : "library/debian", +// "LocalName" : "debian", +// "CanonicalName" : "docker.io/debian" +// "Official" : true, +// } +// +// { +// "Index" : { +// "Name" : "127.0.0.1:5000", +// "Mirrors" : [], +// "Secure" : false, +// "Official" : false, +// }, +// "RemoteName" : "user/repo", +// "LocalName" : "127.0.0.1:5000/user/repo", +// "CanonicalName" : "127.0.0.1:5000/user/repo", +// "Official" : false, +// } +type IndexInfo struct { + // Name is the name of the registry, such as "docker.io" + Name string + // Mirrors is a list of mirrors, expressed as URIs + Mirrors []string + // Secure is set to false if the registry is part of the list of + // insecure registries. Insecure registries accept HTTP and/or accept + // HTTPS with certificates from unknown CAs. + Secure bool + // Official indicates whether this is an official registry + Official bool +} + +// SearchResult describes a search result returned from a registry +type SearchResult struct { + // StarCount indicates the number of stars this repository has + StarCount int `json:"star_count"` + // IsOfficial is true if the result is from an official repository. 
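// A short sketch (not part of the diff) of the NetIPNet round trip above: the
// JSON form is the plain CIDR string, which is what insecure-registry
// configuration carries.
package main

import (
	"encoding/json"
	"fmt"

	"github.com/docker/docker/api/types/registry"
)

func main() {
	var n registry.NetIPNet
	if err := json.Unmarshal([]byte(`"10.0.0.0/8"`), &n); err != nil {
		panic(err)
	}
	b, err := json.Marshal(&n) // MarshalJSON is declared on the pointer receiver
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // "10.0.0.0/8"
}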
+ IsOfficial bool `json:"is_official"`
+ // Name is the name of the repository
+ Name string `json:"name"`
+ // IsAutomated indicates whether the result is automated
+ IsAutomated bool `json:"is_automated"`
+ // Description is a textual description of the repository
+ Description string `json:"description"`
+}
+
+// SearchResults lists a collection of search results returned from a registry
+type SearchResults struct {
+ // Query contains the query string that generated the search results
+ Query string `json:"query"`
+ // NumResults indicates the number of results the query returned
+ NumResults int `json:"num_results"`
+ // Results is a slice containing the actual results for the search
+ Results []SearchResult `json:"results"`
+}
+
+// DistributionInspect describes the result obtained from contacting the
+// registry to retrieve image metadata
+type DistributionInspect struct {
+ // Descriptor contains information about the manifest, including
+ // the content addressable digest
+ Descriptor v1.Descriptor
+ // Platforms contains the list of platforms supported by the image,
+ // obtained by parsing the manifest
+ Platforms []v1.Platform
+}
diff --git a/components/engine/api/types/seccomp.go b/components/engine/api/types/seccomp.go
new file mode 100644
index 0000000000..7d62c9a43f
--- /dev/null
+++ b/components/engine/api/types/seccomp.go
@@ -0,0 +1,93 @@
+package types
+
+// Seccomp represents the config for a seccomp profile for syscall restriction.
+type Seccomp struct {
+ DefaultAction Action `json:"defaultAction"`
+ // Architectures is kept to maintain backward compatibility with the old
+ // seccomp profile.
+ Architectures []Arch `json:"architectures,omitempty"`
+ ArchMap []Architecture `json:"archMap,omitempty"`
+ Syscalls []*Syscall `json:"syscalls"`
+}
+
+// Architecture is used to represent a specific architecture
+// and its sub-architectures
+type Architecture struct {
+ Arch Arch `json:"architecture"`
+ SubArches []Arch `json:"subArchitectures"`
+}
+
+// Arch used for architectures
+type Arch string
+
+// Additional architectures permitted to be used for system calls
+// By default only the native architecture of the kernel is permitted
+const (
+ ArchX86 Arch = "SCMP_ARCH_X86"
+ ArchX86_64 Arch = "SCMP_ARCH_X86_64"
+ ArchX32 Arch = "SCMP_ARCH_X32"
+ ArchARM Arch = "SCMP_ARCH_ARM"
+ ArchAARCH64 Arch = "SCMP_ARCH_AARCH64"
+ ArchMIPS Arch = "SCMP_ARCH_MIPS"
+ ArchMIPS64 Arch = "SCMP_ARCH_MIPS64"
+ ArchMIPS64N32 Arch = "SCMP_ARCH_MIPS64N32"
+ ArchMIPSEL Arch = "SCMP_ARCH_MIPSEL"
+ ArchMIPSEL64 Arch = "SCMP_ARCH_MIPSEL64"
+ ArchMIPSEL64N32 Arch = "SCMP_ARCH_MIPSEL64N32"
+ ArchPPC Arch = "SCMP_ARCH_PPC"
+ ArchPPC64 Arch = "SCMP_ARCH_PPC64"
+ ArchPPC64LE Arch = "SCMP_ARCH_PPC64LE"
+ ArchS390 Arch = "SCMP_ARCH_S390"
+ ArchS390X Arch = "SCMP_ARCH_S390X"
+)
+
+// Action taken upon Seccomp rule match
+type Action string
+
+// Define actions for Seccomp rules
+const (
+ ActKill Action = "SCMP_ACT_KILL"
+ ActTrap Action = "SCMP_ACT_TRAP"
+ ActErrno Action = "SCMP_ACT_ERRNO"
+ ActTrace Action = "SCMP_ACT_TRACE"
+ ActAllow Action = "SCMP_ACT_ALLOW"
+)
+
+// Operator used to match syscall arguments in Seccomp
+type Operator string
+
+// Define operators for syscall arguments in Seccomp
+const (
+ OpNotEqual Operator = "SCMP_CMP_NE"
+ OpLessThan Operator = "SCMP_CMP_LT"
+ OpLessEqual Operator = "SCMP_CMP_LE"
+ OpEqualTo Operator = "SCMP_CMP_EQ"
+ OpGreaterEqual Operator = "SCMP_CMP_GE"
+ OpGreaterThan Operator = "SCMP_CMP_GT"
+ OpMaskedEqual Operator = "SCMP_CMP_MASKED_EQ"
+)
+
+// Arg used for matching specific syscall arguments in Seccomp
+type Arg struct {
+ Index uint `json:"index"`
+ Value uint64 `json:"value"`
+ ValueTwo uint64 `json:"valueTwo"`
+ Op Operator `json:"op"`
+}
+
+// Filter is used to conditionally apply Seccomp rules
+type Filter struct {
+ Caps []string `json:"caps,omitempty"`
+ Arches []string `json:"arches,omitempty"`
+}
+
+// Syscall is used to match a group of syscalls in Seccomp
+type Syscall struct {
+ Name string `json:"name,omitempty"`
+ Names []string `json:"names,omitempty"`
+ Action Action `json:"action"`
+ Args []*Arg `json:"args"`
+ Comment string `json:"comment"`
+ Includes Filter `json:"includes"`
+ Excludes Filter `json:"excludes"`
+}
diff --git a/components/engine/api/types/service_update_response.go b/components/engine/api/types/service_update_response.go
new file mode 100644
index 0000000000..74ea64b1bb
--- /dev/null
+++ b/components/engine/api/types/service_update_response.go
@@ -0,0 +1,12 @@
+package types
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+// ServiceUpdateResponse service update response
+// swagger:model ServiceUpdateResponse
+type ServiceUpdateResponse struct {
+
+ // Optional warning messages
+ Warnings []string `json:"Warnings"`
+}
diff --git a/components/engine/api/types/stats.go b/components/engine/api/types/stats.go
new file mode 100644
index 0000000000..7ca76a5b63
--- /dev/null
+++ b/components/engine/api/types/stats.go
@@ -0,0 +1,181 @@
+// Package types is used for API stability in the types and responses returned
+// to consumers of the API stats endpoint.
+package types
+
+import "time"
+
+// ThrottlingData stores CPU throttling stats of one running container.
+// Not used on Windows.
+type ThrottlingData struct {
+ // Number of periods with throttling active
+ Periods uint64 `json:"periods"`
+ // Number of periods when the container hits its throttling limit.
+ ThrottledPeriods uint64 `json:"throttled_periods"`
+ // Aggregate time the container was throttled for in nanoseconds.
+ ThrottledTime uint64 `json:"throttled_time"`
+}
+
+// CPUUsage stores all CPU stats aggregated since container inception.
+type CPUUsage struct {
+ // Total CPU time consumed.
+ // Units: nanoseconds (Linux)
+ // Units: 100's of nanoseconds (Windows)
+ TotalUsage uint64 `json:"total_usage"`
+
+ // Total CPU time consumed per core (Linux). Not used on Windows.
+ // Units: nanoseconds.
+ PercpuUsage []uint64 `json:"percpu_usage,omitempty"`
+
+ // Time spent by tasks of the cgroup in kernel mode (Linux).
+ // Time spent by all container processes in kernel mode (Windows).
+ // Units: nanoseconds (Linux).
+ // Units: 100's of nanoseconds (Windows). Not populated for Hyper-V Containers.
+ UsageInKernelmode uint64 `json:"usage_in_kernelmode"`
+
+ // Time spent by tasks of the cgroup in user mode (Linux).
+ // Time spent by all container processes in user mode (Windows).
+ // Units: nanoseconds (Linux).
+ // Units: 100's of nanoseconds (Windows). Not populated for Hyper-V Containers
+ UsageInUsermode uint64 `json:"usage_in_usermode"`
+}
+
+// CPUStats aggregates and wraps all CPU related info of container
+type CPUStats struct {
+ // CPU Usage. Linux and Windows.
+ CPUUsage CPUUsage `json:"cpu_usage"`
+
+ // System Usage. Linux only.
+ SystemUsage uint64 `json:"system_cpu_usage,omitempty"`
+
+ // Online CPUs. Linux only.
+ OnlineCPUs uint32 `json:"online_cpus,omitempty"`
+
+ // Throttling Data. Linux only.
+ ThrottlingData ThrottlingData `json:"throttling_data,omitempty"`
+}
+
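// A hedged sketch (not part of the diff) of how a seccomp profile document
// maps onto the Seccomp type defined in seccomp.go earlier in this diff; the
// profile content is illustrative, not a recommended policy.
package main

import (
	"encoding/json"
	"fmt"

	"github.com/docker/docker/api/types"
)

func main() {
	profile := []byte(`{
		"defaultAction": "SCMP_ACT_ERRNO",
		"architectures": ["SCMP_ARCH_X86_64", "SCMP_ARCH_X86"],
		"syscalls": [
			{"names": ["read", "write", "exit_group"], "action": "SCMP_ACT_ALLOW"}
		]
	}`)

	var s types.Seccomp
	if err := json.Unmarshal(profile, &s); err != nil {
		panic(err)
	}
	fmt.Println(s.DefaultAction, len(s.Syscalls)) // SCMP_ACT_ERRNO 1
}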
+// MemoryStats aggregates all memory stats since container inception on Linux.
+// Windows returns stats for commit and private working set only.
+type MemoryStats struct {
+ // Linux Memory Stats
+
+ // current res_counter usage for memory
+ Usage uint64 `json:"usage,omitempty"`
+ // maximum usage ever recorded.
+ MaxUsage uint64 `json:"max_usage,omitempty"`
+ // TODO(vishh): Export these as stronger types.
+ // all the stats exported via memory.stat.
+ Stats map[string]uint64 `json:"stats,omitempty"`
+ // number of times memory usage hits limits.
+ Failcnt uint64 `json:"failcnt,omitempty"`
+ Limit uint64 `json:"limit,omitempty"`
+
+ // Windows Memory Stats
+ // See https://technet.microsoft.com/en-us/magazine/ff382715.aspx
+
+ // committed bytes
+ Commit uint64 `json:"commitbytes,omitempty"`
+ // peak committed bytes
+ CommitPeak uint64 `json:"commitpeakbytes,omitempty"`
+ // private working set
+ PrivateWorkingSet uint64 `json:"privateworkingset,omitempty"`
+}
+
+// BlkioStatEntry is one small entity to store a piece of Blkio stats
+// Not used on Windows.
+type BlkioStatEntry struct {
+ Major uint64 `json:"major"`
+ Minor uint64 `json:"minor"`
+ Op string `json:"op"`
+ Value uint64 `json:"value"`
+}
+
+// BlkioStats stores all IO service stats for data read and write.
+// This is a Linux specific structure, as the differences between expressing
+// block I/O on Windows and Linux are significant enough that attempting to
+// morph them into a combined structure makes little sense.
+type BlkioStats struct {
+ // number of bytes transferred to and from the block device
+ IoServiceBytesRecursive []BlkioStatEntry `json:"io_service_bytes_recursive"`
+ IoServicedRecursive []BlkioStatEntry `json:"io_serviced_recursive"`
+ IoQueuedRecursive []BlkioStatEntry `json:"io_queue_recursive"`
+ IoServiceTimeRecursive []BlkioStatEntry `json:"io_service_time_recursive"`
+ IoWaitTimeRecursive []BlkioStatEntry `json:"io_wait_time_recursive"`
+ IoMergedRecursive []BlkioStatEntry `json:"io_merged_recursive"`
+ IoTimeRecursive []BlkioStatEntry `json:"io_time_recursive"`
+ SectorsRecursive []BlkioStatEntry `json:"sectors_recursive"`
+}
+
+// StorageStats is the disk I/O stats for read/write on Windows.
+type StorageStats struct {
+ ReadCountNormalized uint64 `json:"read_count_normalized,omitempty"`
+ ReadSizeBytes uint64 `json:"read_size_bytes,omitempty"`
+ WriteCountNormalized uint64 `json:"write_count_normalized,omitempty"`
+ WriteSizeBytes uint64 `json:"write_size_bytes,omitempty"`
+}
+
+// NetworkStats aggregates the network stats of one container
+type NetworkStats struct {
+ // Bytes received. Windows and Linux.
+ RxBytes uint64 `json:"rx_bytes"`
+ // Packets received. Windows and Linux.
+ RxPackets uint64 `json:"rx_packets"`
+ // Received errors. Not used on Windows. Note that we don't `omitempty` this
+ // field as it is expected in the >=v1.21 API stats structure.
+ RxErrors uint64 `json:"rx_errors"`
+ // Incoming packets dropped. Windows and Linux.
+ RxDropped uint64 `json:"rx_dropped"`
+ // Bytes sent. Windows and Linux.
+ TxBytes uint64 `json:"tx_bytes"`
+ // Packets sent. Windows and Linux.
+ TxPackets uint64 `json:"tx_packets"`
+ // Sent errors. Not used on Windows. Note that we don't `omitempty` this
+ // field as it is expected in the >=v1.21 API stats structure.
+ TxErrors uint64 `json:"tx_errors"`
+ // Outgoing packets dropped. Windows and Linux.
+ TxDropped uint64 `json:"tx_dropped"`
+ // Endpoint ID. Not used on Linux.
+ EndpointID string `json:"endpoint_id,omitempty"`
+ // Instance ID. Not used on Linux.
+ InstanceID string `json:"instance_id,omitempty"`
+}
+
+// PidsStats contains the stats of a container's pids
+type PidsStats struct {
+ // Current is the number of pids in the cgroup
+ Current uint64 `json:"current,omitempty"`
+ // Limit is the hard limit on the number of pids in the cgroup.
+ // A "Limit" of 0 means that there is no limit.
+ Limit uint64 `json:"limit,omitempty"`
+}
+
+// Stats is the ultimate struct aggregating all types of stats of one container
+type Stats struct {
+ // Common stats
+ Read time.Time `json:"read"`
+ PreRead time.Time `json:"preread"`
+
+ // Linux specific stats, not populated on Windows.
+ PidsStats PidsStats `json:"pids_stats,omitempty"`
+ BlkioStats BlkioStats `json:"blkio_stats,omitempty"`
+
+ // Windows specific stats, not populated on Linux.
+ NumProcs uint32 `json:"num_procs"`
+ StorageStats StorageStats `json:"storage_stats,omitempty"`
+
+ // Shared stats
+ CPUStats CPUStats `json:"cpu_stats,omitempty"`
+ PreCPUStats CPUStats `json:"precpu_stats,omitempty"` // "Pre"="Previous"
+ MemoryStats MemoryStats `json:"memory_stats,omitempty"`
+}
+
+// StatsJSON extends Stats with the container name and ID, plus the
+// per-network stats requested by API version >=1.21.
+type StatsJSON struct {
+ Stats
+
+ Name string `json:"name,omitempty"`
+ ID string `json:"id,omitempty"`
+
+ // Networks request version >=1.21
+ Networks map[string]NetworkStats `json:"networks,omitempty"`
+}
diff --git a/components/engine/api/types/strslice/strslice.go b/components/engine/api/types/strslice/strslice.go
new file mode 100644
index 0000000000..bad493fb89
--- /dev/null
+++ b/components/engine/api/types/strslice/strslice.go
@@ -0,0 +1,30 @@
+package strslice
+
+import "encoding/json"
+
+// StrSlice represents a string or an array of strings.
+// We need to override the json decoder to accept both options.
+type StrSlice []string
+
+// UnmarshalJSON decodes the byte slice whether it's a string or an array of
+// strings. This method is needed to implement json.Unmarshaler.
+func (e *StrSlice) UnmarshalJSON(b []byte) error {
+ if len(b) == 0 {
+ // With no input, we preserve the existing value by returning nil and
+ // leaving the target alone. This allows defining default values for
+ // the type.
+ return nil
+ }
+
+ p := make([]string, 0, 1)
+ if err := json.Unmarshal(b, &p); err != nil {
+ var s string
+ if err := json.Unmarshal(b, &s); err != nil {
+ return err
+ }
+ p = append(p, s)
+ }
+
+ *e = p
+ return nil
+}
diff --git a/components/engine/api/types/strslice/strslice_test.go b/components/engine/api/types/strslice/strslice_test.go
new file mode 100644
index 0000000000..1163b3652c
--- /dev/null
+++ b/components/engine/api/types/strslice/strslice_test.go
@@ -0,0 +1,86 @@
+package strslice
+
+import (
+ "encoding/json"
+ "reflect"
+ "testing"
+)
+
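// A sketch (not part of the diff) of the usual client-side calculation over
// the stats types defined in stats.go above: a CPU percentage is derived from
// the deltas between CPUStats and PreCPUStats. The helper name and the sample
// values are ours.
package main

import (
	"fmt"

	"github.com/docker/docker/api/types"
)

func cpuPercent(s *types.StatsJSON) float64 {
	cpuDelta := float64(s.CPUStats.CPUUsage.TotalUsage) - float64(s.PreCPUStats.CPUUsage.TotalUsage)
	sysDelta := float64(s.CPUStats.SystemUsage) - float64(s.PreCPUStats.SystemUsage)

	// Fall back to the length of the per-CPU slice when OnlineCPUs is unset.
	online := float64(s.CPUStats.OnlineCPUs)
	if online == 0 {
		online = float64(len(s.CPUStats.CPUUsage.PercpuUsage))
	}
	if sysDelta > 0 && cpuDelta > 0 {
		return (cpuDelta / sysDelta) * online * 100.0
	}
	return 0.0
}

func main() {
	var s types.StatsJSON
	s.PreCPUStats.CPUUsage.TotalUsage = 100000000
	s.PreCPUStats.SystemUsage = 1000000000
	s.CPUStats.CPUUsage.TotalUsage = 200000000
	s.CPUStats.SystemUsage = 2000000000
	s.CPUStats.OnlineCPUs = 4
	fmt.Printf("%.1f%%\n", cpuPercent(&s)) // 40.0%
}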
+func TestStrSliceMarshalJSON(t *testing.T) {
+ for _, testcase := range []struct {
+ input StrSlice
+ expected string
+ }{
+ // MADNESS(stevvooe): No clue why nil would be "" but empty would be
+ // "null". Had to make a change here that may affect compatibility.
+ {input: nil, expected: "null"},
+ {StrSlice{}, "[]"},
+ {StrSlice{"/bin/sh", "-c", "echo"}, `["/bin/sh","-c","echo"]`},
+ } {
+ data, err := json.Marshal(testcase.input)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if string(data) != testcase.expected {
+ t.Fatalf("%#v: expected %v, got %v", testcase.input, testcase.expected, string(data))
+ }
+ }
+}
+
+func TestStrSliceUnmarshalJSON(t *testing.T) {
+ parts := map[string][]string{
+ "": {"default", "values"},
+ "[]": {},
+ `["/bin/sh","-c","echo"]`: {"/bin/sh", "-c", "echo"},
+ }
+ for json, expectedParts := range parts {
+ strs := StrSlice{"default", "values"}
+ if err := strs.UnmarshalJSON([]byte(json)); err != nil {
+ t.Fatal(err)
+ }
+
+ actualParts := []string(strs)
+ if !reflect.DeepEqual(actualParts, expectedParts) {
+ t.Fatalf("%#v: expected %v, got %v", json, expectedParts, actualParts)
+ }
+
+ }
+}
+
+func TestStrSliceUnmarshalString(t *testing.T) {
+ var e StrSlice
+ echo, err := json.Marshal("echo")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if err := json.Unmarshal(echo, &e); err != nil {
+ t.Fatal(err)
+ }
+
+ if len(e) != 1 {
+ t.Fatalf("expected 1 element after unmarshal: %q", e)
+ }
+
+ if e[0] != "echo" {
+ t.Fatalf("expected `echo`, got: %q", e[0])
+ }
+}
+
+func TestStrSliceUnmarshalSlice(t *testing.T) {
+ var e StrSlice
+ echo, err := json.Marshal([]string{"echo"})
+ if err != nil {
+ t.Fatal(err)
+ }
+ if err := json.Unmarshal(echo, &e); err != nil {
+ t.Fatal(err)
+ }
+
+ if len(e) != 1 {
+ t.Fatalf("expected 1 element after unmarshal: %q", e)
+ }
+
+ if e[0] != "echo" {
+ t.Fatalf("expected `echo`, got: %q", e[0])
+ }
+}
diff --git a/components/engine/api/types/swarm/common.go b/components/engine/api/types/swarm/common.go
new file mode 100644
index 0000000000..54af82b31b
--- /dev/null
+++ b/components/engine/api/types/swarm/common.go
@@ -0,0 +1,40 @@
+package swarm
+
+import "time"
+
+// Version represents the internal object version.
+type Version struct {
+ Index uint64 `json:",omitempty"`
+}
+
+// Meta is a base object inherited by most of the other ones.
+type Meta struct {
+ Version Version `json:",omitempty"`
+ CreatedAt time.Time `json:",omitempty"`
+ UpdatedAt time.Time `json:",omitempty"`
+}
+
+// Annotations represents how to describe an object.
+type Annotations struct {
+ Name string `json:",omitempty"`
+ Labels map[string]string `json:"Labels"`
+}
+
+// Driver represents a driver (network, logging).
+type Driver struct {
+ Name string `json:",omitempty"`
+ Options map[string]string `json:",omitempty"`
+}
+
+// TLSInfo represents the TLS information about what CA certificate is trusted,
+// and who the issuer for a TLS certificate is
+type TLSInfo struct {
+ // TrustRoot is the trusted CA root certificate in PEM format
+ TrustRoot string `json:",omitempty"`
+
+ // CertIssuer is the raw subject bytes of the issuer
+ CertIssuerSubject []byte `json:",omitempty"`
+
+ // CertIssuerPublicKey is the raw public key bytes of the issuer
+ CertIssuerPublicKey []byte `json:",omitempty"`
+}
diff --git a/components/engine/api/types/swarm/config.go b/components/engine/api/types/swarm/config.go
new file mode 100644
index 0000000000..0fb021ce92
--- /dev/null
+++ b/components/engine/api/types/swarm/config.go
@@ -0,0 +1,31 @@
+package swarm
+
+import "os"
+
+// Config represents a config.
+type Config struct {
+ ID string
+ Meta
+ Spec ConfigSpec
+}
+
+// ConfigSpec represents a config specification from a config in swarm
+type ConfigSpec struct {
+ Annotations
+ Data []byte `json:",omitempty"`
+}
+
+// ConfigReferenceFileTarget is a file target in a config reference
+type ConfigReferenceFileTarget struct {
+ Name string
+ UID string
+ GID string
+ Mode os.FileMode
+}
+
+// ConfigReference is a reference to a config in swarm
+type ConfigReference struct {
+ File *ConfigReferenceFileTarget
+ ConfigID string
+ ConfigName string
+}
diff --git a/components/engine/api/types/swarm/container.go b/components/engine/api/types/swarm/container.go
new file mode 100644
index 0000000000..6f8b45f6bb
--- /dev/null
+++ b/components/engine/api/types/swarm/container.go
@@ -0,0 +1,72 @@
+package swarm
+
+import (
+ "time"
+
+ "github.com/docker/docker/api/types/container"
+ "github.com/docker/docker/api/types/mount"
+)
+
+// DNSConfig specifies DNS related configurations in resolver configuration file (resolv.conf)
+// Detailed documentation is available in:
+// http://man7.org/linux/man-pages/man5/resolv.conf.5.html
+// `nameserver`, `search`, and `options` are supported.
+// TODO: `domain` is not supported yet.
+type DNSConfig struct {
+ // Nameservers specifies the IP addresses of the name servers
+ Nameservers []string `json:",omitempty"`
+ // Search specifies the search list for host-name lookup
+ Search []string `json:",omitempty"`
+ // Options allows certain internal resolver variables to be modified
+ Options []string `json:",omitempty"`
+}
+
+// SELinuxContext contains the SELinux labels of the container.
+type SELinuxContext struct {
+ Disable bool
+
+ User string
+ Role string
+ Type string
+ Level string
+}
+
+// CredentialSpec for managed service account (Windows only)
+type CredentialSpec struct {
+ File string
+ Registry string
+}
+
+// Privileges defines the security options for the container.
+type Privileges struct {
+ CredentialSpec *CredentialSpec
+ SELinuxContext *SELinuxContext
+}
+
+// ContainerSpec represents the spec of a container.
+type ContainerSpec struct {
+ Image string `json:",omitempty"`
+ Labels map[string]string `json:",omitempty"`
+ Command []string `json:",omitempty"`
+ Args []string `json:",omitempty"`
+ Hostname string `json:",omitempty"`
+ Env []string `json:",omitempty"`
+ Dir string `json:",omitempty"`
+ User string `json:",omitempty"`
+ Groups []string `json:",omitempty"`
+ Privileges *Privileges `json:",omitempty"`
+ StopSignal string `json:",omitempty"`
+ TTY bool `json:",omitempty"`
+ OpenStdin bool `json:",omitempty"`
+ ReadOnly bool `json:",omitempty"`
+ Mounts []mount.Mount `json:",omitempty"`
+ StopGracePeriod *time.Duration `json:",omitempty"`
+ Healthcheck *container.HealthConfig `json:",omitempty"`
+ // The format of extra hosts on swarmkit is specified in:
+ // http://man7.org/linux/man-pages/man5/hosts.5.html
+ // IP_address canonical_hostname [aliases...]
+ Hosts []string `json:",omitempty"`
+ DNSConfig *DNSConfig `json:",omitempty"`
+ Secrets []*SecretReference `json:",omitempty"`
+ Configs []*ConfigReference `json:",omitempty"`
+}
diff --git a/components/engine/api/types/swarm/network.go b/components/engine/api/types/swarm/network.go
new file mode 100644
index 0000000000..97c484e14c
--- /dev/null
+++ b/components/engine/api/types/swarm/network.go
@@ -0,0 +1,119 @@
+package swarm
+
+import (
+ "github.com/docker/docker/api/types/network"
+)
+
+// Endpoint represents an endpoint.
+type Endpoint struct { + Spec EndpointSpec `json:",omitempty"` + Ports []PortConfig `json:",omitempty"` + VirtualIPs []EndpointVirtualIP `json:",omitempty"` +} + +// EndpointSpec represents the spec of an endpoint. +type EndpointSpec struct { + Mode ResolutionMode `json:",omitempty"` + Ports []PortConfig `json:",omitempty"` +} + +// ResolutionMode represents a resolution mode. +type ResolutionMode string + +const ( + // ResolutionModeVIP VIP + ResolutionModeVIP ResolutionMode = "vip" + // ResolutionModeDNSRR DNSRR + ResolutionModeDNSRR ResolutionMode = "dnsrr" +) + +// PortConfig represents the config of a port. +type PortConfig struct { + Name string `json:",omitempty"` + Protocol PortConfigProtocol `json:",omitempty"` + // TargetPort is the port inside the container + TargetPort uint32 `json:",omitempty"` + // PublishedPort is the port on the swarm hosts + PublishedPort uint32 `json:",omitempty"` + // PublishMode is the mode in which port is published + PublishMode PortConfigPublishMode `json:",omitempty"` +} + +// PortConfigPublishMode represents the mode in which the port is to +// be published. +type PortConfigPublishMode string + +const ( + // PortConfigPublishModeIngress is used for ports published + // for ingress load balancing using routing mesh. + PortConfigPublishModeIngress PortConfigPublishMode = "ingress" + // PortConfigPublishModeHost is used for ports published + // for direct host level access on the host where the task is running. + PortConfigPublishModeHost PortConfigPublishMode = "host" +) + +// PortConfigProtocol represents the protocol of a port. +type PortConfigProtocol string + +const ( + // TODO(stevvooe): These should be used generally, not just for PortConfig. + + // PortConfigProtocolTCP TCP + PortConfigProtocolTCP PortConfigProtocol = "tcp" + // PortConfigProtocolUDP UDP + PortConfigProtocolUDP PortConfigProtocol = "udp" +) + +// EndpointVirtualIP represents the virtual ip of a port. +type EndpointVirtualIP struct { + NetworkID string `json:",omitempty"` + Addr string `json:",omitempty"` +} + +// Network represents a network. +type Network struct { + ID string + Meta + Spec NetworkSpec `json:",omitempty"` + DriverState Driver `json:",omitempty"` + IPAMOptions *IPAMOptions `json:",omitempty"` +} + +// NetworkSpec represents the spec of a network. +type NetworkSpec struct { + Annotations + DriverConfiguration *Driver `json:",omitempty"` + IPv6Enabled bool `json:",omitempty"` + Internal bool `json:",omitempty"` + Attachable bool `json:",omitempty"` + Ingress bool `json:",omitempty"` + IPAMOptions *IPAMOptions `json:",omitempty"` + ConfigFrom *network.ConfigReference `json:",omitempty"` + Scope string `json:",omitempty"` +} + +// NetworkAttachmentConfig represents the configuration of a network attachment. +type NetworkAttachmentConfig struct { + Target string `json:",omitempty"` + Aliases []string `json:",omitempty"` + DriverOpts map[string]string `json:",omitempty"` +} + +// NetworkAttachment represents a network attachment. +type NetworkAttachment struct { + Network Network `json:",omitempty"` + Addresses []string `json:",omitempty"` +} + +// IPAMOptions represents ipam options. +type IPAMOptions struct { + Driver Driver `json:",omitempty"` + Configs []IPAMConfig `json:",omitempty"` +} + +// IPAMConfig represents ipam configuration. 
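// A small sketch (not part of the diff) of an EndpointSpec built from the
// types above: container port 80 published on swarm port 8080 through the
// routing mesh. The values are illustrative.
package main

import (
	"fmt"

	"github.com/docker/docker/api/types/swarm"
)

func main() {
	ep := swarm.EndpointSpec{
		Mode: swarm.ResolutionModeVIP,
		Ports: []swarm.PortConfig{{
			Protocol:      swarm.PortConfigProtocolTCP,
			TargetPort:    80,
			PublishedPort: 8080,
			PublishMode:   swarm.PortConfigPublishModeIngress,
		}},
	}
	fmt.Printf("%+v\n", ep)
}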
+type IPAMConfig struct { + Subnet string `json:",omitempty"` + Range string `json:",omitempty"` + Gateway string `json:",omitempty"` +} diff --git a/components/engine/api/types/swarm/node.go b/components/engine/api/types/swarm/node.go new file mode 100644 index 0000000000..28c6851e9c --- /dev/null +++ b/components/engine/api/types/swarm/node.go @@ -0,0 +1,115 @@ +package swarm + +// Node represents a node. +type Node struct { + ID string + Meta + // Spec defines the desired state of the node as specified by the user. + // The system will honor this and will *never* modify it. + Spec NodeSpec `json:",omitempty"` + // Description encapsulates the properties of the Node as reported by the + // agent. + Description NodeDescription `json:",omitempty"` + // Status provides the current status of the node, as seen by the manager. + Status NodeStatus `json:",omitempty"` + // ManagerStatus provides the current status of the node's manager + // component, if the node is a manager. + ManagerStatus *ManagerStatus `json:",omitempty"` +} + +// NodeSpec represents the spec of a node. +type NodeSpec struct { + Annotations + Role NodeRole `json:",omitempty"` + Availability NodeAvailability `json:",omitempty"` +} + +// NodeRole represents the role of a node. +type NodeRole string + +const ( + // NodeRoleWorker WORKER + NodeRoleWorker NodeRole = "worker" + // NodeRoleManager MANAGER + NodeRoleManager NodeRole = "manager" +) + +// NodeAvailability represents the availability of a node. +type NodeAvailability string + +const ( + // NodeAvailabilityActive ACTIVE + NodeAvailabilityActive NodeAvailability = "active" + // NodeAvailabilityPause PAUSE + NodeAvailabilityPause NodeAvailability = "pause" + // NodeAvailabilityDrain DRAIN + NodeAvailabilityDrain NodeAvailability = "drain" +) + +// NodeDescription represents the description of a node. +type NodeDescription struct { + Hostname string `json:",omitempty"` + Platform Platform `json:",omitempty"` + Resources Resources `json:",omitempty"` + Engine EngineDescription `json:",omitempty"` + TLSInfo TLSInfo `json:",omitempty"` +} + +// Platform represents the platform (Arch/OS). +type Platform struct { + Architecture string `json:",omitempty"` + OS string `json:",omitempty"` +} + +// EngineDescription represents the description of an engine. +type EngineDescription struct { + EngineVersion string `json:",omitempty"` + Labels map[string]string `json:",omitempty"` + Plugins []PluginDescription `json:",omitempty"` +} + +// PluginDescription represents the description of an engine plugin. +type PluginDescription struct { + Type string `json:",omitempty"` + Name string `json:",omitempty"` +} + +// NodeStatus represents the status of a node. +type NodeStatus struct { + State NodeState `json:",omitempty"` + Message string `json:",omitempty"` + Addr string `json:",omitempty"` +} + +// Reachability represents the reachability of a node. +type Reachability string + +const ( + // ReachabilityUnknown UNKNOWN + ReachabilityUnknown Reachability = "unknown" + // ReachabilityUnreachable UNREACHABLE + ReachabilityUnreachable Reachability = "unreachable" + // ReachabilityReachable REACHABLE + ReachabilityReachable Reachability = "reachable" +) + +// ManagerStatus represents the status of a manager. +type ManagerStatus struct { + Leader bool `json:",omitempty"` + Reachability Reachability `json:",omitempty"` + Addr string `json:",omitempty"` +} + +// NodeState represents the state of a node. 
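// A sketch (not part of the diff) of the NodeSpec a client would submit to
// drain a worker; per the Node comment above, only Spec is user-writable. The
// node name is illustrative.
package main

import (
	"fmt"

	"github.com/docker/docker/api/types/swarm"
)

func main() {
	spec := swarm.NodeSpec{
		Annotations:  swarm.Annotations{Name: "worker-1"},
		Role:         swarm.NodeRoleWorker,
		Availability: swarm.NodeAvailabilityDrain,
	}
	fmt.Printf("%+v\n", spec)
}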
+type NodeState string + +const ( + // NodeStateUnknown UNKNOWN + NodeStateUnknown NodeState = "unknown" + // NodeStateDown DOWN + NodeStateDown NodeState = "down" + // NodeStateReady READY + NodeStateReady NodeState = "ready" + // NodeStateDisconnected DISCONNECTED + NodeStateDisconnected NodeState = "disconnected" +) diff --git a/components/engine/api/types/swarm/runtime.go b/components/engine/api/types/swarm/runtime.go new file mode 100644 index 0000000000..c4c731dc82 --- /dev/null +++ b/components/engine/api/types/swarm/runtime.go @@ -0,0 +1,19 @@ +package swarm + +// RuntimeType is the type of runtime used for the TaskSpec +type RuntimeType string + +// RuntimeURL is the proto type url +type RuntimeURL string + +const ( + // RuntimeContainer is the container based runtime + RuntimeContainer RuntimeType = "container" + // RuntimePlugin is the plugin based runtime + RuntimePlugin RuntimeType = "plugin" + + // RuntimeURLContainer is the proto url for the container type + RuntimeURLContainer RuntimeURL = "types.docker.com/RuntimeContainer" + // RuntimeURLPlugin is the proto url for the plugin type + RuntimeURLPlugin RuntimeURL = "types.docker.com/RuntimePlugin" +) diff --git a/components/engine/api/types/swarm/secret.go b/components/engine/api/types/swarm/secret.go new file mode 100644 index 0000000000..fdb2388888 --- /dev/null +++ b/components/engine/api/types/swarm/secret.go @@ -0,0 +1,31 @@ +package swarm + +import "os" + +// Secret represents a secret. +type Secret struct { + ID string + Meta + Spec SecretSpec +} + +// SecretSpec represents a secret specification from a secret in swarm +type SecretSpec struct { + Annotations + Data []byte `json:",omitempty"` +} + +// SecretReferenceFileTarget is a file target in a secret reference +type SecretReferenceFileTarget struct { + Name string + UID string + GID string + Mode os.FileMode +} + +// SecretReference is a reference to a secret in swarm +type SecretReference struct { + File *SecretReferenceFileTarget + SecretID string + SecretName string +} diff --git a/components/engine/api/types/swarm/service.go b/components/engine/api/types/swarm/service.go new file mode 100644 index 0000000000..fa31a7ec86 --- /dev/null +++ b/components/engine/api/types/swarm/service.go @@ -0,0 +1,124 @@ +package swarm + +import "time" + +// Service represents a service. +type Service struct { + ID string + Meta + Spec ServiceSpec `json:",omitempty"` + PreviousSpec *ServiceSpec `json:",omitempty"` + Endpoint Endpoint `json:",omitempty"` + UpdateStatus *UpdateStatus `json:",omitempty"` +} + +// ServiceSpec represents the spec of a service. +type ServiceSpec struct { + Annotations + + // TaskTemplate defines how the service should construct new tasks when + // orchestrating this service. + TaskTemplate TaskSpec `json:",omitempty"` + Mode ServiceMode `json:",omitempty"` + UpdateConfig *UpdateConfig `json:",omitempty"` + RollbackConfig *UpdateConfig `json:",omitempty"` + + // Networks field in ServiceSpec is deprecated. The + // same field in TaskSpec should be used instead. + // This field will be removed in a future release. + Networks []NetworkAttachmentConfig `json:",omitempty"` + EndpointSpec *EndpointSpec `json:",omitempty"` +} + +// ServiceMode represents the mode of a service. +type ServiceMode struct { + Replicated *ReplicatedService `json:",omitempty"` + Global *GlobalService `json:",omitempty"` +} + +// UpdateState is the state of a service update. +type UpdateState string + +const ( + // UpdateStateUpdating is the updating state. 
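// A sketch (not part of the diff) of a minimal replicated ServiceSpec; it
// pulls together types defined across service.go, task.go, and container.go
// in this same diff. The service name and image are illustrative.
package main

import (
	"fmt"

	"github.com/docker/docker/api/types/swarm"
)

func main() {
	replicas := uint64(3)
	spec := swarm.ServiceSpec{
		Annotations: swarm.Annotations{Name: "web"},
		TaskTemplate: swarm.TaskSpec{
			ContainerSpec: swarm.ContainerSpec{Image: "nginx:alpine"},
		},
		Mode: swarm.ServiceMode{Replicated: &swarm.ReplicatedService{Replicas: &replicas}},
	}
	fmt.Println(spec.Annotations.Name, *spec.Mode.Replicated.Replicas)
}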
+ UpdateStateUpdating UpdateState = "updating"
+ // UpdateStatePaused is the paused state.
+ UpdateStatePaused UpdateState = "paused"
+ // UpdateStateCompleted is the completed state.
+ UpdateStateCompleted UpdateState = "completed"
+ // UpdateStateRollbackStarted is the state with a rollback in progress.
+ UpdateStateRollbackStarted UpdateState = "rollback_started"
+ // UpdateStateRollbackPaused is the state with a paused rollback.
+ UpdateStateRollbackPaused UpdateState = "rollback_paused"
+ // UpdateStateRollbackCompleted is the state with a completed rollback.
+ UpdateStateRollbackCompleted UpdateState = "rollback_completed"
+)
+
+// UpdateStatus reports the status of a service update.
+type UpdateStatus struct {
+ State UpdateState `json:",omitempty"`
+ StartedAt *time.Time `json:",omitempty"`
+ CompletedAt *time.Time `json:",omitempty"`
+ Message string `json:",omitempty"`
+}
+
+// ReplicatedService is a kind of ServiceMode.
+type ReplicatedService struct {
+ Replicas *uint64 `json:",omitempty"`
+}
+
+// GlobalService is a kind of ServiceMode.
+type GlobalService struct{}
+
+const (
+ // UpdateFailureActionPause PAUSE
+ UpdateFailureActionPause = "pause"
+ // UpdateFailureActionContinue CONTINUE
+ UpdateFailureActionContinue = "continue"
+ // UpdateFailureActionRollback ROLLBACK
+ UpdateFailureActionRollback = "rollback"
+
+ // UpdateOrderStopFirst STOP_FIRST
+ UpdateOrderStopFirst = "stop-first"
+ // UpdateOrderStartFirst START_FIRST
+ UpdateOrderStartFirst = "start-first"
+)
+
+// UpdateConfig represents the update configuration.
+type UpdateConfig struct {
+ // Maximum number of tasks to be updated in one iteration.
+ // 0 means unlimited parallelism.
+ Parallelism uint64
+
+ // Amount of time between updates.
+ Delay time.Duration `json:",omitempty"`
+
+ // FailureAction is the action to take when an update fails.
+ FailureAction string `json:",omitempty"`
+
+ // Monitor indicates how long to monitor a task for failure after it is
+ // created. If the task fails by ending up in one of the states
+ // REJECTED, COMPLETED, or FAILED, within Monitor from its creation,
+ // this counts as a failure. If it fails after Monitor, it does not
+ // count as a failure. If Monitor is unspecified, a default value will
+ // be used.
+ Monitor time.Duration `json:",omitempty"`
+
+ // MaxFailureRatio is the fraction of tasks that may fail during
+ // an update before the failure action is invoked. Any task created by
+ // the current update which ends up in one of the states REJECTED,
+ // COMPLETED or FAILED within Monitor from its creation counts as a
+ // failure. The number of failures is divided by the number of tasks
+ // being updated, and if this fraction is greater than
+ // MaxFailureRatio, the failure action is invoked.
+ //
+ // If the failure action is CONTINUE, there is no effect.
+ // If the failure action is PAUSE, no more tasks will be updated until
+ // another update is started.
+ MaxFailureRatio float32
+
+ // Order indicates the order of operations when rolling out an updated
+ // task. Either the old task is shut down before the new task is
+ // started, or the new task is started before the old task is shut down.
+ Order string
+}
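// A sketch (not part of the diff) of an UpdateConfig wired to the constants
// above: two tasks at a time, start-first, roll back if more than 20% of
// updated tasks fail within the monitoring window. The numbers are
// illustrative.
package main

import (
	"fmt"
	"time"

	"github.com/docker/docker/api/types/swarm"
)

func main() {
	cfg := swarm.UpdateConfig{
		Parallelism:     2,
		Delay:           10 * time.Second,
		FailureAction:   swarm.UpdateFailureActionRollback,
		Monitor:         30 * time.Second,
		MaxFailureRatio: 0.2,
		Order:           swarm.UpdateOrderStartFirst,
	}
	fmt.Printf("%+v\n", cfg)
}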
diff --git a/components/engine/api/types/swarm/swarm.go b/components/engine/api/types/swarm/swarm.go
new file mode 100644
index 0000000000..5b74f14b11
--- /dev/null
+++ b/components/engine/api/types/swarm/swarm.go
@@ -0,0 +1,217 @@
+package swarm
+
+import "time"
+
+// ClusterInfo represents info about the cluster for outputting in "info";
+// it contains the same information as "Swarm", but without the JoinTokens
+type ClusterInfo struct {
+ ID string
+ Meta
+ Spec Spec
+ TLSInfo TLSInfo
+ RootRotationInProgress bool
+}
+
+// Swarm represents a swarm.
+type Swarm struct {
+ ClusterInfo
+ JoinTokens JoinTokens
+}
+
+// JoinTokens contains the tokens workers and managers need to join the swarm.
+type JoinTokens struct {
+ // Worker is the join token workers may use to join the swarm.
+ Worker string
+ // Manager is the join token managers may use to join the swarm.
+ Manager string
+}
+
+// Spec represents the spec of a swarm.
+type Spec struct {
+ Annotations
+
+ Orchestration OrchestrationConfig `json:",omitempty"`
+ Raft RaftConfig `json:",omitempty"`
+ Dispatcher DispatcherConfig `json:",omitempty"`
+ CAConfig CAConfig `json:",omitempty"`
+ TaskDefaults TaskDefaults `json:",omitempty"`
+ EncryptionConfig EncryptionConfig `json:",omitempty"`
+}
+
+// OrchestrationConfig represents orchestration configuration.
+type OrchestrationConfig struct {
+ // TaskHistoryRetentionLimit is the number of historic tasks to keep per instance or
+ // node. If negative, never remove completed or failed tasks.
+ TaskHistoryRetentionLimit *int64 `json:",omitempty"`
+}
+
+// TaskDefaults parameterizes cluster-level task creation with default values.
+type TaskDefaults struct {
+ // LogDriver selects the log driver to use for tasks created in the
+ // orchestrator if unspecified by a service.
+ //
+ // Updating this value will only have an effect on new tasks. Old tasks
+ // will continue to use their previously configured log driver until
+ // recreated.
+ LogDriver *Driver `json:",omitempty"`
+}
+
+// EncryptionConfig controls at-rest encryption of data and keys.
+type EncryptionConfig struct {
+ // AutoLockManagers specifies whether or not managers' TLS keys and raft data
+ // should be encrypted at rest in such a way that they must be unlocked
+ // before the manager node starts up again.
+ AutoLockManagers bool
+}
+
+// RaftConfig represents raft configuration.
+type RaftConfig struct {
+ // SnapshotInterval is the number of log entries between snapshots.
+ SnapshotInterval uint64 `json:",omitempty"`
+
+ // KeepOldSnapshots is the number of snapshots to keep beyond the
+ // current snapshot.
+ KeepOldSnapshots *uint64 `json:",omitempty"`
+
+ // LogEntriesForSlowFollowers is the number of log entries to keep
+ // around to sync up slow followers after a snapshot is created.
+ LogEntriesForSlowFollowers uint64 `json:",omitempty"`
+
+ // ElectionTick is the number of ticks that a follower will wait for a message
+ // from the leader before becoming a candidate and starting an election.
+ // ElectionTick must be greater than HeartbeatTick.
+ //
+ // A tick currently defaults to one second, so these translate directly to
+ // seconds currently, but this is NOT guaranteed.
+ ElectionTick int
+
+ // HeartbeatTick is the number of ticks between heartbeats. Every
+ // HeartbeatTick ticks, the leader will send a heartbeat to the
+ // followers.
+ //
+ // A tick currently defaults to one second, so these translate directly to
+ // seconds currently, but this is NOT guaranteed.
+ HeartbeatTick int
+}
+
+// DispatcherConfig represents dispatcher configuration.
+type DispatcherConfig struct {
+ // HeartbeatPeriod defines how often agent should send heartbeats to
+ // dispatcher.
+ HeartbeatPeriod time.Duration `json:",omitempty"`
+}
+
+// CAConfig represents CA configuration.
+type CAConfig struct {
+ // NodeCertExpiry is the duration certificates should be issued for
+ NodeCertExpiry time.Duration `json:",omitempty"`
+
+ // ExternalCAs is a list of CAs to which a manager node will make
+ // certificate signing requests for node certificates.
+ ExternalCAs []*ExternalCA `json:",omitempty"`
+
+ // SigningCACert and SigningCAKey specify the desired signing root CA and
+ // root CA key for the swarm. When inspecting the cluster, the key will
+ // be redacted.
+ SigningCACert string `json:",omitempty"`
+ SigningCAKey string `json:",omitempty"`
+
+ // If this value changes, and there is no specified signing cert and key,
+ // then the swarm is forced to generate a new root certificate and key.
+ ForceRotate uint64 `json:",omitempty"`
+}
+
+// ExternalCAProtocol represents type of external CA.
+type ExternalCAProtocol string
+
+// ExternalCAProtocolCFSSL CFSSL
+const ExternalCAProtocolCFSSL ExternalCAProtocol = "cfssl"
+
+// ExternalCA defines external CA to be used by the cluster.
+type ExternalCA struct {
+ // Protocol is the protocol used by this external CA.
+ Protocol ExternalCAProtocol
+
+ // URL is the URL where the external CA can be reached.
+ URL string
+
+ // Options is a set of additional key/value pairs whose interpretation
+ // depends on the specified CA type.
+ Options map[string]string `json:",omitempty"`
+
+ // CACert specifies which root CA is used by this external CA. This certificate must
+ // be in PEM format.
+ CACert string
+}
+
+// InitRequest is the request used to init a swarm.
+type InitRequest struct {
+ ListenAddr string
+ AdvertiseAddr string
+ DataPathAddr string
+ ForceNewCluster bool
+ Spec Spec
+ AutoLockManagers bool
+ Availability NodeAvailability
+}
+
+// JoinRequest is the request used to join a swarm.
+type JoinRequest struct {
+ ListenAddr string
+ AdvertiseAddr string
+ DataPathAddr string
+ RemoteAddrs []string
+ JoinToken string // accept by secret
+ Availability NodeAvailability
+}
+
+// UnlockRequest is the request used to unlock a swarm.
+type UnlockRequest struct {
+ // UnlockKey is the unlock key in ASCII-armored format.
+ UnlockKey string
+}
+
+// LocalNodeState represents the state of the local node.
+type LocalNodeState string
+
+const (
+ // LocalNodeStateInactive INACTIVE
+ LocalNodeStateInactive LocalNodeState = "inactive"
+ // LocalNodeStatePending PENDING
+ LocalNodeStatePending LocalNodeState = "pending"
+ // LocalNodeStateActive ACTIVE
+ LocalNodeStateActive LocalNodeState = "active"
+ // LocalNodeStateError ERROR
+ LocalNodeStateError LocalNodeState = "error"
+ // LocalNodeStateLocked LOCKED
+ LocalNodeStateLocked LocalNodeState = "locked"
+)
+
+// Info represents generic information about swarm.
+type Info struct {
+ NodeID string
+ NodeAddr string
+
+ LocalNodeState LocalNodeState
+ ControlAvailable bool
+ Error string
+
+ RemoteManagers []Peer
+ Nodes int `json:",omitempty"`
+ Managers int `json:",omitempty"`
+
+ Cluster *ClusterInfo `json:",omitempty"`
+}
+
+// Peer represents a peer.
+type Peer struct {
+ NodeID string
+ Addr string
+}
+
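// A sketch (not part of the diff) of an InitRequest for bootstrapping a
// single-node swarm; the addresses are illustrative.
package main

import (
	"fmt"

	"github.com/docker/docker/api/types/swarm"
)

func main() {
	req := swarm.InitRequest{
		ListenAddr:    "0.0.0.0:2377",
		AdvertiseAddr: "192.168.99.100:2377",
		Availability:  swarm.NodeAvailabilityActive,
	}
	fmt.Printf("%+v\n", req)
}

+// UpdateFlags contains flags for SwarmUpdate.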
+type UpdateFlags struct { + RotateWorkerToken bool + RotateManagerToken bool + RotateManagerUnlockKey bool +} diff --git a/components/engine/api/types/swarm/task.go b/components/engine/api/types/swarm/task.go new file mode 100644 index 0000000000..9fd52e47d0 --- /dev/null +++ b/components/engine/api/types/swarm/task.go @@ -0,0 +1,152 @@ +package swarm + +import "time" + +// TaskState represents the state of a task. +type TaskState string + +const ( + // TaskStateNew NEW + TaskStateNew TaskState = "new" + // TaskStateAllocated ALLOCATED + TaskStateAllocated TaskState = "allocated" + // TaskStatePending PENDING + TaskStatePending TaskState = "pending" + // TaskStateAssigned ASSIGNED + TaskStateAssigned TaskState = "assigned" + // TaskStateAccepted ACCEPTED + TaskStateAccepted TaskState = "accepted" + // TaskStatePreparing PREPARING + TaskStatePreparing TaskState = "preparing" + // TaskStateReady READY + TaskStateReady TaskState = "ready" + // TaskStateStarting STARTING + TaskStateStarting TaskState = "starting" + // TaskStateRunning RUNNING + TaskStateRunning TaskState = "running" + // TaskStateComplete COMPLETE + TaskStateComplete TaskState = "complete" + // TaskStateShutdown SHUTDOWN + TaskStateShutdown TaskState = "shutdown" + // TaskStateFailed FAILED + TaskStateFailed TaskState = "failed" + // TaskStateRejected REJECTED + TaskStateRejected TaskState = "rejected" +) + +// Task represents a task. +type Task struct { + ID string + Meta + Annotations + + Spec TaskSpec `json:",omitempty"` + ServiceID string `json:",omitempty"` + Slot int `json:",omitempty"` + NodeID string `json:",omitempty"` + Status TaskStatus `json:",omitempty"` + DesiredState TaskState `json:",omitempty"` + NetworksAttachments []NetworkAttachment `json:",omitempty"` +} + +// TaskSpec represents the spec of a task. +type TaskSpec struct { + ContainerSpec ContainerSpec `json:",omitempty"` + Resources *ResourceRequirements `json:",omitempty"` + RestartPolicy *RestartPolicy `json:",omitempty"` + Placement *Placement `json:",omitempty"` + Networks []NetworkAttachmentConfig `json:",omitempty"` + + // LogDriver specifies the LogDriver to use for tasks created from this + // spec. If not present, the one on cluster default on swarm.Spec will be + // used, finally falling back to the engine default if not specified. + LogDriver *Driver `json:",omitempty"` + + // ForceUpdate is a counter that triggers an update even if no relevant + // parameters have been changed. + ForceUpdate uint64 + + Runtime RuntimeType `json:",omitempty"` + // TODO (ehazlett): this should be removed and instead + // use struct tags (proto) for the runtimes + RuntimeData []byte `json:",omitempty"` +} + +// Resources represents resources (CPU/Memory). +type Resources struct { + NanoCPUs int64 `json:",omitempty"` + MemoryBytes int64 `json:",omitempty"` +} + +// ResourceRequirements represents resources requirements. +type ResourceRequirements struct { + Limits *Resources `json:",omitempty"` + Reservations *Resources `json:",omitempty"` +} + +// Placement represents orchestration parameters. +type Placement struct { + Constraints []string `json:",omitempty"` + Preferences []PlacementPreference `json:",omitempty"` + + // Platforms stores all the platforms that the image can run on. + // This field is used in the platform filter for scheduling. If empty, + // then the platform filter is off, meaning there are no scheduling restrictions. 
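// A sketch (not part of the diff) of ResourceRequirements using the Resources
// type above; NanoCPUs is expressed in billionths of a CPU, so 500000000 is
// half a core. The numbers are illustrative.
package main

import (
	"fmt"

	"github.com/docker/docker/api/types/swarm"
)

func main() {
	res := swarm.ResourceRequirements{
		Limits:       &swarm.Resources{NanoCPUs: 500000000, MemoryBytes: 256 * 1024 * 1024},
		Reservations: &swarm.Resources{NanoCPUs: 250000000},
	}
	fmt.Printf("%+v\n", res.Limits)
}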
+ Platforms []Platform `json:",omitempty"`
+}
+
+// PlacementPreference provides a way to make the scheduler aware of factors
+// such as topology.
+type PlacementPreference struct {
+ Spread *SpreadOver
+}
+
+// SpreadOver is a scheduling preference that instructs the scheduler to spread
+// tasks evenly over groups of nodes identified by labels.
+type SpreadOver struct {
+ // label descriptor, such as engine.labels.az
+ SpreadDescriptor string
+}
+
+// RestartPolicy represents the restart policy.
+type RestartPolicy struct {
+ Condition RestartPolicyCondition `json:",omitempty"`
+ Delay *time.Duration `json:",omitempty"`
+ MaxAttempts *uint64 `json:",omitempty"`
+ Window *time.Duration `json:",omitempty"`
+}
+
+// RestartPolicyCondition represents when to restart.
+type RestartPolicyCondition string
+
+const (
+ // RestartPolicyConditionNone NONE
+ RestartPolicyConditionNone RestartPolicyCondition = "none"
+ // RestartPolicyConditionOnFailure ON_FAILURE
+ RestartPolicyConditionOnFailure RestartPolicyCondition = "on-failure"
+ // RestartPolicyConditionAny ANY
+ RestartPolicyConditionAny RestartPolicyCondition = "any"
+)
+
+// TaskStatus represents the status of a task.
+type TaskStatus struct {
+ Timestamp time.Time `json:",omitempty"`
+ State TaskState `json:",omitempty"`
+ Message string `json:",omitempty"`
+ Err string `json:",omitempty"`
+ ContainerStatus ContainerStatus `json:",omitempty"`
+ PortStatus PortStatus `json:",omitempty"`
+}
+
+// ContainerStatus represents the status of a container.
+type ContainerStatus struct {
+ ContainerID string `json:",omitempty"`
+ PID int `json:",omitempty"`
+ ExitCode int `json:",omitempty"`
+}
+
+// PortStatus represents the port status of a task's host ports whose
+// service has published host ports
+type PortStatus struct {
+ Ports []PortConfig `json:",omitempty"`
+}
diff --git a/components/engine/api/types/time/duration_convert.go b/components/engine/api/types/time/duration_convert.go
new file mode 100644
index 0000000000..63e1eec19e
--- /dev/null
+++ b/components/engine/api/types/time/duration_convert.go
@@ -0,0 +1,12 @@
+package time
+
+import (
+ "strconv"
+ "time"
+)
+
+// DurationToSecondsString converts the specified duration to the number of
+// seconds it represents, formatted as a string.
+func DurationToSecondsString(duration time.Duration) string {
+	return strconv.FormatFloat(duration.Seconds(), 'f', 0, 64)
+}
diff --git a/components/engine/api/types/time/duration_convert_test.go b/components/engine/api/types/time/duration_convert_test.go
new file mode 100644
index 0000000000..869c08f863
--- /dev/null
+++ b/components/engine/api/types/time/duration_convert_test.go
@@ -0,0 +1,26 @@
+package time
+
+import (
+	"testing"
+	"time"
+)
+
+func TestDurationToSecondsString(t *testing.T) {
+	cases := []struct {
+		in       time.Duration
+		expected string
+	}{
+		{0 * time.Second, "0"},
+		{1 * time.Second, "1"},
+		{1 * time.Minute, "60"},
+		{24 * time.Hour, "86400"},
+	}
+
+	for _, c := range cases {
+		s := DurationToSecondsString(c.in)
+		if s != c.expected {
+			t.Errorf("wrong value for input `%v`: expected `%s`, got `%s`", c.in, c.expected, s)
+			t.Fail()
+		}
+	}
+}
diff --git a/components/engine/api/types/time/timestamp.go b/components/engine/api/types/time/timestamp.go
new file mode 100644
index 0000000000..9aa9702dad
--- /dev/null
+++ b/components/engine/api/types/time/timestamp.go
@@ -0,0 +1,124 @@
+package time
+
+import (
+	"fmt"
+	"math"
+	"strconv"
+	"strings"
+	"time"
+)
+
+// These are additional predefined layouts for use in Time.Format and Time.Parse
+// with --since and --until parameters for `docker logs` and `docker events`
+const (
+	rFC3339Local     = "2006-01-02T15:04:05"           // RFC3339 with local timezone
+	rFC3339NanoLocal = "2006-01-02T15:04:05.999999999" // RFC3339Nano with local timezone
+	dateWithZone     = "2006-01-02Z07:00"              // RFC3339 with time at 00:00:00
+	dateLocal        = "2006-01-02"                    // RFC3339 with local timezone and time at 00:00:00
+)
+
+// GetTimestamp tries to parse the given string first as a Go duration, then as
+// an RFC3339 time, and finally as a Unix timestamp. If any of these succeed,
+// it returns a Unix timestamp as a string; otherwise it returns the given
+// value unchanged.
+// In case of duration input, the returned timestamp is computed
+// as the given reference time minus the amount of the duration.
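+// For example (expected values taken from the tests in this package; `now`
+// is the caller-supplied reference time):
+//
+//	GetTimestamp("1h30m", now)                // Unix seconds for 90 minutes before now
+//	GetTimestamp("2006-01-02T15:04:05Z", now) // "1136214245.000000000"
+//	GetTimestamp("1136073600", now)           // "1136073600" (returned as-is)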
+func GetTimestamp(value string, reference time.Time) (string, error) {
+	if d, err := time.ParseDuration(value); value != "0" && err == nil {
+		return strconv.FormatInt(reference.Add(-d).Unix(), 10), nil
+	}
+
+	var format string
+	var parseInLocation bool
+
+	// if the string has a Z, a +, or three dashes, use Parse; otherwise use ParseInLocation
+	parseInLocation = !(strings.ContainsAny(value, "zZ+") || strings.Count(value, "-") == 3)
+
+	if strings.Contains(value, ".") {
+		if parseInLocation {
+			format = rFC3339NanoLocal
+		} else {
+			format = time.RFC3339Nano
+		}
+	} else if strings.Contains(value, "T") {
+		// we want the number of colons in the T portion of the timestamp
+		tcolons := strings.Count(value, ":")
+		// if parseInLocation is off and we have a +/- zone offset (not Z), then
+		// there will be an extra colon in the input for the tz offset; subtract
+		// that colon from the tcolons count
+		if !parseInLocation && !strings.ContainsAny(value, "zZ") && tcolons > 0 {
+			tcolons--
+		}
+		if parseInLocation {
+			switch tcolons {
+			case 0:
+				format = "2006-01-02T15"
+			case 1:
+				format = "2006-01-02T15:04"
+			default:
+				format = rFC3339Local
+			}
+		} else {
+			switch tcolons {
+			case 0:
+				format = "2006-01-02T15Z07:00"
+			case 1:
+				format = "2006-01-02T15:04Z07:00"
+			default:
+				format = time.RFC3339
+			}
+		}
+	} else if parseInLocation {
+		format = dateLocal
+	} else {
+		format = dateWithZone
+	}
+
+	var t time.Time
+	var err error
+
+	if parseInLocation {
+		t, err = time.ParseInLocation(format, value, time.FixedZone(reference.Zone()))
+	} else {
+		t, err = time.Parse(format, value)
+	}
+
+	if err != nil {
+		// if there is a `-` then it's an RFC3339-like timestamp, otherwise assume a Unix timestamp
+		if strings.Contains(value, "-") {
+			return "", err // was probably an RFC3339-like timestamp but the parser failed with an error
+		}
+		return value, nil // Unix timestamp in and out case (meaning: the value passed at the command line is already in the right format for passing to the server)
+	}
+
+	return fmt.Sprintf("%d.%09d", t.Unix(), int64(t.Nanosecond())), nil
+}
+
+// ParseTimestamps returns seconds and nanoseconds from a timestamp that has
+// the format "%d.%09d" (as produced by fmt.Sprintf("%d.%09d", t.Unix(),
+// int64(t.Nanosecond()))).
+// If the incoming nanosecond portion is longer or shorter than 9 digits, it is
+// converted to nanoseconds. The expectation is that the seconds and
+// nanoseconds will be used to create a time variable.
For example: +// seconds, nanoseconds, err := ParseTimestamp("1136073600.000000001",0) +// if err == nil since := time.Unix(seconds, nanoseconds) +// returns seconds as def(aultSeconds) if value == "" +func ParseTimestamps(value string, def int64) (int64, int64, error) { + if value == "" { + return def, 0, nil + } + sa := strings.SplitN(value, ".", 2) + s, err := strconv.ParseInt(sa[0], 10, 64) + if err != nil { + return s, 0, err + } + if len(sa) != 2 { + return s, 0, nil + } + n, err := strconv.ParseInt(sa[1], 10, 64) + if err != nil { + return s, n, err + } + // should already be in nanoseconds but just in case convert n to nanoseconds + n = int64(float64(n) * math.Pow(float64(10), float64(9-len(sa[1])))) + return s, n, nil +} diff --git a/components/engine/api/types/time/timestamp_test.go b/components/engine/api/types/time/timestamp_test.go new file mode 100644 index 0000000000..a1651309d7 --- /dev/null +++ b/components/engine/api/types/time/timestamp_test.go @@ -0,0 +1,93 @@ +package time + +import ( + "fmt" + "testing" + "time" +) + +func TestGetTimestamp(t *testing.T) { + now := time.Now().In(time.UTC) + cases := []struct { + in, expected string + expectedErr bool + }{ + // Partial RFC3339 strings get parsed with second precision + {"2006-01-02T15:04:05.999999999+07:00", "1136189045.999999999", false}, + {"2006-01-02T15:04:05.999999999Z", "1136214245.999999999", false}, + {"2006-01-02T15:04:05.999999999", "1136214245.999999999", false}, + {"2006-01-02T15:04:05Z", "1136214245.000000000", false}, + {"2006-01-02T15:04:05", "1136214245.000000000", false}, + {"2006-01-02T15:04:0Z", "", true}, + {"2006-01-02T15:04:0", "", true}, + {"2006-01-02T15:04Z", "1136214240.000000000", false}, + {"2006-01-02T15:04+00:00", "1136214240.000000000", false}, + {"2006-01-02T15:04-00:00", "1136214240.000000000", false}, + {"2006-01-02T15:04", "1136214240.000000000", false}, + {"2006-01-02T15:0Z", "", true}, + {"2006-01-02T15:0", "", true}, + {"2006-01-02T15Z", "1136214000.000000000", false}, + {"2006-01-02T15+00:00", "1136214000.000000000", false}, + {"2006-01-02T15-00:00", "1136214000.000000000", false}, + {"2006-01-02T15", "1136214000.000000000", false}, + {"2006-01-02T1Z", "1136163600.000000000", false}, + {"2006-01-02T1", "1136163600.000000000", false}, + {"2006-01-02TZ", "", true}, + {"2006-01-02T", "", true}, + {"2006-01-02+00:00", "1136160000.000000000", false}, + {"2006-01-02-00:00", "1136160000.000000000", false}, + {"2006-01-02-00:01", "1136160060.000000000", false}, + {"2006-01-02Z", "1136160000.000000000", false}, + {"2006-01-02", "1136160000.000000000", false}, + {"2015-05-13T20:39:09Z", "1431549549.000000000", false}, + + // unix timestamps returned as is + {"1136073600", "1136073600", false}, + {"1136073600.000000001", "1136073600.000000001", false}, + // Durations + {"1m", fmt.Sprintf("%d", now.Add(-1*time.Minute).Unix()), false}, + {"1.5h", fmt.Sprintf("%d", now.Add(-90*time.Minute).Unix()), false}, + {"1h30m", fmt.Sprintf("%d", now.Add(-90*time.Minute).Unix()), false}, + + // String fallback + {"invalid", "invalid", false}, + } + + for _, c := range cases { + o, err := GetTimestamp(c.in, now) + if o != c.expected || + (err == nil && c.expectedErr) || + (err != nil && !c.expectedErr) { + t.Errorf("wrong value for '%s'. 
expected:'%s' got:'%s' with error: `%s`", c.in, c.expected, o, err) + t.Fail() + } + } +} + +func TestParseTimestamps(t *testing.T) { + cases := []struct { + in string + def, expectedS, expectedN int64 + expectedErr bool + }{ + // unix timestamps + {"1136073600", 0, 1136073600, 0, false}, + {"1136073600.000000001", 0, 1136073600, 1, false}, + {"1136073600.0000000010", 0, 1136073600, 1, false}, + {"1136073600.00000001", 0, 1136073600, 10, false}, + {"foo.bar", 0, 0, 0, true}, + {"1136073600.bar", 0, 1136073600, 0, true}, + {"", -1, -1, 0, false}, + } + + for _, c := range cases { + s, n, err := ParseTimestamps(c.in, c.def) + if s != c.expectedS || + n != c.expectedN || + (err == nil && c.expectedErr) || + (err != nil && !c.expectedErr) { + t.Errorf("wrong values for input `%s` with default `%d` expected:'%d'seconds and `%d`nanosecond got:'%d'seconds and `%d`nanoseconds with error: `%s`", c.in, c.def, c.expectedS, c.expectedN, s, n, err) + t.Fail() + } + } +} diff --git a/components/engine/api/types/types.go b/components/engine/api/types/types.go new file mode 100644 index 0000000000..c905466e29 --- /dev/null +++ b/components/engine/api/types/types.go @@ -0,0 +1,554 @@ +package types + +import ( + "errors" + "fmt" + "io" + "os" + "strings" + "time" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/mount" + "github.com/docker/docker/api/types/network" + "github.com/docker/docker/api/types/registry" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/go-connections/nat" +) + +// RootFS returns Image's RootFS description including the layer IDs. +type RootFS struct { + Type string + Layers []string `json:",omitempty"` + BaseLayer string `json:",omitempty"` +} + +// ImageInspect contains response of Engine API: +// GET "/images/{name:.*}/json" +type ImageInspect struct { + ID string `json:"Id"` + RepoTags []string + RepoDigests []string + Parent string + Comment string + Created string + Container string + ContainerConfig *container.Config + DockerVersion string + Author string + Config *container.Config + Architecture string + Os string + OsVersion string `json:",omitempty"` + Size int64 + VirtualSize int64 + GraphDriver GraphDriverData + RootFS RootFS +} + +// Container contains response of Engine API: +// GET "/containers/json" +type Container struct { + ID string `json:"Id"` + Names []string + Image string + ImageID string + Command string + Created int64 + Ports []Port + SizeRw int64 `json:",omitempty"` + SizeRootFs int64 `json:",omitempty"` + Labels map[string]string + State string + Status string + HostConfig struct { + NetworkMode string `json:",omitempty"` + } + NetworkSettings *SummaryNetworkSettings + Mounts []MountPoint +} + +// CopyConfig contains request body of Engine API: +// POST "/containers/"+containerID+"/copy" +type CopyConfig struct { + Resource string +} + +// ContainerPathStat is used to encode the header from +// GET "/containers/{name:.*}/archive" +// "Name" is the file or directory name. 
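+// As an illustrative sketch (values are examples only), the stat JSON that
+// gets base64-encoded into the response header might look like:
+//
+//	{"name":"app.log","size":1024,"mode":420,"mtime":"2017-05-01T10:00:00Z","linkTarget":""}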
+type ContainerPathStat struct { + Name string `json:"name"` + Size int64 `json:"size"` + Mode os.FileMode `json:"mode"` + Mtime time.Time `json:"mtime"` + LinkTarget string `json:"linkTarget"` +} + +// ContainerStats contains response of Engine API: +// GET "/stats" +type ContainerStats struct { + Body io.ReadCloser `json:"body"` + OSType string `json:"ostype"` +} + +// Ping contains response of Engine API: +// GET "/_ping" +type Ping struct { + APIVersion string + OSType string + Experimental bool +} + +// Version contains response of Engine API: +// GET "/version" +type Version struct { + Version string + APIVersion string `json:"ApiVersion"` + MinAPIVersion string `json:"MinAPIVersion,omitempty"` + GitCommit string + GoVersion string + Os string + Arch string + KernelVersion string `json:",omitempty"` + Experimental bool `json:",omitempty"` + BuildTime string `json:",omitempty"` +} + +// Commit holds the Git-commit (SHA1) that a binary was built from, as reported +// in the version-string of external tools, such as containerd, or runC. +type Commit struct { + ID string // ID is the actual commit ID of external tool. + Expected string // Expected is the commit ID of external tool expected by dockerd as set at build time. +} + +// Info contains response of Engine API: +// GET "/info" +type Info struct { + ID string + Containers int + ContainersRunning int + ContainersPaused int + ContainersStopped int + Images int + Driver string + DriverStatus [][2]string + SystemStatus [][2]string + Plugins PluginsInfo + MemoryLimit bool + SwapLimit bool + KernelMemory bool + CPUCfsPeriod bool `json:"CpuCfsPeriod"` + CPUCfsQuota bool `json:"CpuCfsQuota"` + CPUShares bool + CPUSet bool + IPv4Forwarding bool + BridgeNfIptables bool + BridgeNfIP6tables bool `json:"BridgeNfIp6tables"` + Debug bool + NFd int + OomKillDisable bool + NGoroutines int + SystemTime string + LoggingDriver string + CgroupDriver string + NEventsListener int + KernelVersion string + OperatingSystem string + OSType string + Architecture string + IndexServerAddress string + RegistryConfig *registry.ServiceConfig + NCPU int + MemTotal int64 + DockerRootDir string + HTTPProxy string `json:"HttpProxy"` + HTTPSProxy string `json:"HttpsProxy"` + NoProxy string + Name string + Labels []string + ExperimentalBuild bool + ServerVersion string + ClusterStore string + ClusterAdvertise string + Runtimes map[string]Runtime + DefaultRuntime string + Swarm swarm.Info + // LiveRestoreEnabled determines whether containers should be kept + // running when the daemon is shutdown or upon daemon start if + // running containers are detected + LiveRestoreEnabled bool + Isolation container.Isolation + InitBinary string + ContainerdCommit Commit + RuncCommit Commit + InitCommit Commit + SecurityOptions []string +} + +// KeyValue holds a key/value pair +type KeyValue struct { + Key, Value string +} + +// SecurityOpt contains the name and options of a security option +type SecurityOpt struct { + Name string + Options []KeyValue +} + +// DecodeSecurityOptions decodes a security options string slice to a type safe +// SecurityOpt +func DecodeSecurityOptions(opts []string) ([]SecurityOpt, error) { + so := []SecurityOpt{} + for _, opt := range opts { + // support output from a < 1.13 docker daemon + if !strings.Contains(opt, "=") { + so = append(so, SecurityOpt{Name: opt}) + continue + } + secopt := SecurityOpt{} + split := strings.Split(opt, ",") + for _, s := range split { + kv := strings.SplitN(s, "=", 2) + if len(kv) != 2 { + return nil, fmt.Errorf("invalid 
security option %q", s) + } + if kv[0] == "" || kv[1] == "" { + return nil, errors.New("invalid empty security option") + } + if kv[0] == "name" { + secopt.Name = kv[1] + continue + } + secopt.Options = append(secopt.Options, KeyValue{Key: kv[0], Value: kv[1]}) + } + so = append(so, secopt) + } + return so, nil +} + +// PluginsInfo is a temp struct holding Plugins name +// registered with docker daemon. It is used by Info struct +type PluginsInfo struct { + // List of Volume plugins registered + Volume []string + // List of Network plugins registered + Network []string + // List of Authorization plugins registered + Authorization []string + // List of Log plugins registered + Log []string +} + +// ExecStartCheck is a temp struct used by execStart +// Config fields is part of ExecConfig in runconfig package +type ExecStartCheck struct { + // ExecStart will first check if it's detached + Detach bool + // Check if there's a tty + Tty bool +} + +// HealthcheckResult stores information about a single run of a healthcheck probe +type HealthcheckResult struct { + Start time.Time // Start is the time this check started + End time.Time // End is the time this check ended + ExitCode int // ExitCode meanings: 0=healthy, 1=unhealthy, 2=reserved (considered unhealthy), else=error running probe + Output string // Output from last check +} + +// Health states +const ( + NoHealthcheck = "none" // Indicates there is no healthcheck + Starting = "starting" // Starting indicates that the container is not yet ready + Healthy = "healthy" // Healthy indicates that the container is running correctly + Unhealthy = "unhealthy" // Unhealthy indicates that the container has a problem +) + +// Health stores information about the container's healthcheck results +type Health struct { + Status string // Status is one of Starting, Healthy or Unhealthy + FailingStreak int // FailingStreak is the number of consecutive failures + Log []*HealthcheckResult // Log contains the last few results (oldest first) +} + +// ContainerState stores container's running state +// it's part of ContainerJSONBase and will return by "inspect" command +type ContainerState struct { + Status string // String representation of the container state. Can be one of "created", "running", "paused", "restarting", "removing", "exited", or "dead" + Running bool + Paused bool + Restarting bool + OOMKilled bool + Dead bool + Pid int + ExitCode int + Error string + StartedAt string + FinishedAt string + Health *Health `json:",omitempty"` +} + +// ContainerNode stores information about the node that a container +// is running on. 
It's only available in Docker Swarm +type ContainerNode struct { + ID string + IPAddress string `json:"IP"` + Addr string + Name string + Cpus int + Memory int64 + Labels map[string]string +} + +// ContainerJSONBase contains response of Engine API: +// GET "/containers/{name:.*}/json" +type ContainerJSONBase struct { + ID string `json:"Id"` + Created string + Path string + Args []string + State *ContainerState + Image string + ResolvConfPath string + HostnamePath string + HostsPath string + LogPath string + Node *ContainerNode `json:",omitempty"` + Name string + RestartCount int + Driver string + MountLabel string + ProcessLabel string + AppArmorProfile string + ExecIDs []string + HostConfig *container.HostConfig + GraphDriver GraphDriverData + SizeRw *int64 `json:",omitempty"` + SizeRootFs *int64 `json:",omitempty"` +} + +// ContainerJSON is newly used struct along with MountPoint +type ContainerJSON struct { + *ContainerJSONBase + Mounts []MountPoint + Config *container.Config + NetworkSettings *NetworkSettings +} + +// NetworkSettings exposes the network settings in the api +type NetworkSettings struct { + NetworkSettingsBase + DefaultNetworkSettings + Networks map[string]*network.EndpointSettings +} + +// SummaryNetworkSettings provides a summary of container's networks +// in /containers/json +type SummaryNetworkSettings struct { + Networks map[string]*network.EndpointSettings +} + +// NetworkSettingsBase holds basic information about networks +type NetworkSettingsBase struct { + Bridge string // Bridge is the Bridge name the network uses(e.g. `docker0`) + SandboxID string // SandboxID uniquely represents a container's network stack + HairpinMode bool // HairpinMode specifies if hairpin NAT should be enabled on the virtual interface + LinkLocalIPv6Address string // LinkLocalIPv6Address is an IPv6 unicast address using the link-local prefix + LinkLocalIPv6PrefixLen int // LinkLocalIPv6PrefixLen is the prefix length of an IPv6 unicast address + Ports nat.PortMap // Ports is a collection of PortBinding indexed by Port + SandboxKey string // SandboxKey identifies the sandbox + SecondaryIPAddresses []network.Address + SecondaryIPv6Addresses []network.Address +} + +// DefaultNetworkSettings holds network information +// during the 2 release deprecation period. +// It will be removed in Docker 1.11. +type DefaultNetworkSettings struct { + EndpointID string // EndpointID uniquely represents a service endpoint in a Sandbox + Gateway string // Gateway holds the gateway address for the network + GlobalIPv6Address string // GlobalIPv6Address holds network's global IPv6 address + GlobalIPv6PrefixLen int // GlobalIPv6PrefixLen represents mask length of network's global IPv6 address + IPAddress string // IPAddress holds the IPv4 address for the network + IPPrefixLen int // IPPrefixLen represents mask length of network's IPv4 address + IPv6Gateway string // IPv6Gateway holds gateway address specific for IPv6 + MacAddress string // MacAddress holds the MAC address for the network +} + +// MountPoint represents a mount point configuration inside the container. +// This is used for reporting the mountpoints in use by a container. 
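+// An illustrative entry, as it might be reported in `docker inspect` output
+// (values are examples only):
+//
+//	{"Type":"volume","Name":"data","Source":"/var/lib/docker/volumes/data/_data",
+//	 "Destination":"/data","Driver":"local","Mode":"z","RW":true,"Propagation":""}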
+type MountPoint struct { + Type mount.Type `json:",omitempty"` + Name string `json:",omitempty"` + Source string + Destination string + Driver string `json:",omitempty"` + Mode string + RW bool + Propagation mount.Propagation +} + +// NetworkResource is the body of the "get network" http response message +type NetworkResource struct { + Name string // Name is the requested name of the network + ID string `json:"Id"` // ID uniquely identifies a network on a single machine + Created time.Time // Created is the time the network created + Scope string // Scope describes the level at which the network exists (e.g. `swarm` for cluster-wide or `local` for machine level) + Driver string // Driver is the Driver name used to create the network (e.g. `bridge`, `overlay`) + EnableIPv6 bool // EnableIPv6 represents whether to enable IPv6 + IPAM network.IPAM // IPAM is the network's IP Address Management + Internal bool // Internal represents if the network is used internal only + Attachable bool // Attachable represents if the global scope is manually attachable by regular containers from workers in swarm mode. + Ingress bool // Ingress indicates the network is providing the routing-mesh for the swarm cluster. + ConfigFrom network.ConfigReference // ConfigFrom specifies the source which will provide the configuration for this network. + ConfigOnly bool // ConfigOnly networks are place-holder networks for network configurations to be used by other networks. ConfigOnly networks cannot be used directly to run containers or services. + Containers map[string]EndpointResource // Containers contains endpoints belonging to the network + Options map[string]string // Options holds the network specific options to use for when creating the network + Labels map[string]string // Labels holds metadata specific to the network being created + Peers []network.PeerInfo `json:",omitempty"` // List of peer nodes for an overlay network + Services map[string]network.ServiceInfo `json:",omitempty"` +} + +// EndpointResource contains network resources allocated and used for a container in a network +type EndpointResource struct { + Name string + EndpointID string + MacAddress string + IPv4Address string + IPv6Address string +} + +// NetworkCreate is the expected body of the "create network" http request message +type NetworkCreate struct { + // Check for networks with duplicate names. + // Network is primarily keyed based on a random ID and not on the name. + // Network name is strictly a user-friendly alias to the network + // which is uniquely identified using ID. + // And there is no guaranteed way to check for duplicates. + // Option CheckDuplicate is there to provide a best effort checking of any networks + // which has the same name but it is not guaranteed to catch all name collisions. + CheckDuplicate bool + Driver string + Scope string + EnableIPv6 bool + IPAM *network.IPAM + Internal bool + Attachable bool + Ingress bool + ConfigOnly bool + ConfigFrom *network.ConfigReference + Options map[string]string + Labels map[string]string +} + +// NetworkCreateRequest is the request message sent to the server for network create call. 
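+// A minimal illustrative request (the name "backend" is an example only)
+// could be assembled as:
+//
+//	req := NetworkCreateRequest{
+//		Name:          "backend",
+//		NetworkCreate: NetworkCreate{Driver: "bridge", CheckDuplicate: true},
+//	}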
+type NetworkCreateRequest struct { + NetworkCreate + Name string +} + +// NetworkCreateResponse is the response message sent by the server for network create call +type NetworkCreateResponse struct { + ID string `json:"Id"` + Warning string +} + +// NetworkConnect represents the data to be used to connect a container to the network +type NetworkConnect struct { + Container string + EndpointConfig *network.EndpointSettings `json:",omitempty"` +} + +// NetworkDisconnect represents the data to be used to disconnect a container from the network +type NetworkDisconnect struct { + Container string + Force bool +} + +// Checkpoint represents the details of a checkpoint +type Checkpoint struct { + Name string // Name is the name of the checkpoint +} + +// Runtime describes an OCI runtime +type Runtime struct { + Path string `json:"path"` + Args []string `json:"runtimeArgs,omitempty"` +} + +// DiskUsage contains response of Engine API: +// GET "/system/df" +type DiskUsage struct { + LayersSize int64 + Images []*ImageSummary + Containers []*Container + Volumes []*Volume +} + +// ContainersPruneReport contains the response for Engine API: +// POST "/containers/prune" +type ContainersPruneReport struct { + ContainersDeleted []string + SpaceReclaimed uint64 +} + +// VolumesPruneReport contains the response for Engine API: +// POST "/volumes/prune" +type VolumesPruneReport struct { + VolumesDeleted []string + SpaceReclaimed uint64 +} + +// ImagesPruneReport contains the response for Engine API: +// POST "/images/prune" +type ImagesPruneReport struct { + ImagesDeleted []ImageDeleteResponseItem + SpaceReclaimed uint64 +} + +// NetworksPruneReport contains the response for Engine API: +// POST "/networks/prune" +type NetworksPruneReport struct { + NetworksDeleted []string +} + +// SecretCreateResponse contains the information returned to a client +// on the creation of a new secret. +type SecretCreateResponse struct { + // ID is the id of the created secret. + ID string +} + +// SecretListOptions holds parameters to list secrets +type SecretListOptions struct { + Filters filters.Args +} + +// ConfigCreateResponse contains the information returned to a client +// on the creation of a new config. +type ConfigCreateResponse struct { + // ID is the id of the created config. + ID string +} + +// ConfigListOptions holds parameters to list configs +type ConfigListOptions struct { + Filters filters.Args +} + +// PushResult contains the tag, manifest digest, and manifest size from the +// push. It's used to signal this information to the trust code in the client +// so it can sign the manifest if necessary. +type PushResult struct { + Tag string + Digest string + Size int +} + +// BuildResult contains the image id of a successful build +type BuildResult struct { + ID string +} diff --git a/components/engine/api/types/versions/README.md b/components/engine/api/types/versions/README.md new file mode 100644 index 0000000000..1ef911edb0 --- /dev/null +++ b/components/engine/api/types/versions/README.md @@ -0,0 +1,14 @@ +# Legacy API type versions + +This package includes types for legacy API versions. The stable version of the API types live in `api/types/*.go`. + +Consider moving a type here when you need to keep backwards compatibility in the API. This legacy types are organized by the latest API version they appear in. For instance, types in the `v1p19` package are valid for API versions below or equal `1.19`. 
Types in the `v1p20` package are valid for the API version `1.20`, since the versions below that will use the legacy types in `v1p19`.
+
+## Package name conventions
+
+The package name convention is to use `v` as a prefix for the version number and `p` (for "patch") as a separator. We use this nomenclature due to a few restrictions in the Go package name convention:
+
+1. We cannot use `.` because it's interpreted by the language; think of `v1.20.CallFunction`.
+2. We cannot use `_` because golint complains about it. The code is actually valid, but it arguably looks stranger: `v1_20.CallFunction`.
+
+For instance, if you need to modify a type that was available in version `1.21` of the API but will have different fields in version `1.22`, create a new package under `api/types/versions/v1p21`.
diff --git a/components/engine/api/types/versions/compare.go b/components/engine/api/types/versions/compare.go
new file mode 100644
index 0000000000..611d4fed66
--- /dev/null
+++ b/components/engine/api/types/versions/compare.go
@@ -0,0 +1,62 @@
+package versions
+
+import (
+	"strconv"
+	"strings"
+)
+
+// compare compares two version strings.
+// It returns -1 if v1 < v2, 1 if v1 > v2, 0 otherwise.
+func compare(v1, v2 string) int {
+	var (
+		currTab  = strings.Split(v1, ".")
+		otherTab = strings.Split(v2, ".")
+	)
+
+	max := len(currTab)
+	if len(otherTab) > max {
+		max = len(otherTab)
+	}
+	for i := 0; i < max; i++ {
+		var currInt, otherInt int
+
+		if len(currTab) > i {
+			currInt, _ = strconv.Atoi(currTab[i])
+		}
+		if len(otherTab) > i {
+			otherInt, _ = strconv.Atoi(otherTab[i])
+		}
+		if currInt > otherInt {
+			return 1
+		}
+		if otherInt > currInt {
+			return -1
+		}
+	}
+	return 0
+}
+
+// LessThan checks if a version is less than another
+func LessThan(v, other string) bool {
+	return compare(v, other) == -1
+}
+
+// LessThanOrEqualTo checks if a version is less than or equal to another
+func LessThanOrEqualTo(v, other string) bool {
+	return compare(v, other) <= 0
+}
+
+// GreaterThan checks if a version is greater than another
+func GreaterThan(v, other string) bool {
+	return compare(v, other) == 1
+}
+
+// GreaterThanOrEqualTo checks if a version is greater than or equal to another
+func GreaterThanOrEqualTo(v, other string) bool {
+	return compare(v, other) >= 0
+}
+
+// Equal checks if a version is equal to another
+func Equal(v, other string) bool {
+	return compare(v, other) == 0
+}
diff --git a/components/engine/api/types/versions/compare_test.go b/components/engine/api/types/versions/compare_test.go
new file mode 100644
index 0000000000..c2b96869f7
--- /dev/null
+++ b/components/engine/api/types/versions/compare_test.go
@@ -0,0 +1,26 @@
+package versions
+
+import (
+	"testing"
+)
+
+func assertVersion(t *testing.T, a, b string, result int) {
+	if r := compare(a, b); r != result {
+		t.Fatalf("Unexpected version comparison result.
Found %d, expected %d", r, result) + } +} + +func TestCompareVersion(t *testing.T) { + assertVersion(t, "1.12", "1.12", 0) + assertVersion(t, "1.0.0", "1", 0) + assertVersion(t, "1", "1.0.0", 0) + assertVersion(t, "1.05.00.0156", "1.0.221.9289", 1) + assertVersion(t, "1", "1.0.1", -1) + assertVersion(t, "1.0.1", "1", 1) + assertVersion(t, "1.0.1", "1.0.2", -1) + assertVersion(t, "1.0.2", "1.0.3", -1) + assertVersion(t, "1.0.3", "1.1", -1) + assertVersion(t, "1.1", "1.1.1", -1) + assertVersion(t, "1.1.1", "1.1.2", -1) + assertVersion(t, "1.1.2", "1.2", -1) +} diff --git a/components/engine/api/types/versions/v1p19/types.go b/components/engine/api/types/versions/v1p19/types.go new file mode 100644 index 0000000000..dc13150545 --- /dev/null +++ b/components/engine/api/types/versions/v1p19/types.go @@ -0,0 +1,35 @@ +// Package v1p19 provides specific API types for the API version 1, patch 19. +package v1p19 + +import ( + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/versions/v1p20" + "github.com/docker/go-connections/nat" +) + +// ContainerJSON is a backcompatibility struct for APIs prior to 1.20. +// Note this is not used by the Windows daemon. +type ContainerJSON struct { + *types.ContainerJSONBase + Volumes map[string]string + VolumesRW map[string]bool + Config *ContainerConfig + NetworkSettings *v1p20.NetworkSettings +} + +// ContainerConfig is a backcompatibility struct for APIs prior to 1.20. +type ContainerConfig struct { + *container.Config + + MacAddress string + NetworkDisabled bool + ExposedPorts map[nat.Port]struct{} + + // backward compatibility, they now live in HostConfig + VolumeDriver string + Memory int64 + MemorySwap int64 + CPUShares int64 `json:"CpuShares"` + CPUSet string `json:"Cpuset"` +} diff --git a/components/engine/api/types/versions/v1p20/types.go b/components/engine/api/types/versions/v1p20/types.go new file mode 100644 index 0000000000..94a06d7452 --- /dev/null +++ b/components/engine/api/types/versions/v1p20/types.go @@ -0,0 +1,40 @@ +// Package v1p20 provides specific API types for the API version 1, patch 20. +package v1p20 + +import ( + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + "github.com/docker/go-connections/nat" +) + +// ContainerJSON is a backcompatibility struct for the API 1.20 +type ContainerJSON struct { + *types.ContainerJSONBase + Mounts []types.MountPoint + Config *ContainerConfig + NetworkSettings *NetworkSettings +} + +// ContainerConfig is a backcompatibility struct used in ContainerJSON for the API 1.20 +type ContainerConfig struct { + *container.Config + + MacAddress string + NetworkDisabled bool + ExposedPorts map[nat.Port]struct{} + + // backward compatibility, they now live in HostConfig + VolumeDriver string +} + +// StatsJSON is a backcompatibility struct used in Stats for APIs prior to 1.21 +type StatsJSON struct { + types.Stats + Network types.NetworkStats `json:"network,omitempty"` +} + +// NetworkSettings is a backward compatible struct for APIs prior to 1.21 +type NetworkSettings struct { + types.NetworkSettingsBase + types.DefaultNetworkSettings +} diff --git a/components/engine/api/types/volume.go b/components/engine/api/types/volume.go new file mode 100644 index 0000000000..da4f8ebd9c --- /dev/null +++ b/components/engine/api/types/volume.go @@ -0,0 +1,58 @@ +package types + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +// Volume volume +// swagger:model Volume +type Volume struct { + + // Name of the volume driver used by the volume. + // Required: true + Driver string `json:"Driver"` + + // User-defined key/value metadata. + // Required: true + Labels map[string]string `json:"Labels"` + + // Mount path of the volume on the host. + // Required: true + Mountpoint string `json:"Mountpoint"` + + // Name of the volume. + // Required: true + Name string `json:"Name"` + + // The driver specific options used when creating the volume. + // Required: true + Options map[string]string `json:"Options"` + + // The level at which the volume exists. Either `global` for cluster-wide, or `local` for machine level. + // Required: true + Scope string `json:"Scope"` + + // Low-level details about the volume, provided by the volume driver. + // Details are returned as a map with key/value pairs: + // `{"key":"value","key2":"value2"}`. + // + // The `Status` field is optional, and is omitted if the volume driver + // does not support this feature. + // + Status map[string]interface{} `json:"Status,omitempty"` + + // usage data + UsageData *VolumeUsageData `json:"UsageData,omitempty"` +} + +// VolumeUsageData volume usage data +// swagger:model VolumeUsageData +type VolumeUsageData struct { + + // The number of containers referencing this volume. + // Required: true + RefCount int64 `json:"RefCount"` + + // The disk space used by the volume (local driver only) + // Required: true + Size int64 `json:"Size"` +} diff --git a/components/engine/api/types/volume/volumes_create.go b/components/engine/api/types/volume/volumes_create.go new file mode 100644 index 0000000000..9f70e43ca4 --- /dev/null +++ b/components/engine/api/types/volume/volumes_create.go @@ -0,0 +1,29 @@ +package volume + +// ---------------------------------------------------------------------------- +// DO NOT EDIT THIS FILE +// This file was generated by `swagger generate operation` +// +// See hack/generate-swagger-api.sh +// ---------------------------------------------------------------------------- + +// VolumesCreateBody volumes create body +// swagger:model VolumesCreateBody +type VolumesCreateBody struct { + + // Name of the volume driver to use. + // Required: true + Driver string `json:"Driver"` + + // A mapping of driver options and values. These options are passed directly to the driver and are driver specific. + // Required: true + DriverOpts map[string]string `json:"DriverOpts"` + + // User-defined key/value metadata. + // Required: true + Labels map[string]string `json:"Labels"` + + // The new volume's name. If not specified, Docker generates a name. 
+ // Required: true + Name string `json:"Name"` +} diff --git a/components/engine/api/types/volume/volumes_list.go b/components/engine/api/types/volume/volumes_list.go new file mode 100644 index 0000000000..833dad9330 --- /dev/null +++ b/components/engine/api/types/volume/volumes_list.go @@ -0,0 +1,23 @@ +package volume + +// ---------------------------------------------------------------------------- +// DO NOT EDIT THIS FILE +// This file was generated by `swagger generate operation` +// +// See hack/generate-swagger-api.sh +// ---------------------------------------------------------------------------- + +import "github.com/docker/docker/api/types" + +// VolumesListOKBody volumes list o k body +// swagger:model VolumesListOKBody +type VolumesListOKBody struct { + + // List of volumes + // Required: true + Volumes []*types.Volume `json:"Volumes"` + + // Warnings that occurred when fetching the list of volumes + // Required: true + Warnings []string `json:"Warnings"` +} diff --git a/components/engine/builder/builder.go b/components/engine/builder/builder.go new file mode 100644 index 0000000000..cc7c25955a --- /dev/null +++ b/components/engine/builder/builder.go @@ -0,0 +1,99 @@ +// Package builder defines interfaces for any Docker builder to implement. +// +// Historically, only server-side Dockerfile interpreters existed. +// This package allows for other implementations of Docker builders. +package builder + +import ( + "io" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/api/types/container" + containerpkg "github.com/docker/docker/container" +) + +const ( + // DefaultDockerfileName is the Default filename with Docker commands, read by docker build + DefaultDockerfileName string = "Dockerfile" +) + +// Source defines a location that can be used as a source for the ADD/COPY +// instructions in the builder. +type Source interface { + // Root returns root path for accessing source + Root() string + // Close allows to signal that the filesystem tree won't be used anymore. + // For Context implementations using a temporary directory, it is recommended to + // delete the temporary directory in Close(). + Close() error + // Hash returns a checksum for a file + Hash(path string) (string, error) +} + +// Backend abstracts calls to a Docker Daemon. +type Backend interface { + ImageBackend + + // ContainerAttachRaw attaches to container. + ContainerAttachRaw(cID string, stdin io.ReadCloser, stdout, stderr io.Writer, stream bool, attached chan struct{}) error + // ContainerCreate creates a new Docker container and returns potential warnings + ContainerCreate(config types.ContainerCreateConfig) (container.ContainerCreateCreatedBody, error) + // ContainerRm removes a container specified by `id`. + ContainerRm(name string, config *types.ContainerRmConfig) error + // Commit creates a new Docker image from an existing Docker container. + Commit(string, *backend.ContainerCommitConfig) (string, error) + // ContainerKill stops the container execution abruptly. + ContainerKill(containerID string, sig uint64) error + // ContainerStart starts a new container + ContainerStart(containerID string, hostConfig *container.HostConfig, checkpoint string, checkpointDir string) error + // ContainerWait stops processing until the given container is stopped. 
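+	// An illustrative call pattern (assuming a Backend value b; the wait
+	// condition constant comes from the container package):
+	//
+	//	waitC, err := b.ContainerWait(ctx, id, containerpkg.WaitConditionNotRunning)
+	//	if err == nil {
+	//		status := <-waitC // blocks until the container stops
+	//	}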
+ ContainerWait(ctx context.Context, name string, condition containerpkg.WaitCondition) (<-chan containerpkg.StateStatus, error) + // ContainerCreateWorkdir creates the workdir + ContainerCreateWorkdir(containerID string) error + + // ContainerCopy copies/extracts a source FileInfo to a destination path inside a container + // specified by a container object. + // TODO: extract in the builder instead of passing `decompress` + // TODO: use containerd/fs.changestream instead as a source + CopyOnBuild(containerID string, destPath string, srcRoot string, srcPath string, decompress bool) error +} + +// ImageBackend are the interface methods required from an image component +type ImageBackend interface { + GetImageAndReleasableLayer(ctx context.Context, refOrID string, opts backend.GetImageAndLayerOptions) (Image, ReleaseableLayer, error) +} + +// Result is the output produced by a Builder +type Result struct { + ImageID string + FromImage Image +} + +// ImageCacheBuilder represents a generator for stateful image cache. +type ImageCacheBuilder interface { + // MakeImageCache creates a stateful image cache. + MakeImageCache(cacheFrom []string) ImageCache +} + +// ImageCache abstracts an image cache. +// (parent image, child runconfig) -> child image +type ImageCache interface { + // GetCache returns a reference to a cached image whose parent equals `parent` + // and runconfig equals `cfg`. A cache miss is expected to return an empty ID and a nil error. + GetCache(parentID string, cfg *container.Config) (imageID string, err error) +} + +// Image represents a Docker image used by the builder. +type Image interface { + ImageID() string + RunConfig() *container.Config +} + +// ReleaseableLayer is an image layer that can be mounted and released +type ReleaseableLayer interface { + Release() error + Mount() (string, error) +} diff --git a/components/engine/builder/dockerfile/bflag.go b/components/engine/builder/dockerfile/bflag.go new file mode 100644 index 0000000000..d849661620 --- /dev/null +++ b/components/engine/builder/dockerfile/bflag.go @@ -0,0 +1,183 @@ +package dockerfile + +import ( + "fmt" + "strings" +) + +// FlagType is the type of the build flag +type FlagType int + +const ( + boolType FlagType = iota + stringType +) + +// BFlags contains all flags information for the builder +type BFlags struct { + Args []string // actual flags/args from cmd line + flags map[string]*Flag + used map[string]*Flag + Err error +} + +// Flag contains all information for a flag +type Flag struct { + bf *BFlags + name string + flagType FlagType + Value string +} + +// NewBFlags returns the new BFlags struct +func NewBFlags() *BFlags { + return &BFlags{ + flags: make(map[string]*Flag), + used: make(map[string]*Flag), + } +} + +// NewBFlagsWithArgs returns the new BFlags struct with Args set to args +func NewBFlagsWithArgs(args []string) *BFlags { + flags := NewBFlags() + flags.Args = args + return flags +} + +// AddBool adds a bool flag to BFlags +// Note, any error will be generated when Parse() is called (see Parse). +func (bf *BFlags) AddBool(name string, def bool) *Flag { + flag := bf.addFlag(name, boolType) + if flag == nil { + return nil + } + if def { + flag.Value = "true" + } else { + flag.Value = "false" + } + return flag +} + +// AddString adds a string flag to BFlags +// Note, any error will be generated when Parse() is called (see Parse). 
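+// An illustrative sketch (the flag name "chown" is an example only):
+//
+//	bf := NewBFlagsWithArgs([]string{"--chown=1000:1000"})
+//	chown := bf.AddString("chown", "")
+//	if err := bf.Parse(); err == nil {
+//		fmt.Println(chown.Value) // "1000:1000"
+//	}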
+func (bf *BFlags) AddString(name string, def string) *Flag { + flag := bf.addFlag(name, stringType) + if flag == nil { + return nil + } + flag.Value = def + return flag +} + +// addFlag is a generic func used by the other AddXXX() func +// to add a new flag to the BFlags struct. +// Note, any error will be generated when Parse() is called (see Parse). +func (bf *BFlags) addFlag(name string, flagType FlagType) *Flag { + if _, ok := bf.flags[name]; ok { + bf.Err = fmt.Errorf("Duplicate flag defined: %s", name) + return nil + } + + newFlag := &Flag{ + bf: bf, + name: name, + flagType: flagType, + } + bf.flags[name] = newFlag + + return newFlag +} + +// IsUsed checks if the flag is used +func (fl *Flag) IsUsed() bool { + if _, ok := fl.bf.used[fl.name]; ok { + return true + } + return false +} + +// IsTrue checks if a bool flag is true +func (fl *Flag) IsTrue() bool { + if fl.flagType != boolType { + // Should never get here + panic(fmt.Errorf("Trying to use IsTrue on a non-boolean: %s", fl.name)) + } + return fl.Value == "true" +} + +// Parse parses and checks if the BFlags is valid. +// Any error noticed during the AddXXX() funcs will be generated/returned +// here. We do this because an error during AddXXX() is more like a +// compile time error so it doesn't matter too much when we stop our +// processing as long as we do stop it, so this allows the code +// around AddXXX() to be just: +// defFlag := AddString("description", "") +// w/o needing to add an if-statement around each one. +func (bf *BFlags) Parse() error { + // If there was an error while defining the possible flags + // go ahead and bubble it back up here since we didn't do it + // earlier in the processing + if bf.Err != nil { + return fmt.Errorf("Error setting up flags: %s", bf.Err) + } + + for _, arg := range bf.Args { + if !strings.HasPrefix(arg, "--") { + return fmt.Errorf("Arg should start with -- : %s", arg) + } + + if arg == "--" { + return nil + } + + arg = arg[2:] + value := "" + + index := strings.Index(arg, "=") + if index >= 0 { + value = arg[index+1:] + arg = arg[:index] + } + + flag, ok := bf.flags[arg] + if !ok { + return fmt.Errorf("Unknown flag: %s", arg) + } + + if _, ok = bf.used[arg]; ok { + return fmt.Errorf("Duplicate flag specified: %s", arg) + } + + bf.used[arg] = flag + + switch flag.flagType { + case boolType: + // value == "" is only ok if no "=" was specified + if index >= 0 && value == "" { + return fmt.Errorf("Missing a value on flag: %s", arg) + } + + lower := strings.ToLower(value) + if lower == "" { + flag.Value = "true" + } else if lower == "true" || lower == "false" { + flag.Value = lower + } else { + return fmt.Errorf("Expecting boolean value for flag %s, not: %s", arg, value) + } + + case stringType: + if index < 0 { + return fmt.Errorf("Missing a value on flag: %s", arg) + } + flag.Value = value + + default: + panic("No idea what kind of flag we have! 
Should never get here!") + } + + } + + return nil +} diff --git a/components/engine/builder/dockerfile/bflag_test.go b/components/engine/builder/dockerfile/bflag_test.go new file mode 100644 index 0000000000..ac07e48c14 --- /dev/null +++ b/components/engine/builder/dockerfile/bflag_test.go @@ -0,0 +1,187 @@ +package dockerfile + +import ( + "testing" +) + +func TestBuilderFlags(t *testing.T) { + var expected string + var err error + + // --- + + bf := NewBFlags() + bf.Args = []string{} + if err := bf.Parse(); err != nil { + t.Fatalf("Test1 of %q was supposed to work: %s", bf.Args, err) + } + + // --- + + bf = NewBFlags() + bf.Args = []string{"--"} + if err := bf.Parse(); err != nil { + t.Fatalf("Test2 of %q was supposed to work: %s", bf.Args, err) + } + + // --- + + bf = NewBFlags() + flStr1 := bf.AddString("str1", "") + flBool1 := bf.AddBool("bool1", false) + bf.Args = []string{} + if err = bf.Parse(); err != nil { + t.Fatalf("Test3 of %q was supposed to work: %s", bf.Args, err) + } + + if flStr1.IsUsed() == true { + t.Fatal("Test3 - str1 was not used!") + } + if flBool1.IsUsed() == true { + t.Fatal("Test3 - bool1 was not used!") + } + + // --- + + bf = NewBFlags() + flStr1 = bf.AddString("str1", "HI") + flBool1 = bf.AddBool("bool1", false) + bf.Args = []string{} + + if err = bf.Parse(); err != nil { + t.Fatalf("Test4 of %q was supposed to work: %s", bf.Args, err) + } + + if flStr1.Value != "HI" { + t.Fatal("Str1 was supposed to default to: HI") + } + if flBool1.IsTrue() { + t.Fatal("Bool1 was supposed to default to: false") + } + if flStr1.IsUsed() == true { + t.Fatal("Str1 was not used!") + } + if flBool1.IsUsed() == true { + t.Fatal("Bool1 was not used!") + } + + // --- + + bf = NewBFlags() + flStr1 = bf.AddString("str1", "HI") + bf.Args = []string{"--str1"} + + if err = bf.Parse(); err == nil { + t.Fatalf("Test %q was supposed to fail", bf.Args) + } + + // --- + + bf = NewBFlags() + flStr1 = bf.AddString("str1", "HI") + bf.Args = []string{"--str1="} + + if err = bf.Parse(); err != nil { + t.Fatalf("Test %q was supposed to work: %s", bf.Args, err) + } + + expected = "" + if flStr1.Value != expected { + t.Fatalf("Str1 (%q) should be: %q", flStr1.Value, expected) + } + + // --- + + bf = NewBFlags() + flStr1 = bf.AddString("str1", "HI") + bf.Args = []string{"--str1=BYE"} + + if err = bf.Parse(); err != nil { + t.Fatalf("Test %q was supposed to work: %s", bf.Args, err) + } + + expected = "BYE" + if flStr1.Value != expected { + t.Fatalf("Str1 (%q) should be: %q", flStr1.Value, expected) + } + + // --- + + bf = NewBFlags() + flBool1 = bf.AddBool("bool1", false) + bf.Args = []string{"--bool1"} + + if err = bf.Parse(); err != nil { + t.Fatalf("Test %q was supposed to work: %s", bf.Args, err) + } + + if !flBool1.IsTrue() { + t.Fatal("Test-b1 Bool1 was supposed to be true") + } + + // --- + + bf = NewBFlags() + flBool1 = bf.AddBool("bool1", false) + bf.Args = []string{"--bool1=true"} + + if err = bf.Parse(); err != nil { + t.Fatalf("Test %q was supposed to work: %s", bf.Args, err) + } + + if !flBool1.IsTrue() { + t.Fatal("Test-b2 Bool1 was supposed to be true") + } + + // --- + + bf = NewBFlags() + flBool1 = bf.AddBool("bool1", false) + bf.Args = []string{"--bool1=false"} + + if err = bf.Parse(); err != nil { + t.Fatalf("Test %q was supposed to work: %s", bf.Args, err) + } + + if flBool1.IsTrue() { + t.Fatal("Test-b3 Bool1 was supposed to be false") + } + + // --- + + bf = NewBFlags() + flBool1 = bf.AddBool("bool1", false) + bf.Args = []string{"--bool1=false1"} + + if err = bf.Parse(); err == 
nil { + t.Fatalf("Test %q was supposed to fail", bf.Args) + } + + // --- + + bf = NewBFlags() + flBool1 = bf.AddBool("bool1", false) + bf.Args = []string{"--bool2"} + + if err = bf.Parse(); err == nil { + t.Fatalf("Test %q was supposed to fail", bf.Args) + } + + // --- + + bf = NewBFlags() + flStr1 = bf.AddString("str1", "HI") + flBool1 = bf.AddBool("bool1", false) + bf.Args = []string{"--bool1", "--str1=BYE"} + + if err = bf.Parse(); err != nil { + t.Fatalf("Test %q was supposed to work: %s", bf.Args, err) + } + + if flStr1.Value != "BYE" { + t.Fatalf("Test %s, str1 should be BYE", bf.Args) + } + if !flBool1.IsTrue() { + t.Fatalf("Test %s, bool1 should be true", bf.Args) + } +} diff --git a/components/engine/builder/dockerfile/buildargs.go b/components/engine/builder/dockerfile/buildargs.go new file mode 100644 index 0000000000..44687aaff4 --- /dev/null +++ b/components/engine/builder/dockerfile/buildargs.go @@ -0,0 +1,147 @@ +package dockerfile + +import ( + "fmt" + "github.com/docker/docker/runconfig/opts" + "io" +) + +// builtinAllowedBuildArgs is list of built-in allowed build args +// these args are considered transparent and are excluded from the image history. +// Filtering from history is implemented in dispatchers.go +var builtinAllowedBuildArgs = map[string]bool{ + "HTTP_PROXY": true, + "http_proxy": true, + "HTTPS_PROXY": true, + "https_proxy": true, + "FTP_PROXY": true, + "ftp_proxy": true, + "NO_PROXY": true, + "no_proxy": true, +} + +// buildArgs manages arguments used by the builder +type buildArgs struct { + // args that are allowed for expansion/substitution and passing to commands in 'run'. + allowedBuildArgs map[string]*string + // args defined before the first `FROM` in a Dockerfile + allowedMetaArgs map[string]*string + // args referenced by the Dockerfile + referencedArgs map[string]struct{} + // args provided by the user on the command line + argsFromOptions map[string]*string +} + +func newBuildArgs(argsFromOptions map[string]*string) *buildArgs { + return &buildArgs{ + allowedBuildArgs: make(map[string]*string), + allowedMetaArgs: make(map[string]*string), + referencedArgs: make(map[string]struct{}), + argsFromOptions: argsFromOptions, + } +} + +// WarnOnUnusedBuildArgs checks if there are any leftover build-args that were +// passed but not consumed during build. Print a warning, if there are any. +func (b *buildArgs) WarnOnUnusedBuildArgs(out io.Writer) { + leftoverArgs := []string{} + for arg := range b.argsFromOptions { + _, isReferenced := b.referencedArgs[arg] + _, isBuiltin := builtinAllowedBuildArgs[arg] + if !isBuiltin && !isReferenced { + leftoverArgs = append(leftoverArgs, arg) + } + } + if len(leftoverArgs) > 0 { + fmt.Fprintf(out, "[Warning] One or more build-args %v were not consumed\n", leftoverArgs) + } +} + +// ResetAllowed clears the list of args that are allowed to be used by a +// directive +func (b *buildArgs) ResetAllowed() { + b.allowedBuildArgs = make(map[string]*string) +} + +// AddMetaArg adds a new meta arg that can be used by FROM directives +func (b *buildArgs) AddMetaArg(key string, value *string) { + b.allowedMetaArgs[key] = value +} + +// AddArg adds a new arg that can be used by directives +func (b *buildArgs) AddArg(key string, value *string) { + b.allowedBuildArgs[key] = value + b.referencedArgs[key] = struct{}{} +} + +// IsReferencedOrNotBuiltin checks if the key is a built-in arg, or if it has been +// referenced by the Dockerfile. 
Returns true if the arg is not a builtin or +// if the builtin has been referenced in the Dockerfile. +func (b *buildArgs) IsReferencedOrNotBuiltin(key string) bool { + _, isBuiltin := builtinAllowedBuildArgs[key] + _, isAllowed := b.allowedBuildArgs[key] + return isAllowed || !isBuiltin +} + +// GetAllAllowed returns a mapping with all the allowed args +func (b *buildArgs) GetAllAllowed() map[string]string { + return b.getAllFromMapping(b.allowedBuildArgs) +} + +// GetAllMeta returns a mapping with all the meta meta args +func (b *buildArgs) GetAllMeta() map[string]string { + return b.getAllFromMapping(b.allowedMetaArgs) +} + +func (b *buildArgs) getAllFromMapping(source map[string]*string) map[string]string { + m := make(map[string]string) + + keys := keysFromMaps(source, builtinAllowedBuildArgs) + for _, key := range keys { + v, ok := b.getBuildArg(key, source) + if ok { + m[key] = v + } + } + return m +} + +// FilterAllowed returns all allowed args without the filtered args +func (b *buildArgs) FilterAllowed(filter []string) []string { + envs := []string{} + configEnv := opts.ConvertKVStringsToMap(filter) + + for key, val := range b.GetAllAllowed() { + if _, ok := configEnv[key]; !ok { + envs = append(envs, fmt.Sprintf("%s=%s", key, val)) + } + } + return envs +} + +func (b *buildArgs) getBuildArg(key string, mapping map[string]*string) (string, bool) { + defaultValue, exists := mapping[key] + // Return override from options if one is defined + if v, ok := b.argsFromOptions[key]; ok && v != nil { + return *v, ok + } + + if defaultValue == nil { + if v, ok := b.allowedMetaArgs[key]; ok && v != nil { + return *v, ok + } + return "", false + } + return *defaultValue, exists +} + +func keysFromMaps(source map[string]*string, builtin map[string]bool) []string { + keys := []string{} + for key := range source { + keys = append(keys, key) + } + for key := range builtin { + keys = append(keys, key) + } + return keys +} diff --git a/components/engine/builder/dockerfile/buildargs_test.go b/components/engine/builder/dockerfile/buildargs_test.go new file mode 100644 index 0000000000..0288770665 --- /dev/null +++ b/components/engine/builder/dockerfile/buildargs_test.go @@ -0,0 +1,100 @@ +package dockerfile + +import ( + "testing" + + "bytes" + "github.com/stretchr/testify/assert" +) + +func strPtr(source string) *string { + return &source +} + +func TestGetAllAllowed(t *testing.T) { + buildArgs := newBuildArgs(map[string]*string{ + "ArgNotUsedInDockerfile": strPtr("fromopt1"), + "ArgOverriddenByOptions": strPtr("fromopt2"), + "ArgNoDefaultInDockerfileFromOptions": strPtr("fromopt3"), + "HTTP_PROXY": strPtr("theproxy"), + }) + + buildArgs.AddMetaArg("ArgFromMeta", strPtr("frommeta1")) + buildArgs.AddMetaArg("ArgFromMetaOverriden", strPtr("frommeta2")) + buildArgs.AddMetaArg("ArgFromMetaNotUsed", strPtr("frommeta3")) + + buildArgs.AddArg("ArgOverriddenByOptions", strPtr("fromdockerfile2")) + buildArgs.AddArg("ArgWithDefaultInDockerfile", strPtr("fromdockerfile1")) + buildArgs.AddArg("ArgNoDefaultInDockerfile", nil) + buildArgs.AddArg("ArgNoDefaultInDockerfileFromOptions", nil) + buildArgs.AddArg("ArgFromMeta", nil) + buildArgs.AddArg("ArgFromMetaOverriden", strPtr("fromdockerfile3")) + + all := buildArgs.GetAllAllowed() + expected := map[string]string{ + "HTTP_PROXY": "theproxy", + "ArgOverriddenByOptions": "fromopt2", + "ArgWithDefaultInDockerfile": "fromdockerfile1", + "ArgNoDefaultInDockerfileFromOptions": "fromopt3", + "ArgFromMeta": "frommeta1", + "ArgFromMetaOverriden": "fromdockerfile3", + } + 
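+	// Precedence illustrated by `expected`: command-line options override
+	// Dockerfile defaults (ArgOverriddenByOptions), and meta args apply only
+	// when the ARG declares no default (ArgFromMeta vs ArgFromMetaOverriden).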
assert.Equal(t, expected, all) +} + +func TestGetAllMeta(t *testing.T) { + buildArgs := newBuildArgs(map[string]*string{ + "ArgNotUsedInDockerfile": strPtr("fromopt1"), + "ArgOverriddenByOptions": strPtr("fromopt2"), + "ArgNoDefaultInMetaFromOptions": strPtr("fromopt3"), + "HTTP_PROXY": strPtr("theproxy"), + }) + + buildArgs.AddMetaArg("ArgFromMeta", strPtr("frommeta1")) + buildArgs.AddMetaArg("ArgOverriddenByOptions", strPtr("frommeta2")) + buildArgs.AddMetaArg("ArgNoDefaultInMetaFromOptions", nil) + + all := buildArgs.GetAllMeta() + expected := map[string]string{ + "HTTP_PROXY": "theproxy", + "ArgFromMeta": "frommeta1", + "ArgOverriddenByOptions": "fromopt2", + "ArgNoDefaultInMetaFromOptions": "fromopt3", + } + assert.Equal(t, expected, all) +} + +func TestWarnOnUnusedBuildArgs(t *testing.T) { + buildArgs := newBuildArgs(map[string]*string{ + "ThisArgIsUsed": strPtr("fromopt1"), + "ThisArgIsNotUsed": strPtr("fromopt2"), + "HTTPS_PROXY": strPtr("referenced builtin"), + "HTTP_PROXY": strPtr("unreferenced builtin"), + }) + buildArgs.AddArg("ThisArgIsUsed", nil) + buildArgs.AddArg("HTTPS_PROXY", nil) + + buffer := new(bytes.Buffer) + buildArgs.WarnOnUnusedBuildArgs(buffer) + out := buffer.String() + assert.NotContains(t, out, "ThisArgIsUsed") + assert.NotContains(t, out, "HTTPS_PROXY") + assert.NotContains(t, out, "HTTP_PROXY") + assert.Contains(t, out, "ThisArgIsNotUsed") +} + +func TestIsUnreferencedBuiltin(t *testing.T) { + buildArgs := newBuildArgs(map[string]*string{ + "ThisArgIsUsed": strPtr("fromopt1"), + "ThisArgIsNotUsed": strPtr("fromopt2"), + "HTTPS_PROXY": strPtr("referenced builtin"), + "HTTP_PROXY": strPtr("unreferenced builtin"), + }) + buildArgs.AddArg("ThisArgIsUsed", nil) + buildArgs.AddArg("HTTPS_PROXY", nil) + + assert.True(t, buildArgs.IsReferencedOrNotBuiltin("ThisArgIsUsed")) + assert.True(t, buildArgs.IsReferencedOrNotBuiltin("ThisArgIsNotUsed")) + assert.True(t, buildArgs.IsReferencedOrNotBuiltin("HTTPS_PROXY")) + assert.False(t, buildArgs.IsReferencedOrNotBuiltin("HTTP_PROXY")) +} diff --git a/components/engine/builder/dockerfile/builder.go b/components/engine/builder/dockerfile/builder.go new file mode 100644 index 0000000000..c77d535df4 --- /dev/null +++ b/components/engine/builder/dockerfile/builder.go @@ -0,0 +1,313 @@ +package dockerfile + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/builder" + "github.com/docker/docker/builder/dockerfile/command" + "github.com/docker/docker/builder/dockerfile/parser" + "github.com/docker/docker/builder/remotecontext" + "github.com/docker/docker/pkg/streamformatter" + "github.com/docker/docker/pkg/stringid" + "github.com/pkg/errors" + "golang.org/x/net/context" + "golang.org/x/sync/syncmap" +) + +var validCommitCommands = map[string]bool{ + "cmd": true, + "entrypoint": true, + "healthcheck": true, + "env": true, + "expose": true, + "label": true, + "onbuild": true, + "user": true, + "volume": true, + "workdir": true, +} + +var defaultLogConfig = container.LogConfig{Type: "none"} + +// BuildManager is shared across all Builder objects +type BuildManager struct { + backend builder.Backend + pathCache pathCache // TODO: make this persistent +} + +// NewBuildManager creates a BuildManager +func NewBuildManager(b builder.Backend) *BuildManager { + return &BuildManager{ + backend: b, + pathCache: &syncmap.Map{}, + } +} + +// 
Build starts a new build from a BuildConfig +func (bm *BuildManager) Build(ctx context.Context, config backend.BuildConfig) (*builder.Result, error) { + buildsTriggered.Inc() + if config.Options.Dockerfile == "" { + config.Options.Dockerfile = builder.DefaultDockerfileName + } + + source, dockerfile, err := remotecontext.Detect(config) + if err != nil { + return nil, err + } + if source != nil { + defer func() { + if err := source.Close(); err != nil { + logrus.Debugf("[BUILDER] failed to remove temporary context: %v", err) + } + }() + } + + builderOptions := builderOptions{ + Options: config.Options, + ProgressWriter: config.ProgressWriter, + Backend: bm.backend, + PathCache: bm.pathCache, + } + return newBuilder(ctx, builderOptions).build(source, dockerfile) +} + +// builderOptions are the dependencies required by the builder +type builderOptions struct { + Options *types.ImageBuildOptions + Backend builder.Backend + ProgressWriter backend.ProgressWriter + PathCache pathCache +} + +// Builder is a Dockerfile builder +// It implements the builder.Backend interface. +type Builder struct { + options *types.ImageBuildOptions + + Stdout io.Writer + Stderr io.Writer + Aux *streamformatter.AuxFormatter + Output io.Writer + + docker builder.Backend + clientCtx context.Context + + tmpContainers map[string]struct{} + buildStages *buildStages + disableCommit bool + cacheBusted bool + buildArgs *buildArgs + imageCache builder.ImageCache + imageSources *imageSources + pathCache pathCache +} + +// newBuilder creates a new Dockerfile builder from an optional dockerfile and a Options. +func newBuilder(clientCtx context.Context, options builderOptions) *Builder { + config := options.Options + if config == nil { + config = new(types.ImageBuildOptions) + } + b := &Builder{ + clientCtx: clientCtx, + options: config, + Stdout: options.ProgressWriter.StdoutFormatter, + Stderr: options.ProgressWriter.StderrFormatter, + Aux: options.ProgressWriter.AuxFormatter, + Output: options.ProgressWriter.Output, + docker: options.Backend, + tmpContainers: map[string]struct{}{}, + buildArgs: newBuildArgs(config.BuildArgs), + buildStages: newBuildStages(), + imageSources: newImageSources(clientCtx, options), + pathCache: options.PathCache, + } + return b +} + +func (b *Builder) resetImageCache() { + if icb, ok := b.docker.(builder.ImageCacheBuilder); ok { + b.imageCache = icb.MakeImageCache(b.options.CacheFrom) + } + b.cacheBusted = false +} + +// Build runs the Dockerfile builder by parsing the Dockerfile and executing +// the instructions from the file. +func (b *Builder) build(source builder.Source, dockerfile *parser.Result) (*builder.Result, error) { + defer b.imageSources.Unmount() + + addNodesForLabelOption(dockerfile.AST, b.options.Labels) + + if err := checkDispatchDockerfile(dockerfile.AST); err != nil { + buildsFailed.WithValues(metricsDockerfileSyntaxError).Inc() + return nil, err + } + + dispatchState, err := b.dispatchDockerfileWithCancellation(dockerfile, source) + if err != nil { + return nil, err + } + + if b.options.Target != "" && !dispatchState.isCurrentStage(b.options.Target) { + buildsFailed.WithValues(metricsBuildTargetNotReachableError).Inc() + return nil, errors.Errorf("failed to reach build target %s in Dockerfile", b.options.Target) + } + + b.buildArgs.WarnOnUnusedBuildArgs(b.Stderr) + + if dispatchState.imageID == "" { + buildsFailed.WithValues(metricsDockerfileEmptyError).Inc() + return nil, errors.New("No image was generated. 
Is your Dockerfile empty?") + } + return &builder.Result{ImageID: dispatchState.imageID, FromImage: dispatchState.baseImage}, nil +} + +func emitImageID(aux *streamformatter.AuxFormatter, state *dispatchState) error { + if aux == nil || state.imageID == "" { + return nil + } + return aux.Emit(types.BuildResult{ID: state.imageID}) +} + +func (b *Builder) dispatchDockerfileWithCancellation(dockerfile *parser.Result, source builder.Source) (*dispatchState, error) { + shlex := NewShellLex(dockerfile.EscapeToken) + state := newDispatchState() + total := len(dockerfile.AST.Children) + var err error + for i, n := range dockerfile.AST.Children { + select { + case <-b.clientCtx.Done(): + logrus.Debug("Builder: build cancelled!") + fmt.Fprint(b.Stdout, "Build cancelled") + buildsFailed.WithValues(metricsBuildCanceled).Inc() + return nil, errors.New("Build cancelled") + default: + // Not cancelled yet, keep going... + } + + // If this is a FROM and we have a previous image then + // emit an aux message for that image since it is the + // end of the previous stage + if n.Value == command.From { + if err := emitImageID(b.Aux, state); err != nil { + return nil, err + } + } + + if n.Value == command.From && state.isCurrentStage(b.options.Target) { + break + } + + opts := dispatchOptions{ + state: state, + stepMsg: formatStep(i, total), + node: n, + shlex: shlex, + source: source, + } + if state, err = b.dispatch(opts); err != nil { + if b.options.ForceRemove { + b.clearTmp() + } + return nil, err + } + + fmt.Fprintf(b.Stdout, " ---> %s\n", stringid.TruncateID(state.imageID)) + if b.options.Remove { + b.clearTmp() + } + } + + // Emit a final aux message for the final image + if err := emitImageID(b.Aux, state); err != nil { + return nil, err + } + + return state, nil +} + +func addNodesForLabelOption(dockerfile *parser.Node, labels map[string]string) { + if len(labels) == 0 { + return + } + + node := parser.NodeFromLabels(labels) + dockerfile.Children = append(dockerfile.Children, node) +} + +// BuildFromConfig builds directly from `changes`, treating it as if it were the contents of a Dockerfile +// It will: +// - Call parse.Parse() to get an AST root for the concatenated Dockerfile entries. +// - Do build by calling builder.dispatch() to call all entries' handling routines +// +// BuildFromConfig is used by the /commit endpoint, with the changes +// coming from the query parameter of the same name. +// +// TODO: Remove? 
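+// +// For illustration, a caller such as the /commit endpoint might pass changes +// like []string{"CMD [\"nginx\"]", "EXPOSE 80"}; each entry is dispatched as if +// it were a line of a Dockerfile, and only the commands in validCommitCommands +// are accepted.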
+func BuildFromConfig(config *container.Config, changes []string) (*container.Config, error) { + if len(changes) == 0 { + return config, nil + } + + b := newBuilder(context.Background(), builderOptions{}) + + dockerfile, err := parser.Parse(bytes.NewBufferString(strings.Join(changes, "\n"))) + if err != nil { + return nil, err + } + + // ensure that the commands are valid + for _, n := range dockerfile.AST.Children { + if !validCommitCommands[n.Value] { + return nil, fmt.Errorf("%s is not a valid change command", n.Value) + } + } + + b.Stdout = ioutil.Discard + b.Stderr = ioutil.Discard + b.disableCommit = true + + if err := checkDispatchDockerfile(dockerfile.AST); err != nil { + return nil, err + } + dispatchState := newDispatchState() + dispatchState.runConfig = config + return dispatchFromDockerfile(b, dockerfile, dispatchState) +} + +func checkDispatchDockerfile(dockerfile *parser.Node) error { + for _, n := range dockerfile.Children { + if err := checkDispatch(n); err != nil { + return errors.Wrapf(err, "Dockerfile parse error line %d", n.StartLine) + } + } + return nil +} + +func dispatchFromDockerfile(b *Builder, result *parser.Result, dispatchState *dispatchState) (*container.Config, error) { + shlex := NewShellLex(result.EscapeToken) + ast := result.AST + total := len(ast.Children) + + for i, n := range ast.Children { + opts := dispatchOptions{ + state: dispatchState, + stepMsg: formatStep(i, total), + node: n, + shlex: shlex, + } + if _, err := b.dispatch(opts); err != nil { + return nil, err + } + } + return dispatchState.runConfig, nil +} diff --git a/components/engine/builder/dockerfile/builder_test.go b/components/engine/builder/dockerfile/builder_test.go new file mode 100644 index 0000000000..5fedca0fdd --- /dev/null +++ b/components/engine/builder/dockerfile/builder_test.go @@ -0,0 +1,34 @@ +package dockerfile + +import ( + "strings" + "testing" + + "github.com/docker/docker/builder/dockerfile/parser" + "github.com/stretchr/testify/assert" +) + +func TestAddNodesForLabelOption(t *testing.T) { + dockerfile := "FROM scratch" + result, err := parser.Parse(strings.NewReader(dockerfile)) + assert.NoError(t, err) + + labels := map[string]string{ + "org.e": "cli-e", + "org.d": "cli-d", + "org.c": "cli-c", + "org.b": "cli-b", + "org.a": "cli-a", + } + nodes := result.AST + addNodesForLabelOption(nodes, labels) + + expected := []string{ + "FROM scratch", + `LABEL "org.a"='cli-a' "org.b"='cli-b' "org.c"='cli-c' "org.d"='cli-d' "org.e"='cli-e'`, + } + assert.Len(t, nodes.Children, 2) + for i, v := range nodes.Children { + assert.Equal(t, expected[i], v.Original) + } +} diff --git a/components/engine/builder/dockerfile/builder_unix.go b/components/engine/builder/dockerfile/builder_unix.go new file mode 100644 index 0000000000..76a7ce74f9 --- /dev/null +++ b/components/engine/builder/dockerfile/builder_unix.go @@ -0,0 +1,5 @@ +// +build !windows + +package dockerfile + +var defaultShell = []string{"/bin/sh", "-c"} diff --git a/components/engine/builder/dockerfile/builder_windows.go b/components/engine/builder/dockerfile/builder_windows.go new file mode 100644 index 0000000000..37e9fbcf4b --- /dev/null +++ b/components/engine/builder/dockerfile/builder_windows.go @@ -0,0 +1,3 @@ +package dockerfile + +var defaultShell = []string{"cmd", "/S", "/C"} diff --git a/components/engine/builder/dockerfile/command/command.go b/components/engine/builder/dockerfile/command/command.go new file mode 100644 index 0000000000..f23c6874b5 --- /dev/null +++ 
b/components/engine/builder/dockerfile/command/command.go @@ -0,0 +1,46 @@ +// Package command contains the set of Dockerfile commands. +package command + +// Define constants for the command strings +const ( + Add = "add" + Arg = "arg" + Cmd = "cmd" + Copy = "copy" + Entrypoint = "entrypoint" + Env = "env" + Expose = "expose" + From = "from" + Healthcheck = "healthcheck" + Label = "label" + Maintainer = "maintainer" + Onbuild = "onbuild" + Run = "run" + Shell = "shell" + StopSignal = "stopsignal" + User = "user" + Volume = "volume" + Workdir = "workdir" +) + +// Commands is list of all Dockerfile commands +var Commands = map[string]struct{}{ + Add: {}, + Arg: {}, + Cmd: {}, + Copy: {}, + Entrypoint: {}, + Env: {}, + Expose: {}, + From: {}, + Healthcheck: {}, + Label: {}, + Maintainer: {}, + Onbuild: {}, + Run: {}, + Shell: {}, + StopSignal: {}, + User: {}, + Volume: {}, + Workdir: {}, +} diff --git a/components/engine/builder/dockerfile/copy.go b/components/engine/builder/dockerfile/copy.go new file mode 100644 index 0000000000..77f0a44d55 --- /dev/null +++ b/components/engine/builder/dockerfile/copy.go @@ -0,0 +1,360 @@ +package dockerfile + +import ( + "fmt" + "io" + "net/http" + "net/url" + "os" + "path/filepath" + "sort" + "strings" + "time" + + "github.com/docker/docker/builder" + "github.com/docker/docker/builder/remotecontext" + "github.com/docker/docker/pkg/httputils" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/pkg/streamformatter" + "github.com/docker/docker/pkg/system" + "github.com/docker/docker/pkg/urlutil" + "github.com/pkg/errors" +) + +type pathCache interface { + Load(key interface{}) (value interface{}, ok bool) + Store(key, value interface{}) +} + +// copyInfo is a data object which stores the metadata about each source file in +// a copyInstruction +type copyInfo struct { + root string + path string + hash string +} + +func newCopyInfoFromSource(source builder.Source, path string, hash string) copyInfo { + return copyInfo{root: source.Root(), path: path, hash: hash} +} + +func newCopyInfos(copyInfos ...copyInfo) []copyInfo { + return copyInfos +} + +// copyInstruction is a fully parsed COPY or ADD command that is passed to +// Builder.performCopy to copy files into the image filesystem +type copyInstruction struct { + cmdName string + infos []copyInfo + dest string + allowLocalDecompression bool +} + +// copier reads a raw COPY or ADD command, fetches remote sources using a downloader, +// and creates a copyInstruction +type copier struct { + imageSource *imageMount + source builder.Source + pathCache pathCache + download sourceDownloader + tmpPaths []string +} + +func copierFromDispatchRequest(req dispatchRequest, download sourceDownloader, imageSource *imageMount) copier { + return copier{ + source: req.source, + pathCache: req.builder.pathCache, + download: download, + imageSource: imageSource, + } +} + +func (o *copier) createCopyInstruction(args []string, cmdName string) (copyInstruction, error) { + inst := copyInstruction{cmdName: cmdName} + last := len(args) - 1 + + // Work in daemon-specific filepath semantics + inst.dest = filepath.FromSlash(args[last]) + + infos, err := o.getCopyInfosForSourcePaths(args[0:last]) + if err != nil { + return inst, errors.Wrapf(err, "%s failed", cmdName) + } + if len(infos) > 1 && !strings.HasSuffix(inst.dest, string(os.PathSeparator)) { + return inst, errors.Errorf("When using %s with more than one source file, the destination must be a directory and end with a 
/", cmdName) + } + inst.infos = infos + return inst, nil +} + +// getCopyInfosForSourcePaths iterates over the source files and calculate the info +// needed to copy (e.g. hash value if cached) +func (o *copier) getCopyInfosForSourcePaths(sources []string) ([]copyInfo, error) { + var infos []copyInfo + for _, orig := range sources { + subinfos, err := o.getCopyInfoForSourcePath(orig) + if err != nil { + return nil, err + } + infos = append(infos, subinfos...) + } + + if len(infos) == 0 { + return nil, errors.New("no source files were specified") + } + return infos, nil +} + +func (o *copier) getCopyInfoForSourcePath(orig string) ([]copyInfo, error) { + if !urlutil.IsURL(orig) { + return o.calcCopyInfo(orig, true) + } + remote, path, err := o.download(orig) + if err != nil { + return nil, err + } + o.tmpPaths = append(o.tmpPaths, remote.Root()) + + hash, err := remote.Hash(path) + return newCopyInfos(newCopyInfoFromSource(remote, path, hash)), err +} + +// Cleanup removes any temporary directories created as part of downloading +// remote files. +func (o *copier) Cleanup() { + for _, path := range o.tmpPaths { + os.RemoveAll(path) + } + o.tmpPaths = []string{} +} + +// TODO: allowWildcards can probably be removed by refactoring this function further. +func (o *copier) calcCopyInfo(origPath string, allowWildcards bool) ([]copyInfo, error) { + imageSource := o.imageSource + if err := validateCopySourcePath(imageSource, origPath); err != nil { + return nil, err + } + + // Work in daemon-specific OS filepath semantics + origPath = filepath.FromSlash(origPath) + origPath = strings.TrimPrefix(origPath, string(os.PathSeparator)) + origPath = strings.TrimPrefix(origPath, "."+string(os.PathSeparator)) + + // TODO: do this when creating copier. Requires validateCopySourcePath + // (and other below) to be aware of the difference sources. Why is it only + // done on image Source? 
+ if imageSource != nil { + var err error + o.source, err = imageSource.Source() + if err != nil { + return nil, errors.Wrapf(err, "failed to copy") + } + } + + if o.source == nil { + return nil, errors.Errorf("missing build context") + } + + // Deal with wildcards + if allowWildcards && containsWildcards(origPath) { + return o.copyWithWildcards(origPath) + } + + if imageSource != nil && imageSource.ImageID() != "" { + // return a cached copy if one exists + if h, ok := o.pathCache.Load(imageSource.ImageID() + origPath); ok { + return newCopyInfos(newCopyInfoFromSource(o.source, origPath, h.(string))), nil + } + } + + // Deal with the single file case + copyInfo, err := copyInfoForFile(o.source, origPath) + switch { + case err != nil: + return nil, err + case copyInfo.hash != "": + o.storeInPathCache(imageSource, origPath, copyInfo.hash) + return newCopyInfos(copyInfo), err + } + + // TODO: remove, handle dirs in Hash() + subfiles, err := walkSource(o.source, origPath) + if err != nil { + return nil, err + } + + hash := hashStringSlice("dir", subfiles) + o.storeInPathCache(imageSource, origPath, hash) + return newCopyInfos(newCopyInfoFromSource(o.source, origPath, hash)), nil +} + +func (o *copier) storeInPathCache(im *imageMount, path string, hash string) { + if im != nil { + o.pathCache.Store(im.ImageID()+path, hash) + } +} + +func (o *copier) copyWithWildcards(origPath string) ([]copyInfo, error) { + var copyInfos []copyInfo + if err := filepath.Walk(o.source.Root(), func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + rel, err := remotecontext.Rel(o.source.Root(), path) + if err != nil { + return err + } + + if rel == "." { + return nil + } + if match, _ := filepath.Match(origPath, rel); !match { + return nil + } + + // Note we set allowWildcards to false in case the name has + // a * in it + subInfos, err := o.calcCopyInfo(rel, false) + if err != nil { + return err + } + copyInfos = append(copyInfos, subInfos...) + return nil + }); err != nil { + return nil, err + } + return copyInfos, nil +} + +func copyInfoForFile(source builder.Source, path string) (copyInfo, error) { + fi, err := remotecontext.StatAt(source, path) + if err != nil { + return copyInfo{}, err + } + + if fi.IsDir() { + return copyInfo{}, nil + } + hash, err := source.Hash(path) + if err != nil { + return copyInfo{}, err + } + return newCopyInfoFromSource(source, path, "file:"+hash), nil +} + +// TODO: dedupe with copyWithWildcards() +func walkSource(source builder.Source, origPath string) ([]string, error) { + fp, err := remotecontext.FullPath(source, origPath) + if err != nil { + return nil, err + } + // Must be a dir + var subfiles []string + err = filepath.Walk(fp, func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + rel, err := remotecontext.Rel(source.Root(), path) + if err != nil { + return err + } + if rel == "." 
{ + return nil + } + hash, err := source.Hash(rel) + if err != nil { + return err + } + // a hashing failure should fail the walk rather than silently drop the file + subfiles = append(subfiles, hash) + return nil + }) + if err != nil { + return nil, err + } + + sort.Strings(subfiles) + return subfiles, nil +} + +type sourceDownloader func(string) (builder.Source, string, error) + +func newRemoteSourceDownloader(output, stdout io.Writer) sourceDownloader { + return func(url string) (builder.Source, string, error) { + return downloadSource(output, stdout, url) + } +} + +func errOnSourceDownload(_ string) (builder.Source, string, error) { + return nil, "", errors.New("source can't be a URL for COPY") +} + +func downloadSource(output io.Writer, stdout io.Writer, srcURL string) (remote builder.Source, p string, err error) { + // get filename from URL + u, err := url.Parse(srcURL) + if err != nil { + return + } + filename := filepath.Base(filepath.FromSlash(u.Path)) // Ensure in platform semantics + if filename == "" { + err = errors.Errorf("cannot determine filename from url: %s", u) + return + } + + // Initiate the download + resp, err := httputils.Download(srcURL) + if err != nil { + return + } + + // Prepare file in a tmp dir + tmpDir, err := ioutils.TempDir("", "docker-remote") + if err != nil { + return + } + defer func() { + if err != nil { + os.RemoveAll(tmpDir) + } + }() + tmpFileName := filepath.Join(tmpDir, filename) + tmpFile, err := os.OpenFile(tmpFileName, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600) + if err != nil { + return + } + + progressOutput := streamformatter.NewJSONProgressOutput(output, true) + progressReader := progress.NewProgressReader(resp.Body, progressOutput, resp.ContentLength, "", "Downloading") + // Download and dump result to tmp file + // TODO: add filehash directly + if _, err = io.Copy(tmpFile, progressReader); err != nil { + tmpFile.Close() + return + } + // TODO: how important is this random blank line to the output? + fmt.Fprintln(stdout) + + // Set the mtime to the Last-Modified header value if present + // Otherwise just remove atime and mtime + mTime := time.Time{} + + lastMod := resp.Header.Get("Last-Modified") + if lastMod != "" { + // If we can't parse it then just let it default to 'zero' + // otherwise use the parsed time value + if parsedMTime, err := http.ParseTime(lastMod); err == nil { + mTime = parsedMTime + } + } + + tmpFile.Close() + + if err = system.Chtimes(tmpFileName, mTime, mTime); err != nil { + return + } + + lc, err := remotecontext.NewLazyContext(tmpDir) + return lc, filename, err +} diff --git a/components/engine/builder/dockerfile/dispatchers.go b/components/engine/builder/dockerfile/dispatchers.go new file mode 100644 index 0000000000..78eeab31c5 --- /dev/null +++ b/components/engine/builder/dockerfile/dispatchers.go @@ -0,0 +1,878 @@ +package dockerfile + +// This file contains the dispatchers for each command. Note that +// `nullDispatch` is not actually a command, but support for commands we parse +// but do nothing with. +// +// See evaluator.go for a higher level discussion of the whole evaluator +// package.
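+// +// As a rough sketch, each dispatcher below has the shape +// +// func cmdName(req dispatchRequest) error { +// // validate req.args and req.flags, mutate req.state.runConfig, +// // then commit the result: +// return req.builder.commit(req.state, "CMDNAME ...") +// } +// +// so supporting a new command mostly means adding another function of this form.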
+ +import ( + "bytes" + "fmt" + "regexp" + "runtime" + "sort" + "strconv" + "strings" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/strslice" + "github.com/docker/docker/builder" + "github.com/docker/docker/builder/dockerfile/parser" + "github.com/docker/docker/pkg/signal" + "github.com/docker/go-connections/nat" + "github.com/pkg/errors" +) + +// ENV foo bar +// +// Sets the environment variable foo to bar, also makes interpolation +// in the dockerfile available from the next statement on via ${foo}. +// +func env(req dispatchRequest) error { + if len(req.args) == 0 { + return errAtLeastOneArgument("ENV") + } + + if len(req.args)%2 != 0 { + // should never get here, but just in case + return errTooManyArguments("ENV") + } + + if err := req.flags.Parse(); err != nil { + return err + } + + runConfig := req.state.runConfig + commitMessage := bytes.NewBufferString("ENV") + + for j := 0; j < len(req.args); j += 2 { + if len(req.args[j]) == 0 { + return errBlankCommandNames("ENV") + } + name := req.args[j] + value := req.args[j+1] + newVar := name + "=" + value + commitMessage.WriteString(" " + newVar) + + gotOne := false + for i, envVar := range runConfig.Env { + envParts := strings.SplitN(envVar, "=", 2) + compareFrom := envParts[0] + if equalEnvKeys(compareFrom, name) { + runConfig.Env[i] = newVar + gotOne = true + break + } + } + if !gotOne { + runConfig.Env = append(runConfig.Env, newVar) + } + } + + return req.builder.commit(req.state, commitMessage.String()) +} + +// MAINTAINER some text +// +// Sets the maintainer metadata. +func maintainer(req dispatchRequest) error { + if len(req.args) != 1 { + return errExactlyOneArgument("MAINTAINER") + } + + if err := req.flags.Parse(); err != nil { + return err + } + + maintainer := req.args[0] + req.state.maintainer = maintainer + return req.builder.commit(req.state, "MAINTAINER "+maintainer) +} + +// LABEL some json data describing the image +// +// Sets the Label variable foo to bar, +// +func label(req dispatchRequest) error { + if len(req.args) == 0 { + return errAtLeastOneArgument("LABEL") + } + if len(req.args)%2 != 0 { + // should never get here, but just in case + return errTooManyArguments("LABEL") + } + + if err := req.flags.Parse(); err != nil { + return err + } + + commitStr := "LABEL" + runConfig := req.state.runConfig + + if runConfig.Labels == nil { + runConfig.Labels = map[string]string{} + } + + for j := 0; j < len(req.args); j++ { + name := req.args[j] + if name == "" { + return errBlankCommandNames("LABEL") + } + + value := req.args[j+1] + commitStr += " " + name + "=" + value + + runConfig.Labels[name] = value + j++ + } + return req.builder.commit(req.state, commitStr) +} + +// ADD foo /path +// +// Add the file 'foo' to '/path'. Tarball and Remote URL (git, http) handling +// exist here. If you do not wish to have this automatic handling, use COPY. 
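+// +// For example (illustrative): +// +// ADD https://example.com/app.tar.gz /opt/app/ # remote files are fetched but not unpacked +// ADD rootfs.tar.xz / # local archives are unpacked into place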
+// +func add(req dispatchRequest) error { + if len(req.args) < 2 { + return errAtLeastTwoArguments("ADD") + } + + if err := req.flags.Parse(); err != nil { + return err + } + + downloader := newRemoteSourceDownloader(req.builder.Output, req.builder.Stdout) + copier := copierFromDispatchRequest(req, downloader, nil) + defer copier.Cleanup() + copyInstruction, err := copier.createCopyInstruction(req.args, "ADD") + if err != nil { + return err + } + copyInstruction.allowLocalDecompression = true + + return req.builder.performCopy(req.state, copyInstruction) +} + +// COPY foo /path +// +// Same as 'ADD' but without the tar and remote url handling. +// +func dispatchCopy(req dispatchRequest) error { + if len(req.args) < 2 { + return errAtLeastTwoArguments("COPY") + } + + flFrom := req.flags.AddString("from", "") + if err := req.flags.Parse(); err != nil { + return err + } + + im, err := req.builder.getImageMount(flFrom) + if err != nil { + return errors.Wrapf(err, "invalid from flag value %s", flFrom.Value) + } + + copier := copierFromDispatchRequest(req, errOnSourceDownload, im) + defer copier.Cleanup() + copyInstruction, err := copier.createCopyInstruction(req.args, "COPY") + if err != nil { + return err + } + + return req.builder.performCopy(req.state, copyInstruction) +} + +func (b *Builder) getImageMount(fromFlag *Flag) (*imageMount, error) { + if !fromFlag.IsUsed() { + // TODO: this could return the source in the default case as well? + return nil, nil + } + + imageRefOrID := fromFlag.Value + stage, err := b.buildStages.get(fromFlag.Value) + if err != nil { + return nil, err + } + if stage != nil { + imageRefOrID = stage.ImageID() + } + return b.imageSources.Get(imageRefOrID) +} + +// FROM imagename[:tag | @digest] [AS build-stage-name] +// +func from(req dispatchRequest) error { + stageName, err := parseBuildStageName(req.args) + if err != nil { + return err + } + + if err := req.flags.Parse(); err != nil { + return err + } + + req.builder.resetImageCache() + image, err := req.builder.getFromImage(req.shlex, req.args[0]) + if err != nil { + return err + } + if err := req.builder.buildStages.add(stageName, image); err != nil { + return err + } + req.state.beginStage(stageName, image) + req.builder.buildArgs.ResetAllowed() + if image.ImageID() == "" { + // Typically this means they used "FROM scratch" + return nil + } + + return processOnBuild(req) +} + +func parseBuildStageName(args []string) (string, error) { + stageName := "" + switch { + case len(args) == 3 && strings.EqualFold(args[1], "as"): + stageName = strings.ToLower(args[2]) + if ok, _ := regexp.MatchString("^[a-z][a-z0-9-_\\.]*$", stageName); !ok { + return "", errors.Errorf("invalid name for build stage: %q, name can't start with a number or contain symbols", stageName) + } + case len(args) != 1: + return "", errors.New("FROM requires either one or three arguments") + } + + return stageName, nil +} + +// scratchImage is used as a token for the empty base image. It uses buildStage +// as a convenient implementation of builder.Image, but is not actually a +// buildStage. 
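+// (For instance, a Linux Dockerfile beginning with FROM scratch resolves to +// this token rather than to a real image; see getFromImage below.)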
+var scratchImage builder.Image = &buildStage{} + +func (b *Builder) getFromImage(shlex *ShellLex, name string) (builder.Image, error) { + substitutionArgs := []string{} + for key, value := range b.buildArgs.GetAllMeta() { + substitutionArgs = append(substitutionArgs, key+"="+value) + } + + name, err := shlex.ProcessWord(name, substitutionArgs) + if err != nil { + return nil, err + } + + if im, ok := b.buildStages.getByName(name); ok { + return im, nil + } + + // Windows cannot support a container with no base image. + if name == api.NoBaseImageSpecifier { + if runtime.GOOS == "windows" { + return nil, errors.New("Windows does not support FROM scratch") + } + return scratchImage, nil + } + imageMount, err := b.imageSources.Get(name) + if err != nil { + return nil, err + } + return imageMount.Image(), nil +} + +func processOnBuild(req dispatchRequest) error { + dispatchState := req.state + // Process ONBUILD triggers if they exist + if nTriggers := len(dispatchState.runConfig.OnBuild); nTriggers != 0 { + word := "trigger" + if nTriggers > 1 { + word = "triggers" + } + fmt.Fprintf(req.builder.Stderr, "# Executing %d build %s...\n", nTriggers, word) + } + + // Copy the ONBUILD triggers, and remove them from the config, since the config will be committed. + onBuildTriggers := dispatchState.runConfig.OnBuild + dispatchState.runConfig.OnBuild = []string{} + + // Reset stdin settings as all build actions run without stdin + dispatchState.runConfig.OpenStdin = false + dispatchState.runConfig.StdinOnce = false + + // parse the ONBUILD triggers by invoking the parser + for _, step := range onBuildTriggers { + dockerfile, err := parser.Parse(strings.NewReader(step)) + if err != nil { + return err + } + + for _, n := range dockerfile.AST.Children { + if err := checkDispatch(n); err != nil { + return err + } + + upperCasedCmd := strings.ToUpper(n.Value) + switch upperCasedCmd { + case "ONBUILD": + return errors.New("Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed") + case "MAINTAINER", "FROM": + return errors.Errorf("%s isn't allowed as an ONBUILD trigger", upperCasedCmd) + } + } + + if _, err := dispatchFromDockerfile(req.builder, dockerfile, dispatchState); err != nil { + return err + } + } + return nil +} + +// ONBUILD RUN echo yo +// +// ONBUILD triggers run when the image is used in a FROM statement. +// +// ONBUILD handling has a lot of special-case functionality, the heading in +// evaluator.go and comments around dispatch() in the same file explain the +// special cases. search for 'OnBuild' in internals.go for additional special +// cases. +// +func onbuild(req dispatchRequest) error { + if len(req.args) == 0 { + return errAtLeastOneArgument("ONBUILD") + } + + if err := req.flags.Parse(); err != nil { + return err + } + + triggerInstruction := strings.ToUpper(strings.TrimSpace(req.args[0])) + switch triggerInstruction { + case "ONBUILD": + return errors.New("Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed") + case "MAINTAINER", "FROM": + return fmt.Errorf("%s isn't allowed as an ONBUILD trigger", triggerInstruction) + } + + runConfig := req.state.runConfig + original := regexp.MustCompile(`(?i)^\s*ONBUILD\s*`).ReplaceAllString(req.original, "") + runConfig.OnBuild = append(runConfig.OnBuild, original) + return req.builder.commit(req.state, "ONBUILD "+original) +} + +// WORKDIR /tmp +// +// Set the working directory for future RUN/CMD/etc statements. 
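+// +// For example: +// +// WORKDIR /app +// WORKDIR src # later instructions now run from /app/src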
+// +func workdir(req dispatchRequest) error { + if len(req.args) != 1 { + return errExactlyOneArgument("WORKDIR") + } + + err := req.flags.Parse() + if err != nil { + return err + } + + runConfig := req.state.runConfig + // This is from the Dockerfile and will not necessarily be in platform + // specific semantics, hence ensure it is converted. + runConfig.WorkingDir, err = normaliseWorkdir(runConfig.WorkingDir, req.args[0]) + if err != nil { + return err + } + + // For performance reasons, we explicitly do a create/mkdir now + // This avoids unnecessary and expensive mount/unmount calls + // (on Windows in particular) during each container create. + // Prior to 1.13, the mkdir was deferred and not executed at this step. + if req.builder.disableCommit { + // Don't call back into the daemon if we're going through docker commit --change "WORKDIR /foo". + // We've already updated the runConfig and that's enough. + return nil + } + + comment := "WORKDIR " + runConfig.WorkingDir + runConfigWithCommentCmd := copyRunConfig(runConfig, withCmdCommentString(comment)) + if hit, err := req.builder.probeCache(req.state, runConfigWithCommentCmd); err != nil || hit { + return err + } + + container, err := req.builder.docker.ContainerCreate(types.ContainerCreateConfig{ + Config: runConfigWithCommentCmd, + // Set a log config to override any default value set on the daemon + HostConfig: &container.HostConfig{LogConfig: defaultLogConfig}, + }) + if err != nil { + return err + } + req.builder.tmpContainers[container.ID] = struct{}{} + if err := req.builder.docker.ContainerCreateWorkdir(container.ID); err != nil { + return err + } + + return req.builder.commitContainer(req.state, container.ID, runConfigWithCommentCmd) +} + +// RUN some command yo +// +// Run a command and commit the image. Args are automatically prepended with +// the current SHELL which defaults to 'sh -c' under Linux or 'cmd /S /C' under +// Windows, in the event there is only one argument. The difference in processing: +// +// RUN echo hi # sh -c echo hi (Linux) +// RUN echo hi # cmd /S /C echo hi (Windows) +// RUN [ "echo", "hi" ] # echo hi +// +func run(req dispatchRequest) error { + if !req.state.hasFromImage() { + return errors.New("Please provide a source image with `from` prior to run") + } + + if err := req.flags.Parse(); err != nil { + return err + } + + stateRunConfig := req.state.runConfig + args := handleJSONArgs(req.args, req.attributes) + if !req.attributes["json"] { + args = append(getShell(stateRunConfig), args...)
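+ // e.g. on Linux the shell form "echo hi" becomes ["/bin/sh", "-c", "echo hi"]; + // the JSON form ["echo", "hi"] is used as-is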
+ } + cmdFromArgs := strslice.StrSlice(args) + buildArgs := req.builder.buildArgs.FilterAllowed(stateRunConfig.Env) + + saveCmd := cmdFromArgs + if len(buildArgs) > 0 { + saveCmd = prependEnvOnCmd(req.builder.buildArgs, buildArgs, cmdFromArgs) + } + + runConfigForCacheProbe := copyRunConfig(stateRunConfig, + withCmd(saveCmd), + withEntrypointOverride(saveCmd, nil)) + hit, err := req.builder.probeCache(req.state, runConfigForCacheProbe) + if err != nil || hit { + return err + } + + runConfig := copyRunConfig(stateRunConfig, + withCmd(cmdFromArgs), + withEnv(append(stateRunConfig.Env, buildArgs...)), + withEntrypointOverride(saveCmd, strslice.StrSlice{""})) + + // set config as already being escaped, this prevents double escaping on windows + runConfig.ArgsEscaped = true + + logrus.Debugf("[BUILDER] Command to be executed: %v", runConfig.Cmd) + cID, err := req.builder.create(runConfig) + if err != nil { + return err + } + if err := req.builder.run(cID, runConfig.Cmd); err != nil { + return err + } + + return req.builder.commitContainer(req.state, cID, runConfigForCacheProbe) +} + +// Derive the command to use for probeCache() and to commit in this container. +// Note that we only do this if there are any build-time env vars. Also, we +// use the special argument "|#" at the start of the args array. This will +// avoid conflicts with any RUN command since commands cannot +// start with | (vertical bar). The "#" (number of build envs) is there to +// help ensure proper cache matches. We don't want a RUN command +// that starts with "foo=abc" to be considered part of a build-time env var. +// +// It also removes any unreferenced built-in args from the environment +// variables; these args are transparent, so the resulting image should be the +// same regardless of their values. +func prependEnvOnCmd(buildArgs *buildArgs, buildArgVars []string, cmd strslice.StrSlice) strslice.StrSlice { + var tmpBuildEnv []string + for _, env := range buildArgVars { + key := strings.SplitN(env, "=", 2)[0] + if buildArgs.IsReferencedOrNotBuiltin(key) { + tmpBuildEnv = append(tmpBuildEnv, env) + } + } + + sort.Strings(tmpBuildEnv) + tmpEnv := append([]string{fmt.Sprintf("|%d", len(tmpBuildEnv))}, tmpBuildEnv...) + return strslice.StrSlice(append(tmpEnv, cmd...)) +} + +// CMD foo +// +// Set the default command to run in the container (which may be empty). +// Argument handling is the same as RUN. +// +func cmd(req dispatchRequest) error { + if err := req.flags.Parse(); err != nil { + return err + } + + runConfig := req.state.runConfig + cmdSlice := handleJSONArgs(req.args, req.attributes) + if !req.attributes["json"] { + cmdSlice = append(getShell(runConfig), cmdSlice...) + } + + runConfig.Cmd = strslice.StrSlice(cmdSlice) + // set config as already being escaped, this prevents double escaping on windows + runConfig.ArgsEscaped = true + + if err := req.builder.commit(req.state, fmt.Sprintf("CMD %q", cmdSlice)); err != nil { + return err + } + + if len(req.args) != 0 { + req.state.cmdSet = true + } + + return nil +} + +// parseOptInterval(flag) is the duration of flag.Value, or 0 if +// empty. An error is reported if a value is given and is less than the minimum duration.
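+// For example, "5s" yields 5*time.Second and "" yields 0, while a non-empty +// value below container.MinimumDuration (such as "50ns") is rejected.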
+func parseOptInterval(f *Flag) (time.Duration, error) { + s := f.Value + if s == "" { + return 0, nil + } + d, err := time.ParseDuration(s) + if err != nil { + return 0, err + } + if d < time.Duration(container.MinimumDuration) { + return 0, fmt.Errorf("Interval %#v cannot be less than %s", f.name, container.MinimumDuration) + } + return d, nil +} + +// HEALTHCHECK foo +// +// Set the default healthcheck command to run in the container (which may be empty). +// Argument handling is the same as RUN. +// +func healthcheck(req dispatchRequest) error { + if len(req.args) == 0 { + return errAtLeastOneArgument("HEALTHCHECK") + } + runConfig := req.state.runConfig + typ := strings.ToUpper(req.args[0]) + args := req.args[1:] + if typ == "NONE" { + if len(args) != 0 { + return errors.New("HEALTHCHECK NONE takes no arguments") + } + test := strslice.StrSlice{typ} + runConfig.Healthcheck = &container.HealthConfig{ + Test: test, + } + } else { + if runConfig.Healthcheck != nil { + oldCmd := runConfig.Healthcheck.Test + if len(oldCmd) > 0 && oldCmd[0] != "NONE" { + fmt.Fprintf(req.builder.Stdout, "Note: overriding previous HEALTHCHECK: %v\n", oldCmd) + } + } + + healthcheck := container.HealthConfig{} + + flInterval := req.flags.AddString("interval", "") + flTimeout := req.flags.AddString("timeout", "") + flStartPeriod := req.flags.AddString("start-period", "") + flRetries := req.flags.AddString("retries", "") + + if err := req.flags.Parse(); err != nil { + return err + } + + switch typ { + case "CMD": + cmdSlice := handleJSONArgs(args, req.attributes) + if len(cmdSlice) == 0 { + return errors.New("Missing command after HEALTHCHECK CMD") + } + + if !req.attributes["json"] { + typ = "CMD-SHELL" + } + + healthcheck.Test = strslice.StrSlice(append([]string{typ}, cmdSlice...)) + default: + return fmt.Errorf("Unknown type %#v in HEALTHCHECK (try CMD)", typ) + } + + interval, err := parseOptInterval(flInterval) + if err != nil { + return err + } + healthcheck.Interval = interval + + timeout, err := parseOptInterval(flTimeout) + if err != nil { + return err + } + healthcheck.Timeout = timeout + + startPeriod, err := parseOptInterval(flStartPeriod) + if err != nil { + return err + } + healthcheck.StartPeriod = startPeriod + + if flRetries.Value != "" { + retries, err := strconv.ParseInt(flRetries.Value, 10, 32) + if err != nil { + return err + } + if retries < 1 { + return fmt.Errorf("--retries must be at least 1 (not %d)", retries) + } + healthcheck.Retries = int(retries) + } else { + healthcheck.Retries = 0 + } + + runConfig.Healthcheck = &healthcheck + } + + return req.builder.commit(req.state, fmt.Sprintf("HEALTHCHECK %q", runConfig.Healthcheck)) +} + +// ENTRYPOINT /usr/sbin/nginx +// +// Set the entrypoint to /usr/sbin/nginx. Will accept the CMD as the arguments +// to /usr/sbin/nginx. Uses the default shell if not in JSON format. +// +// Handles command processing similar to CMD and RUN, only req.runConfig.Entrypoint +// is initialized at newBuilder time instead of through argument parsing. 
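+// +// For example (illustrative): +// +// ENTRYPOINT ["/usr/sbin/nginx", "-g", "daemon off;"] # JSON form, used as-is +// ENTRYPOINT /usr/sbin/nginx # shell form, wrapped in the default shell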
+// +func entrypoint(req dispatchRequest) error { + if err := req.flags.Parse(); err != nil { + return err + } + + runConfig := req.state.runConfig + parsed := handleJSONArgs(req.args, req.attributes) + + switch { + case req.attributes["json"]: + // ENTRYPOINT ["echo", "hi"] + runConfig.Entrypoint = strslice.StrSlice(parsed) + case len(parsed) == 0: + // ENTRYPOINT [] + runConfig.Entrypoint = nil + default: + // ENTRYPOINT echo hi + runConfig.Entrypoint = strslice.StrSlice(append(getShell(runConfig), parsed[0])) + } + + // when setting the entrypoint if a CMD was not explicitly set then + // set the command to nil + if !req.state.cmdSet { + runConfig.Cmd = nil + } + + return req.builder.commit(req.state, fmt.Sprintf("ENTRYPOINT %q", runConfig.Entrypoint)) +} + +// EXPOSE 6667/tcp 7000/tcp +// +// Expose ports for links and port mappings. This all ends up in +// req.runConfig.ExposedPorts for runconfig. +// +func expose(req dispatchRequest) error { + portsTab := req.args + + if len(req.args) == 0 { + return errAtLeastOneArgument("EXPOSE") + } + + if err := req.flags.Parse(); err != nil { + return err + } + + runConfig := req.state.runConfig + if runConfig.ExposedPorts == nil { + runConfig.ExposedPorts = make(nat.PortSet) + } + + ports, _, err := nat.ParsePortSpecs(portsTab) + if err != nil { + return err + } + + // instead of using ports directly, we build a list of ports and sort it so + // the order is consistent. This prevents cache busting when map ordering + // changes between builds + portList := make([]string, len(ports)) + var i int + for port := range ports { + if _, exists := runConfig.ExposedPorts[port]; !exists { + runConfig.ExposedPorts[port] = struct{}{} + } + portList[i] = string(port) + i++ + } + sort.Strings(portList) + return req.builder.commit(req.state, "EXPOSE "+strings.Join(portList, " ")) +} + +// USER foo +// +// Set the user to 'foo' for future commands and when running the +// ENTRYPOINT/CMD at container run time. +// +func user(req dispatchRequest) error { + if len(req.args) != 1 { + return errExactlyOneArgument("USER") + } + + if err := req.flags.Parse(); err != nil { + return err + } + + req.state.runConfig.User = req.args[0] + return req.builder.commit(req.state, fmt.Sprintf("USER %v", req.args)) +} + +// VOLUME /foo +// +// Expose the volume /foo for use. Will also accept the JSON array form. +// +func volume(req dispatchRequest) error { + if len(req.args) == 0 { + return errAtLeastOneArgument("VOLUME") + } + + if err := req.flags.Parse(); err != nil { + return err + } + + runConfig := req.state.runConfig + if runConfig.Volumes == nil { + runConfig.Volumes = map[string]struct{}{} + } + for _, v := range req.args { + v = strings.TrimSpace(v) + if v == "" { + return errors.New("VOLUME specified can not be an empty string") + } + runConfig.Volumes[v] = struct{}{} + } + return req.builder.commit(req.state, fmt.Sprintf("VOLUME %v", req.args)) +} + +// STOPSIGNAL signal +// +// Set the signal that will be used to kill the container. +func stopSignal(req dispatchRequest) error { + if len(req.args) != 1 { + return errExactlyOneArgument("STOPSIGNAL") + } + + sig := req.args[0] + _, err := signal.ParseSignal(sig) + if err != nil { + return err + } + + req.state.runConfig.StopSignal = sig + return req.builder.commit(req.state, fmt.Sprintf("STOPSIGNAL %v", req.args)) +} + +// ARG name[=value] +// +// Adds the variable foo to the trusted list of variables that can be passed +// to the builder using the --build-arg flag for expansion/substitution or passing to 'run'.
+// Dockerfile author may optionally set a default value of this variable. +func arg(req dispatchRequest) error { + if len(req.args) != 1 { + return errExactlyOneArgument("ARG") + } + + var ( + name string + newValue string + hasDefault bool + ) + + arg := req.args[0] + // 'arg' can just be a name or name-value pair. Note that this is different + // from 'env' that handles the split of name and value at the parser level. + // The reason for doing it differently for 'arg' is that we support just + // defining an arg and not assign it a value (while 'env' always expects a + // name-value pair). If possible, it will be good to harmonize the two. + if strings.Contains(arg, "=") { + parts := strings.SplitN(arg, "=", 2) + if len(parts[0]) == 0 { + return errBlankCommandNames("ARG") + } + + name = parts[0] + newValue = parts[1] + hasDefault = true + } else { + name = arg + hasDefault = false + } + + var value *string + if hasDefault { + value = &newValue + } + req.builder.buildArgs.AddArg(name, value) + + // Arg before FROM doesn't add a layer + if !req.state.hasFromImage() { + req.builder.buildArgs.AddMetaArg(name, value) + return nil + } + return req.builder.commit(req.state, "ARG "+arg) +} + +// SHELL powershell -command +// +// Set the non-default shell to use. +func shell(req dispatchRequest) error { + if err := req.flags.Parse(); err != nil { + return err + } + shellSlice := handleJSONArgs(req.args, req.attributes) + switch { + case len(shellSlice) == 0: + // SHELL [] + return errAtLeastOneArgument("SHELL") + case req.attributes["json"]: + // SHELL ["powershell", "-command"] + req.state.runConfig.Shell = strslice.StrSlice(shellSlice) + default: + // SHELL powershell -command - not JSON + return errNotJSON("SHELL", req.original) + } + return req.builder.commit(req.state, fmt.Sprintf("SHELL %v", shellSlice)) +} + +func errAtLeastOneArgument(command string) error { + return fmt.Errorf("%s requires at least one argument", command) +} + +func errExactlyOneArgument(command string) error { + return fmt.Errorf("%s requires exactly one argument", command) +} + +func errAtLeastTwoArguments(command string) error { + return fmt.Errorf("%s requires at least two arguments", command) +} + +func errBlankCommandNames(command string) error { + return fmt.Errorf("%s names can not be blank", command) +} + +func errTooManyArguments(command string) error { + return fmt.Errorf("Bad input to %s, too many arguments", command) +} diff --git a/components/engine/builder/dockerfile/dispatchers_test.go b/components/engine/builder/dockerfile/dispatchers_test.go new file mode 100644 index 0000000000..6134ce4a0f --- /dev/null +++ b/components/engine/builder/dockerfile/dispatchers_test.go @@ -0,0 +1,515 @@ +package dockerfile + +import ( + "fmt" + "runtime" + "testing" + + "bytes" + "context" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/strslice" + "github.com/docker/docker/builder" + "github.com/docker/docker/builder/dockerfile/parser" + "github.com/docker/docker/pkg/system" + "github.com/docker/docker/pkg/testutil" + "github.com/docker/go-connections/nat" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +type commandWithFunction struct { + name string + function func(args []string) error +} + +func withArgs(f dispatcher) func([]string) error { + return func(args []string) error { + return f(dispatchRequest{args: args}) + } +} + +func withBuilderAndArgs(builder 
*Builder, f dispatcher) func([]string) error { + return func(args []string) error { + return f(defaultDispatchReq(builder, args...)) + } +} + +func defaultDispatchReq(builder *Builder, args ...string) dispatchRequest { + return dispatchRequest{ + builder: builder, + args: args, + flags: NewBFlags(), + shlex: NewShellLex(parser.DefaultEscapeToken), + state: &dispatchState{runConfig: &container.Config{}}, + } +} + +func newBuilderWithMockBackend() *Builder { + mockBackend := &MockBackend{} + ctx := context.Background() + b := &Builder{ + options: &types.ImageBuildOptions{}, + docker: mockBackend, + buildArgs: newBuildArgs(make(map[string]*string)), + tmpContainers: make(map[string]struct{}), + Stdout: new(bytes.Buffer), + clientCtx: ctx, + disableCommit: true, + imageSources: newImageSources(ctx, builderOptions{ + Options: &types.ImageBuildOptions{}, + Backend: mockBackend, + }), + buildStages: newBuildStages(), + } + return b +} + +func TestCommandsExactlyOneArgument(t *testing.T) { + commands := []commandWithFunction{ + {"MAINTAINER", withArgs(maintainer)}, + {"WORKDIR", withArgs(workdir)}, + {"USER", withArgs(user)}, + {"STOPSIGNAL", withArgs(stopSignal)}, + } + + for _, command := range commands { + err := command.function([]string{}) + assert.EqualError(t, err, errExactlyOneArgument(command.name).Error()) + } +} + +func TestCommandsAtLeastOneArgument(t *testing.T) { + commands := []commandWithFunction{ + {"ENV", withArgs(env)}, + {"LABEL", withArgs(label)}, + {"ONBUILD", withArgs(onbuild)}, + {"HEALTHCHECK", withArgs(healthcheck)}, + {"EXPOSE", withArgs(expose)}, + {"VOLUME", withArgs(volume)}, + } + + for _, command := range commands { + err := command.function([]string{}) + assert.EqualError(t, err, errAtLeastOneArgument(command.name).Error()) + } +} + +func TestCommandsAtLeastTwoArguments(t *testing.T) { + commands := []commandWithFunction{ + {"ADD", withArgs(add)}, + {"COPY", withArgs(dispatchCopy)}} + + for _, command := range commands { + err := command.function([]string{"arg1"}) + assert.EqualError(t, err, errAtLeastTwoArguments(command.name).Error()) + } +} + +func TestCommandsTooManyArguments(t *testing.T) { + commands := []commandWithFunction{ + {"ENV", withArgs(env)}, + {"LABEL", withArgs(label)}} + + for _, command := range commands { + err := command.function([]string{"arg1", "arg2", "arg3"}) + assert.EqualError(t, err, errTooManyArguments(command.name).Error()) + } +} + +func TestCommandsBlankNames(t *testing.T) { + builder := newBuilderWithMockBackend() + commands := []commandWithFunction{ + {"ENV", withBuilderAndArgs(builder, env)}, + {"LABEL", withBuilderAndArgs(builder, label)}, + } + + for _, command := range commands { + err := command.function([]string{"", ""}) + assert.EqualError(t, err, errBlankCommandNames(command.name).Error()) + } +} + +func TestEnv2Variables(t *testing.T) { + b := newBuilderWithMockBackend() + + args := []string{"var1", "val1", "var2", "val2"} + req := defaultDispatchReq(b, args...) + err := env(req) + require.NoError(t, err) + + expected := []string{ + fmt.Sprintf("%s=%s", args[0], args[1]), + fmt.Sprintf("%s=%s", args[2], args[3]), + } + assert.Equal(t, expected, req.state.runConfig.Env) +} + +func TestEnvValueWithExistingRunConfigEnv(t *testing.T) { + b := newBuilderWithMockBackend() + + args := []string{"var1", "val1"} + req := defaultDispatchReq(b, args...) 
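+ // Seed the config with existing values so the dispatcher must overwrite + // var1 while leaving var2 untouched.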
+ req.state.runConfig.Env = []string{"var1=old", "var2=fromenv"} + err := env(req) + require.NoError(t, err) + + expected := []string{ + fmt.Sprintf("%s=%s", args[0], args[1]), + "var2=fromenv", + } + assert.Equal(t, expected, req.state.runConfig.Env) +} + +func TestMaintainer(t *testing.T) { + maintainerEntry := "Some Maintainer <maintainer@example.com>" + + b := newBuilderWithMockBackend() + req := defaultDispatchReq(b, maintainerEntry) + err := maintainer(req) + require.NoError(t, err) + assert.Equal(t, maintainerEntry, req.state.maintainer) +} + +func TestLabel(t *testing.T) { + labelName := "label" + labelValue := "value" + + labelEntry := []string{labelName, labelValue} + b := newBuilderWithMockBackend() + req := defaultDispatchReq(b, labelEntry...) + err := label(req) + require.NoError(t, err) + + require.Contains(t, req.state.runConfig.Labels, labelName) + assert.Equal(t, req.state.runConfig.Labels[labelName], labelValue) +} + +func TestFromScratch(t *testing.T) { + b := newBuilderWithMockBackend() + req := defaultDispatchReq(b, "scratch") + err := from(req) + + if runtime.GOOS == "windows" { + assert.EqualError(t, err, "Windows does not support FROM scratch") + return + } + + require.NoError(t, err) + assert.True(t, req.state.hasFromImage()) + assert.Equal(t, "", req.state.imageID) + assert.Equal(t, []string{"PATH=" + system.DefaultPathEnv}, req.state.runConfig.Env) +} + +func TestFromWithArg(t *testing.T) { + tag, expected := ":sometag", "expectedthisid" + + getImage := func(name string) (builder.Image, builder.ReleaseableLayer, error) { + assert.Equal(t, "alpine"+tag, name) + return &mockImage{id: "expectedthisid"}, nil, nil + } + b := newBuilderWithMockBackend() + b.docker.(*MockBackend).getImageFunc = getImage + + require.NoError(t, arg(defaultDispatchReq(b, "THETAG="+tag))) + req := defaultDispatchReq(b, "alpine${THETAG}") + err := from(req) + + require.NoError(t, err) + assert.Equal(t, expected, req.state.imageID) + assert.Equal(t, expected, req.state.baseImage.ImageID()) + assert.Len(t, b.buildArgs.GetAllAllowed(), 0) + assert.Len(t, b.buildArgs.GetAllMeta(), 1) +} + +func TestFromWithUndefinedArg(t *testing.T) { + tag, expected := "sometag", "expectedthisid" + + getImage := func(name string) (builder.Image, builder.ReleaseableLayer, error) { + assert.Equal(t, "alpine", name) + return &mockImage{id: "expectedthisid"}, nil, nil + } + b := newBuilderWithMockBackend() + b.docker.(*MockBackend).getImageFunc = getImage + b.options.BuildArgs = map[string]*string{"THETAG": &tag} + + req := defaultDispatchReq(b, "alpine${THETAG}") + err := from(req) + require.NoError(t, err) + assert.Equal(t, expected, req.state.imageID) +} + +func TestFromMultiStageWithScratchNamedStage(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip("Windows does not support scratch") + } + b := newBuilderWithMockBackend() + req := defaultDispatchReq(b, "scratch", "AS", "base") + + require.NoError(t, from(req)) + assert.True(t, req.state.hasFromImage()) + + req.args = []string{"base"} + require.NoError(t, from(req)) + assert.True(t, req.state.hasFromImage()) +} + +func TestOnbuildIllegalTriggers(t *testing.T) { + triggers := []struct{ command, expectedError string }{ + {"ONBUILD", "Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed"}, + {"MAINTAINER", "MAINTAINER isn't allowed as an ONBUILD trigger"}, + {"FROM", "FROM isn't allowed as an ONBUILD trigger"}} + + for _, trigger := range triggers { + b := newBuilderWithMockBackend() + + err := onbuild(defaultDispatchReq(b, trigger.command)) + testutil.ErrorContains(t, err,
trigger.expectedError) + } +} + +func TestOnbuild(t *testing.T) { + b := newBuilderWithMockBackend() + + req := defaultDispatchReq(b, "ADD", ".", "/app/src") + req.original = "ONBUILD ADD . /app/src" + req.state.runConfig = &container.Config{} + + err := onbuild(req) + require.NoError(t, err) + assert.Equal(t, "ADD . /app/src", req.state.runConfig.OnBuild[0]) +} + +func TestWorkdir(t *testing.T) { + b := newBuilderWithMockBackend() + workingDir := "/app" + if runtime.GOOS == "windows" { + workingDir = "C:\\app" + } + + req := defaultDispatchReq(b, workingDir) + err := workdir(req) + require.NoError(t, err) + assert.Equal(t, workingDir, req.state.runConfig.WorkingDir) +} + +func TestCmd(t *testing.T) { + b := newBuilderWithMockBackend() + command := "./executable" + + req := defaultDispatchReq(b, command) + err := cmd(req) + require.NoError(t, err) + + var expectedCommand strslice.StrSlice + if runtime.GOOS == "windows" { + expectedCommand = strslice.StrSlice(append([]string{"cmd"}, "/S", "/C", command)) + } else { + expectedCommand = strslice.StrSlice(append([]string{"/bin/sh"}, "-c", command)) + } + + assert.Equal(t, expectedCommand, req.state.runConfig.Cmd) + assert.True(t, req.state.cmdSet) +} + +func TestHealthcheckNone(t *testing.T) { + b := newBuilderWithMockBackend() + + req := defaultDispatchReq(b, "NONE") + err := healthcheck(req) + require.NoError(t, err) + + require.NotNil(t, req.state.runConfig.Healthcheck) + assert.Equal(t, []string{"NONE"}, req.state.runConfig.Healthcheck.Test) +} + +func TestHealthcheckCmd(t *testing.T) { + b := newBuilderWithMockBackend() + + args := []string{"CMD", "curl", "-f", "http://localhost/", "||", "exit", "1"} + req := defaultDispatchReq(b, args...) + err := healthcheck(req) + require.NoError(t, err) + + require.NotNil(t, req.state.runConfig.Healthcheck) + expectedTest := []string{"CMD-SHELL", "curl -f http://localhost/ || exit 1"} + assert.Equal(t, expectedTest, req.state.runConfig.Healthcheck.Test) +} + +func TestEntrypoint(t *testing.T) { + b := newBuilderWithMockBackend() + entrypointCmd := "/usr/sbin/nginx" + + req := defaultDispatchReq(b, entrypointCmd) + err := entrypoint(req) + require.NoError(t, err) + require.NotNil(t, req.state.runConfig.Entrypoint) + + var expectedEntrypoint strslice.StrSlice + if runtime.GOOS == "windows" { + expectedEntrypoint = strslice.StrSlice(append([]string{"cmd"}, "/S", "/C", entrypointCmd)) + } else { + expectedEntrypoint = strslice.StrSlice(append([]string{"/bin/sh"}, "-c", entrypointCmd)) + } + assert.Equal(t, expectedEntrypoint, req.state.runConfig.Entrypoint) +} + +func TestExpose(t *testing.T) { + b := newBuilderWithMockBackend() + + exposedPort := "80" + req := defaultDispatchReq(b, exposedPort) + err := expose(req) + require.NoError(t, err) + + require.NotNil(t, req.state.runConfig.ExposedPorts) + require.Len(t, req.state.runConfig.ExposedPorts, 1) + + portsMapping, err := nat.ParsePortSpec(exposedPort) + require.NoError(t, err) + assert.Contains(t, req.state.runConfig.ExposedPorts, portsMapping[0].Port) +} + +func TestUser(t *testing.T) { + b := newBuilderWithMockBackend() + userCommand := "foo" + + req := defaultDispatchReq(b, userCommand) + err := user(req) + require.NoError(t, err) + assert.Equal(t, userCommand, req.state.runConfig.User) +} + +func TestVolume(t *testing.T) { + b := newBuilderWithMockBackend() + + exposedVolume := "/foo" + + req := defaultDispatchReq(b, exposedVolume) + err := volume(req) + require.NoError(t, err) + + require.NotNil(t, req.state.runConfig.Volumes) + assert.Len(t,
req.state.runConfig.Volumes, 1) + assert.Contains(t, req.state.runConfig.Volumes, exposedVolume) +} + +func TestStopSignal(t *testing.T) { + b := newBuilderWithMockBackend() + signal := "SIGKILL" + + req := defaultDispatchReq(b, signal) + err := stopSignal(req) + require.NoError(t, err) + assert.Equal(t, signal, req.state.runConfig.StopSignal) +} + +func TestArg(t *testing.T) { + b := newBuilderWithMockBackend() + + argName := "foo" + argVal := "bar" + argDef := fmt.Sprintf("%s=%s", argName, argVal) + + err := arg(defaultDispatchReq(b, argDef)) + require.NoError(t, err) + + expected := map[string]string{argName: argVal} + assert.Equal(t, expected, b.buildArgs.GetAllAllowed()) +} + +func TestShell(t *testing.T) { + b := newBuilderWithMockBackend() + + shellCmd := "powershell" + req := defaultDispatchReq(b, shellCmd) + req.attributes = map[string]bool{"json": true} + + err := shell(req) + require.NoError(t, err) + + expectedShell := strslice.StrSlice([]string{shellCmd}) + assert.Equal(t, expectedShell, req.state.runConfig.Shell) +} + +func TestParseOptInterval(t *testing.T) { + flInterval := &Flag{ + name: "interval", + flagType: stringType, + Value: "50ns", + } + _, err := parseOptInterval(flInterval) + testutil.ErrorContains(t, err, "cannot be less than 1ms") + + flInterval.Value = "1ms" + _, err = parseOptInterval(flInterval) + require.NoError(t, err) +} + +func TestPrependEnvOnCmd(t *testing.T) { + buildArgs := newBuildArgs(nil) + buildArgs.AddArg("NO_PROXY", nil) + + args := []string{"sorted=nope", "args=not", "http_proxy=foo", "NO_PROXY=YA"} + cmd := []string{"foo", "bar"} + cmdWithEnv := prependEnvOnCmd(buildArgs, args, cmd) + expected := strslice.StrSlice([]string{ + "|3", "NO_PROXY=YA", "args=not", "sorted=nope", "foo", "bar"}) + assert.Equal(t, expected, cmdWithEnv) +} + +func TestRunWithBuildArgs(t *testing.T) { + b := newBuilderWithMockBackend() + b.buildArgs.argsFromOptions["HTTP_PROXY"] = strPtr("FOO") + b.disableCommit = false + + runConfig := &container.Config{} + origCmd := strslice.StrSlice([]string{"cmd", "in", "from", "image"}) + cmdWithShell := strslice.StrSlice(append(getShell(runConfig), "echo foo")) + envVars := []string{"|1", "one=two"} + cachedCmd := strslice.StrSlice(append(envVars, cmdWithShell...)) + + imageCache := &mockImageCache{ + getCacheFunc: func(parentID string, cfg *container.Config) (string, error) { + // Check the runConfig.Cmd sent to probeCache() + assert.Equal(t, cachedCmd, cfg.Cmd) + assert.Equal(t, strslice.StrSlice(nil), cfg.Entrypoint) + return "", nil + }, + } + b.imageCache = imageCache + + mockBackend := b.docker.(*MockBackend) + mockBackend.getImageFunc = func(_ string) (builder.Image, builder.ReleaseableLayer, error) { + return &mockImage{ + id: "abcdef", + config: &container.Config{Cmd: origCmd}, + }, nil, nil + } + mockBackend.containerCreateFunc = func(config types.ContainerCreateConfig) (container.ContainerCreateCreatedBody, error) { + // Check the runConfig.Cmd sent to create() + assert.Equal(t, cmdWithShell, config.Config.Cmd) + assert.Contains(t, config.Config.Env, "one=two") + assert.Equal(t, strslice.StrSlice{""}, config.Config.Entrypoint) + return container.ContainerCreateCreatedBody{ID: "12345"}, nil + } + mockBackend.commitFunc = func(cID string, cfg *backend.ContainerCommitConfig) (string, error) { + // Check the runConfig.Cmd sent to commit() + assert.Equal(t, origCmd, cfg.Config.Cmd) + assert.Equal(t, cachedCmd, cfg.ContainerConfig.Cmd) + assert.Equal(t, strslice.StrSlice(nil), cfg.Config.Entrypoint) + return "", nil + } + + 
req := defaultDispatchReq(b, "abcdef")
+	require.NoError(t, from(req))
+	b.buildArgs.AddArg("one", strPtr("two"))
+
+	req.args = []string{"echo foo"}
+	require.NoError(t, run(req))
+
+	// Check that runConfig.Cmd has not been modified by run
+	assert.Equal(t, origCmd, req.state.runConfig.Cmd)
+}
diff --git a/components/engine/builder/dockerfile/dispatchers_unix.go b/components/engine/builder/dockerfile/dispatchers_unix.go
new file mode 100644
index 0000000000..62ee371dfe
--- /dev/null
+++ b/components/engine/builder/dockerfile/dispatchers_unix.go
@@ -0,0 +1,34 @@
+// +build !windows
+
+package dockerfile
+
+import (
+	"errors"
+	"fmt"
+	"os"
+	"path/filepath"
+)
+
+// normaliseWorkdir normalises a user requested working directory in a
+// platform semantically consistent way.
+func normaliseWorkdir(current string, requested string) (string, error) {
+	if requested == "" {
+		return "", errors.New("cannot normalise nothing")
+	}
+	current = filepath.FromSlash(current)
+	requested = filepath.FromSlash(requested)
+	if !filepath.IsAbs(requested) {
+		return filepath.Join(string(os.PathSeparator), current, requested), nil
+	}
+	return requested, nil
+}
+
+func errNotJSON(command, _ string) error {
+	return fmt.Errorf("%s requires the arguments to be in JSON form", command)
+}
+
+// equalEnvKeys compares two strings and returns true if they are equal. On
+// Windows this comparison is case insensitive.
+func equalEnvKeys(from, to string) bool {
+	return from == to
+}
diff --git a/components/engine/builder/dockerfile/dispatchers_unix_test.go b/components/engine/builder/dockerfile/dispatchers_unix_test.go
new file mode 100644
index 0000000000..4aae6b460e
--- /dev/null
+++ b/components/engine/builder/dockerfile/dispatchers_unix_test.go
@@ -0,0 +1,33 @@
+// +build !windows
+
+package dockerfile
+
+import (
+	"testing"
+)
+
+func TestNormaliseWorkdir(t *testing.T) {
+	testCases := []struct{ current, requested, expected, expectedError string }{
+		{``, ``, ``, `cannot normalise nothing`},
+		{``, `foo`, `/foo`, ``},
+		{``, `/foo`, `/foo`, ``},
+		{`/foo`, `bar`, `/foo/bar`, ``},
+		{`/foo`, `/bar`, `/bar`, ``},
+	}
+
+	for _, test := range testCases {
+		normalised, err := normaliseWorkdir(test.current, test.requested)
+
+		if test.expectedError != "" && err == nil {
+			t.Fatalf("NormaliseWorkdir should return an error %s, got nil", test.expectedError)
+		}
+
+		if test.expectedError != "" && err.Error() != test.expectedError {
+			t.Fatalf("NormaliseWorkdir returned wrong error. Expected %s, got %s", test.expectedError, err.Error())
+		}
+
+		if normalised != test.expected {
+			t.Fatalf("NormaliseWorkdir error. Expected %s for current %s and requested %s, got %s", test.expected, test.current, test.requested, normalised)
+		}
+	}
+}
diff --git a/components/engine/builder/dockerfile/dispatchers_windows.go b/components/engine/builder/dockerfile/dispatchers_windows.go
new file mode 100644
index 0000000000..71f7c9288f
--- /dev/null
+++ b/components/engine/builder/dockerfile/dispatchers_windows.go
@@ -0,0 +1,93 @@
+package dockerfile
+
+import (
+	"errors"
+	"fmt"
+	"os"
+	"path/filepath"
+	"regexp"
+	"strings"
+
+	"github.com/docker/docker/pkg/system"
+)
+
+var pattern = regexp.MustCompile(`^[a-zA-Z]:\.$`)
+
+// normaliseWorkdir normalises a user requested working directory in a
+// platform semantically consistent way.
+func normaliseWorkdir(current string, requested string) (string, error) {
+	if requested == "" {
+		return "", errors.New("cannot normalise nothing")
+	}
+
+	// `filepath.Clean` will replace "" with "." so skip in that case
+	if current != "" {
+		current = filepath.Clean(current)
+	}
+	if requested != "" {
+		requested = filepath.Clean(requested)
+	}
+
+	// If either current or requested in Windows is:
+	// C:
+	// C:.
+	// then an error will be thrown as the definition for the above
+	// refers to `current directory on drive C:`
+	// Since filepath.Clean() will automatically normalize the above
+	// to `C:.`, we only need to check the last format
+	if pattern.MatchString(current) {
+		return "", fmt.Errorf("%s is not a directory. If you are specifying a drive letter, please add a trailing '\\'", current)
+	}
+	if pattern.MatchString(requested) {
+		return "", fmt.Errorf("%s is not a directory. If you are specifying a drive letter, please add a trailing '\\'", requested)
+	}
+
+	// Target semantics is C:\somefolder, specifically in the format:
+	// UPPERCASEDriveLetter-Colon-Backslash-FolderName. We are already
+	// guaranteed that `current`, if set, is consistent. This allows us to
+	// cope correctly with any of the following in a Dockerfile:
+	//	WORKDIR a                       --> C:\a
+	//	WORKDIR c:\\foo                 --> C:\foo
+	//	WORKDIR \\foo                   --> C:\foo
+	//	WORKDIR /foo                    --> C:\foo
+	//	WORKDIR c:\\foo \ WORKDIR bar   --> C:\foo --> C:\foo\bar
+	//	WORKDIR C:/foo \ WORKDIR bar    --> C:\foo --> C:\foo\bar
+	//	WORKDIR C:/foo \ WORKDIR \\bar  --> C:\foo --> C:\bar
+	//	WORKDIR /foo \ WORKDIR c:/bar   --> C:\foo --> C:\bar
+	if len(current) == 0 || system.IsAbs(requested) {
+		if (requested[0] == os.PathSeparator) ||
+			(len(requested) > 1 && string(requested[1]) != ":") ||
+			(len(requested) == 1) {
+			requested = filepath.Join(`C:\`, requested)
+		}
+	} else {
+		requested = filepath.Join(current, requested)
+	}
+	// Upper-case drive letter
+	return (strings.ToUpper(string(requested[0])) + requested[1:]), nil
+}
+
+func errNotJSON(command, original string) error {
+	// For Windows users, give a hint if it looks like it might contain
+	// a path which hasn't been escaped such as ["c:\windows\system32\prog.exe", "-param"],
+	// as JSON must be escaped. Unfortunate...
+	//
+	// Specifically looking for quote-driveletter-colon-backslash, there's no
+	// double backslash and a [] pair. No, this is not perfect, but it doesn't
+	// have to be. It's simply a hint to make life a little easier.
+	extra := ""
+	original = filepath.FromSlash(strings.ToLower(strings.Replace(strings.ToLower(original), strings.ToLower(command)+" ", "", -1)))
+	if len(regexp.MustCompile(`"[a-z]:\\.*`).FindStringSubmatch(original)) > 0 &&
+		!strings.Contains(original, `\\`) &&
+		strings.Contains(original, "[") &&
+		strings.Contains(original, "]") {
+		extra = fmt.Sprintf(`. It looks like '%s' includes a file path without an escaped back-slash. JSON requires back-slashes to be escaped such as ["c:\\path\\to\\file.exe", "/parameter"]`, original)
+	}
+	return fmt.Errorf("%s requires the arguments to be in JSON form%s", command, extra)
+}
+
+// equalEnvKeys compares two strings and returns true if they are equal. On
+// Windows this comparison is case insensitive.
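+// For example, "PATH" and "Path" are equal here, whereas the Unix
+// implementation above compares the keys byte-for-byte.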
+func equalEnvKeys(from, to string) bool { + return strings.ToUpper(from) == strings.ToUpper(to) +} diff --git a/components/engine/builder/dockerfile/dispatchers_windows_test.go b/components/engine/builder/dockerfile/dispatchers_windows_test.go new file mode 100644 index 0000000000..3319c06582 --- /dev/null +++ b/components/engine/builder/dockerfile/dispatchers_windows_test.go @@ -0,0 +1,40 @@ +// +build windows + +package dockerfile + +import "testing" + +func TestNormaliseWorkdir(t *testing.T) { + tests := []struct{ current, requested, expected, etext string }{ + {``, ``, ``, `cannot normalise nothing`}, + {``, `C:`, ``, `C:. is not a directory. If you are specifying a drive letter, please add a trailing '\'`}, + {``, `C:.`, ``, `C:. is not a directory. If you are specifying a drive letter, please add a trailing '\'`}, + {`c:`, `\a`, ``, `c:. is not a directory. If you are specifying a drive letter, please add a trailing '\'`}, + {`c:.`, `\a`, ``, `c:. is not a directory. If you are specifying a drive letter, please add a trailing '\'`}, + {``, `a`, `C:\a`, ``}, + {``, `c:\foo`, `C:\foo`, ``}, + {``, `c:\\foo`, `C:\foo`, ``}, + {``, `\foo`, `C:\foo`, ``}, + {``, `\\foo`, `C:\foo`, ``}, + {``, `/foo`, `C:\foo`, ``}, + {``, `C:/foo`, `C:\foo`, ``}, + {`C:\foo`, `bar`, `C:\foo\bar`, ``}, + {`C:\foo`, `/bar`, `C:\bar`, ``}, + {`C:\foo`, `\bar`, `C:\bar`, ``}, + } + for _, i := range tests { + r, e := normaliseWorkdir(i.current, i.requested) + + if i.etext != "" && e == nil { + t.Fatalf("TestNormaliseWorkingDir Expected error %s for '%s' '%s', got no error", i.etext, i.current, i.requested) + } + + if i.etext != "" && e.Error() != i.etext { + t.Fatalf("TestNormaliseWorkingDir Expected error %s for '%s' '%s', got %s", i.etext, i.current, i.requested, e.Error()) + } + + if r != i.expected { + t.Fatalf("TestNormaliseWorkingDir Expected '%s' for '%s' '%s', got '%s'", i.expected, i.current, i.requested, r) + } + } +} diff --git a/components/engine/builder/dockerfile/envVarTest b/components/engine/builder/dockerfile/envVarTest new file mode 100644 index 0000000000..946b278592 --- /dev/null +++ b/components/engine/builder/dockerfile/envVarTest @@ -0,0 +1,121 @@ +A|hello | hello +A|he'll'o | hello +A|he'llo | error +A|he\'llo | he'llo +A|he\\'llo | error +A|abc\tdef | abctdef +A|"abc\tdef" | abc\tdef +A|"abc\\tdef" | abc\tdef +A|'abc\tdef' | abc\tdef +A|hello\ | hello +A|hello\\ | hello\ +A|"hello | error +A|"hello\" | error +A|"hel'lo" | hel'lo +A|'hello | error +A|'hello\' | hello\ +A|'hello\there' | hello\there +A|'hello\\there' | hello\\there +A|"''" | '' +A|$. | $. +A|$1 | +A|he$1x | hex +A|he$.x | he$.x +# Next one is different on Windows as $pwd==$PWD +U|he$pwd. | he. +W|he$pwd. | he/home. 
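+# Each line of this fixture reads: <platform>|<input> | <expected output>.
+# The platform column appears to be A = all platforms, U = Unix only and
+# W = Windows only; an expected value of "error" means the word must fail
+# to process.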
+A|he$PWD | he/home +A|he\$PWD | he$PWD +A|he\\$PWD | he\/home +A|"he\$PWD" | he$PWD +A|"he\\$PWD" | he\/home +A|he\${} | he${} +A|he\${}xx | he${}xx +A|he${} | he +A|he${}xx | hexx +A|he${hi} | he +A|he${hi}xx | hexx +A|he${PWD} | he/home +A|he${.} | error +A|he${XXX:-000}xx | he000xx +A|he${PWD:-000}xx | he/homexx +A|he${XXX:-$PWD}xx | he/homexx +A|he${XXX:-${PWD:-yyy}}xx | he/homexx +A|he${XXX:-${YYY:-yyy}}xx | heyyyxx +A|he${XXX:YYY} | error +A|he${XXX:+${PWD}}xx | hexx +A|he${PWD:+${XXX}}xx | hexx +A|he${PWD:+${SHELL}}xx | hebashxx +A|he${XXX:+000}xx | hexx +A|he${PWD:+000}xx | he000xx +A|'he${XX}' | he${XX} +A|"he${PWD}" | he/home +A|"he'$PWD'" | he'/home' +A|"$PWD" | /home +A|'$PWD' | $PWD +A|'\$PWD' | \$PWD +A|'"hello"' | "hello" +A|he\$PWD | he$PWD +A|"he\$PWD" | he$PWD +A|'he\$PWD' | he\$PWD +A|he${PWD | error +A|he${PWD:=000}xx | error +A|he${PWD:+${PWD}:}xx | he/home:xx +A|he${XXX:-\$PWD:}xx | he$PWD:xx +A|he${XXX:-\${PWD}z}xx | he${PWDz}xx +A|안녕하세요 | 안녕하세요 +A|안'녕'하세요 | 안녕하세요 +A|안'녕하세요 | error +A|안녕\'하세요 | 안녕'하세요 +A|안\\'녕하세요 | error +A|안녕\t하세요 | 안녕t하세요 +A|"안녕\t하세요" | 안녕\t하세요 +A|'안녕\t하세요 | error +A|안녕하세요\ | 안녕하세요 +A|안녕하세요\\ | 안녕하세요\ +A|"안녕하세요 | error +A|"안녕하세요\" | error +A|"안녕'하세요" | 안녕'하세요 +A|'안녕하세요 | error +A|'안녕하세요\' | 안녕하세요\ +A|안녕$1x | 안녕x +A|안녕$.x | 안녕$.x +# Next one is different on Windows as $pwd==$PWD +U|안녕$pwd. | 안녕. +W|안녕$pwd. | 안녕/home. +A|안녕$PWD | 안녕/home +A|안녕\$PWD | 안녕$PWD +A|안녕\\$PWD | 안녕\/home +A|안녕\${} | 안녕${} +A|안녕\${}xx | 안녕${}xx +A|안녕${} | 안녕 +A|안녕${}xx | 안녕xx +A|안녕${hi} | 안녕 +A|안녕${hi}xx | 안녕xx +A|안녕${PWD} | 안녕/home +A|안녕${.} | error +A|안녕${XXX:-000}xx | 안녕000xx +A|안녕${PWD:-000}xx | 안녕/homexx +A|안녕${XXX:-$PWD}xx | 안녕/homexx +A|안녕${XXX:-${PWD:-yyy}}xx | 안녕/homexx +A|안녕${XXX:-${YYY:-yyy}}xx | 안녕yyyxx +A|안녕${XXX:YYY} | error +A|안녕${XXX:+${PWD}}xx | 안녕xx +A|안녕${PWD:+${XXX}}xx | 안녕xx +A|안녕${PWD:+${SHELL}}xx | 안녕bashxx +A|안녕${XXX:+000}xx | 안녕xx +A|안녕${PWD:+000}xx | 안녕000xx +A|'안녕${XX}' | 안녕${XX} +A|"안녕${PWD}" | 안녕/home +A|"안녕'$PWD'" | 안녕'/home' +A|'"안녕"' | "안녕" +A|안녕\$PWD | 안녕$PWD +A|"안녕\$PWD" | 안녕$PWD +A|'안녕\$PWD' | 안녕\$PWD +A|안녕${PWD | error +A|안녕${PWD:=000}xx | error +A|안녕${PWD:+${PWD}:}xx | 안녕/home:xx +A|안녕${XXX:-\$PWD:}xx | 안녕$PWD:xx +A|안녕${XXX:-\${PWD}z}xx | 안녕${PWDz}xx +A|$KOREAN | 한국어 +A|안녕$KOREAN | 안녕한국어 diff --git a/components/engine/builder/dockerfile/evaluator.go b/components/engine/builder/dockerfile/evaluator.go new file mode 100644 index 0000000000..deeda66b05 --- /dev/null +++ b/components/engine/builder/dockerfile/evaluator.go @@ -0,0 +1,323 @@ +// Package dockerfile is the evaluation step in the Dockerfile parse/evaluate pipeline. +// +// It incorporates a dispatch table based on the parser.Node values (see the +// parser package for more information) that are yielded from the parser itself. +// Calling newBuilder with the BuildOpts struct can be used to customize the +// experience for execution purposes only. Parsing is controlled in the parser +// package, and this division of responsibility should be respected. +// +// Please see the jump table targets for the actual invocations, most of which +// will call out to the functions in internals.go to deal with their tasks. +// +// ONBUILD is a special case, which is covered in the onbuild() func in +// dispatchers.go. +// +// The evaluator uses the concept of "steps", which are usually each processable +// line in the Dockerfile. Each step is numbered and certain actions are taken +// before and after each step, such as creating an image ID and removing temporary +// containers and images. 
Note that ONBUILD creates a kinda-sorta "sub run" which +// includes its own set of steps (usually only one of them). +package dockerfile + +import ( + "bytes" + "fmt" + "strings" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/builder" + "github.com/docker/docker/builder/dockerfile/command" + "github.com/docker/docker/builder/dockerfile/parser" + "github.com/docker/docker/pkg/system" + "github.com/docker/docker/runconfig/opts" + "github.com/pkg/errors" +) + +// Environment variable interpolation will happen on these statements only. +var replaceEnvAllowed = map[string]bool{ + command.Env: true, + command.Label: true, + command.Add: true, + command.Copy: true, + command.Workdir: true, + command.Expose: true, + command.Volume: true, + command.User: true, + command.StopSignal: true, + command.Arg: true, +} + +// Certain commands are allowed to have their args split into more +// words after env var replacements. Meaning: +// ENV foo="123 456" +// EXPOSE $foo +// should result in the same thing as: +// EXPOSE 123 456 +// and not treat "123 456" as a single word. +// Note that: EXPOSE "$foo" and EXPOSE $foo are not the same thing. +// Quotes will cause it to still be treated as single word. +var allowWordExpansion = map[string]bool{ + command.Expose: true, +} + +type dispatchRequest struct { + builder *Builder // TODO: replace this with a smaller interface + args []string + attributes map[string]bool + flags *BFlags + original string + shlex *ShellLex + state *dispatchState + source builder.Source +} + +func newDispatchRequestFromOptions(options dispatchOptions, builder *Builder, args []string) dispatchRequest { + return dispatchRequest{ + builder: builder, + args: args, + attributes: options.node.Attributes, + original: options.node.Original, + flags: NewBFlagsWithArgs(options.node.Flags), + shlex: options.shlex, + state: options.state, + source: options.source, + } +} + +type dispatcher func(dispatchRequest) error + +var evaluateTable map[string]dispatcher + +func init() { + evaluateTable = map[string]dispatcher{ + command.Add: add, + command.Arg: arg, + command.Cmd: cmd, + command.Copy: dispatchCopy, // copy() is a go builtin + command.Entrypoint: entrypoint, + command.Env: env, + command.Expose: expose, + command.From: from, + command.Healthcheck: healthcheck, + command.Label: label, + command.Maintainer: maintainer, + command.Onbuild: onbuild, + command.Run: run, + command.Shell: shell, + command.StopSignal: stopSignal, + command.User: user, + command.Volume: volume, + command.Workdir: workdir, + } +} + +func formatStep(stepN int, stepTotal int) string { + return fmt.Sprintf("%d/%d", stepN+1, stepTotal) +} + +// This method is the entrypoint to all statement handling routines. +// +// Almost all nodes will have this structure: +// Child[Node, Node, Node] where Child is from parser.Node.Children and each +// node comes from parser.Node.Next. This forms a "line" with a statement and +// arguments and we process them in this normalized form by hitting +// evaluateTable with the leaf nodes of the command and the Builder object. +// +// ONBUILD is a special case; in this case the parser will emit: +// Child[Node, Child[Node, Node...]] where the first node is the literal +// "onbuild" and the child entrypoint is the command of the ONBUILD statement, +// such as `RUN` in ONBUILD RUN foo. There is special case logic in here to +// deal with that, at least until it becomes more of a general concern with new +// features. 
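+//
+// For example, `ENV foo bar` reaches the env() dispatcher with args
+// ["foo", "bar"], while `ONBUILD RUN foo` is routed to onbuild() with the
+// nested instruction name as its first arg (see handleOnBuildNode below).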
+func (b *Builder) dispatch(options dispatchOptions) (*dispatchState, error) {
+	node := options.node
+	cmd := node.Value
+	upperCasedCmd := strings.ToUpper(cmd)
+
+	// To ensure the user is given a decent error message if the platform
+	// on which the daemon is running does not support a builder command.
+	if err := platformSupports(strings.ToLower(cmd)); err != nil {
+		buildsFailed.WithValues(metricsCommandNotSupportedError).Inc()
+		return nil, err
+	}
+
+	msg := bytes.NewBufferString(fmt.Sprintf("Step %s : %s%s",
+		options.stepMsg, upperCasedCmd, formatFlags(node.Flags)))
+
+	args := []string{}
+	ast := node
+	if cmd == command.Onbuild {
+		var err error
+		ast, args, err = handleOnBuildNode(node, msg)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	runConfigEnv := options.state.runConfig.Env
+	envs := append(runConfigEnv, b.buildArgs.FilterAllowed(runConfigEnv)...)
+	processFunc := createProcessWordFunc(options.shlex, cmd, envs)
+	words, err := getDispatchArgsFromNode(ast, processFunc, msg)
+	if err != nil {
+		buildsFailed.WithValues(metricsErrorProcessingCommandsError).Inc()
+		return nil, err
+	}
+	args = append(args, words...)
+
+	fmt.Fprintln(b.Stdout, msg.String())
+
+	f, ok := evaluateTable[cmd]
+	if !ok {
+		buildsFailed.WithValues(metricsUnknownInstructionError).Inc()
+		return nil, fmt.Errorf("unknown instruction: %s", upperCasedCmd)
+	}
+	if err := f(newDispatchRequestFromOptions(options, b, args)); err != nil {
+		return nil, err
+	}
+	options.state.updateRunConfig()
+	return options.state, nil
+}
+
+type dispatchOptions struct {
+	state   *dispatchState
+	stepMsg string
+	node    *parser.Node
+	shlex   *ShellLex
+	source  builder.Source
+}
+
+// dispatchState is a data object which is modified by dispatchers
+type dispatchState struct {
+	runConfig  *container.Config
+	maintainer string
+	cmdSet     bool
+	imageID    string
+	baseImage  builder.Image
+	stageName  string
+}
+
+func newDispatchState() *dispatchState {
+	return &dispatchState{runConfig: &container.Config{}}
+}
+
+func (s *dispatchState) updateRunConfig() {
+	s.runConfig.Image = s.imageID
+}
+
+// hasFromImage returns true if the builder has processed a `FROM <image>` line
+func (s *dispatchState) hasFromImage() bool {
+	return s.imageID != "" || (s.baseImage != nil && s.baseImage.ImageID() == "")
+}
+
+func (s *dispatchState) isCurrentStage(target string) bool {
+	if target == "" {
+		return false
+	}
+	return strings.EqualFold(s.stageName, target)
+}
+
+func (s *dispatchState) beginStage(stageName string, image builder.Image) {
+	s.stageName = stageName
+	s.imageID = image.ImageID()
+
+	if image.RunConfig() != nil {
+		s.runConfig = image.RunConfig()
+	} else {
+		s.runConfig = &container.Config{}
+	}
+	s.baseImage = image
+	s.setDefaultPath()
+}
+
+// Add the default PATH to runConfig.ENV if one exists for the platform and there
+// is no PATH set. Note that windows won't have one as it's set by HCS
+func (s *dispatchState) setDefaultPath() {
+	if system.DefaultPathEnv == "" {
+		return
+	}
+	envMap := opts.ConvertKVStringsToMap(s.runConfig.Env)
+	if _, ok := envMap["PATH"]; !ok {
+		s.runConfig.Env = append(s.runConfig.Env, "PATH="+system.DefaultPathEnv)
+	}
+}
+
+func handleOnBuildNode(ast *parser.Node, msg *bytes.Buffer) (*parser.Node, []string, error) {
+	if ast.Next == nil {
+		return nil, nil, errors.New("ONBUILD requires at least one argument")
+	}
+	ast = ast.Next.Children[0]
+	msg.WriteString(" " + ast.Value + formatFlags(ast.Flags))
+	return ast, []string{ast.Value}, nil
+}
+
+func formatFlags(flags []string) string {
+	if len(flags) > 0 {
+		return " " + strings.Join(flags, " ")
+	}
+	return ""
+}
+
+func getDispatchArgsFromNode(ast *parser.Node, processFunc processWordFunc, msg *bytes.Buffer) ([]string, error) {
+	args := []string{}
+	for i := 0; ast.Next != nil; i++ {
+		ast = ast.Next
+		words, err := processFunc(ast.Value)
+		if err != nil {
+			return nil, err
+		}
+		args = append(args, words...)
+		msg.WriteString(" " + ast.Value)
+	}
+	return args, nil
+}
+
+type processWordFunc func(string) ([]string, error)
+
+func createProcessWordFunc(shlex *ShellLex, cmd string, envs []string) processWordFunc {
+	switch {
+	case !replaceEnvAllowed[cmd]:
+		return func(word string) ([]string, error) {
+			return []string{word}, nil
+		}
+	case allowWordExpansion[cmd]:
+		return func(word string) ([]string, error) {
+			return shlex.ProcessWords(word, envs)
+		}
+	default:
+		return func(word string) ([]string, error) {
+			word, err := shlex.ProcessWord(word, envs)
+			return []string{word}, err
+		}
+	}
+}
+
+// checkDispatch does a simple check for syntax errors of the Dockerfile.
+// Because some instructions (those involving ARG or ENV values, for example)
+// can only be validated at runtime, this syntax check is not complete and
+// cannot replace the runtime check. Instead, this function is a helper that
+// lets the user find obvious errors in a Dockerfile earlier on.
+func checkDispatch(ast *parser.Node) error {
+	cmd := ast.Value
+	upperCasedCmd := strings.ToUpper(cmd)
+
+	// To ensure the user is given a decent error message if the platform
+	// on which the daemon is running does not support a builder command.
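+	// (For example, a Windows daemon rejects STOPSIGNAL; see the
+	// platformSupports implementation in evaluator_windows.go.)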
+	if err := platformSupports(strings.ToLower(cmd)); err != nil {
+		return err
+	}
+
+	// If the instruction is ONBUILD, make sure it is followed by at least
+	// one argument
+	if upperCasedCmd == "ONBUILD" {
+		if ast.Next == nil {
+			buildsFailed.WithValues(metricsMissingOnbuildArgumentsError).Inc()
+			return errors.New("ONBUILD requires at least one argument")
+		}
+	}
+
+	if _, ok := evaluateTable[cmd]; ok {
+		return nil
+	}
+	buildsFailed.WithValues(metricsUnknownInstructionError).Inc()
+	return errors.Errorf("unknown instruction: %s", upperCasedCmd)
+}
diff --git a/components/engine/builder/dockerfile/evaluator_test.go b/components/engine/builder/dockerfile/evaluator_test.go
new file mode 100644
index 0000000000..544a93fc30
--- /dev/null
+++ b/components/engine/builder/dockerfile/evaluator_test.go
@@ -0,0 +1,210 @@
+package dockerfile
+
+import (
+	"io/ioutil"
+	"strings"
+	"testing"
+
+	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/api/types/container"
+	"github.com/docker/docker/builder/dockerfile/parser"
+	"github.com/docker/docker/builder/remotecontext"
+	"github.com/docker/docker/pkg/archive"
+	"github.com/docker/docker/pkg/reexec"
+)
+
+type dispatchTestCase struct {
+	name, dockerfile, expectedError string
+	files                           map[string]string
+}
+
+func init() {
+	reexec.Init()
+}
+
+func initDispatchTestCases() []dispatchTestCase {
+	dispatchTestCases := []dispatchTestCase{{
+		name: "copyEmptyWhitespace",
+		dockerfile: `COPY
+	quux \
+	bar`,
+		expectedError: "COPY requires at least two arguments",
+	},
+		{
+			name:          "ONBUILD forbidden FROM",
+			dockerfile:    "ONBUILD FROM scratch",
+			expectedError: "FROM isn't allowed as an ONBUILD trigger",
+			files:         nil,
+		},
+		{
+			name:          "ONBUILD forbidden MAINTAINER",
+			dockerfile:    "ONBUILD MAINTAINER docker.io",
+			expectedError: "MAINTAINER isn't allowed as an ONBUILD trigger",
+			files:         nil,
+		},
+		{
+			name:          "ARG two arguments",
+			dockerfile:    "ARG foo bar",
+			expectedError: "ARG requires exactly one argument",
+			files:         nil,
+		},
+		{
+			name:          "MAINTAINER unknown flag",
+			dockerfile:    "MAINTAINER --boo joe@example.com",
+			expectedError: "Unknown flag: boo",
+			files:         nil,
+		},
+		{
+			name:          "ADD multiple files to file",
+			dockerfile:    "ADD file1.txt file2.txt test",
+			expectedError: "When using ADD with more than one source file, the destination must be a directory and end with a /",
+			files:         map[string]string{"file1.txt": "test1", "file2.txt": "test2"},
+		},
+		{
+			name:          "JSON ADD multiple files to file",
+			dockerfile:    `ADD ["file1.txt", "file2.txt", "test"]`,
+			expectedError: "When using ADD with more than one source file, the destination must be a directory and end with a /",
+			files:         map[string]string{"file1.txt": "test1", "file2.txt": "test2"},
+		},
+		{
+			name:          "Wildcard ADD multiple files to file",
+			dockerfile:    "ADD file*.txt test",
+			expectedError: "When using ADD with more than one source file, the destination must be a directory and end with a /",
+			files:         map[string]string{"file1.txt": "test1", "file2.txt": "test2"},
+		},
+		{
+			name:          "Wildcard JSON ADD multiple files to file",
+			dockerfile:    `ADD ["file*.txt", "test"]`,
+			expectedError: "When using ADD with more than one source file, the destination must be a directory and end with a /",
+			files:         map[string]string{"file1.txt": "test1", "file2.txt": "test2"},
+		},
+		{
+			name:          "COPY multiple files to file",
+			dockerfile:    "COPY file1.txt file2.txt test",
+			expectedError: "When using COPY with more than one source file, the destination must be a directory and end with a /",
+			files:
map[string]string{"file1.txt": "test1", "file2.txt": "test2"}, + }, + { + name: "JSON COPY multiple files to file", + dockerfile: `COPY ["file1.txt", "file2.txt", "test"]`, + expectedError: "When using COPY with more than one source file, the destination must be a directory and end with a /", + files: map[string]string{"file1.txt": "test1", "file2.txt": "test2"}, + }, + { + name: "ADD multiple files to file with whitespace", + dockerfile: `ADD [ "test file1.txt", "test file2.txt", "test" ]`, + expectedError: "When using ADD with more than one source file, the destination must be a directory and end with a /", + files: map[string]string{"test file1.txt": "test1", "test file2.txt": "test2"}, + }, + { + name: "COPY multiple files to file with whitespace", + dockerfile: `COPY [ "test file1.txt", "test file2.txt", "test" ]`, + expectedError: "When using COPY with more than one source file, the destination must be a directory and end with a /", + files: map[string]string{"test file1.txt": "test1", "test file2.txt": "test2"}, + }, + { + name: "COPY wildcard no files", + dockerfile: `COPY file*.txt /tmp/`, + expectedError: "COPY failed: no source files were specified", + files: nil, + }, + { + name: "COPY url", + dockerfile: `COPY https://index.docker.io/robots.txt /`, + expectedError: "source can't be a URL for COPY", + files: nil, + }, + { + name: "Chaining ONBUILD", + dockerfile: `ONBUILD ONBUILD RUN touch foobar`, + expectedError: "Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed", + files: nil, + }, + { + name: "Invalid instruction", + dockerfile: `foo bar`, + expectedError: "unknown instruction: FOO", + files: nil, + }} + + return dispatchTestCases +} + +func TestDispatch(t *testing.T) { + testCases := initDispatchTestCases() + + for _, testCase := range testCases { + executeTestCase(t, testCase) + } +} + +func executeTestCase(t *testing.T, testCase dispatchTestCase) { + contextDir, cleanup := createTestTempDir(t, "", "builder-dockerfile-test") + defer cleanup() + + for filename, content := range testCase.files { + createTestTempFile(t, contextDir, filename, content, 0777) + } + + tarStream, err := archive.Tar(contextDir, archive.Uncompressed) + + if err != nil { + t.Fatalf("Error when creating tar stream: %s", err) + } + + defer func() { + if err = tarStream.Close(); err != nil { + t.Fatalf("Error when closing tar stream: %s", err) + } + }() + + context, err := remotecontext.MakeTarSumContext(tarStream) + + if err != nil { + t.Fatalf("Error when creating tar context: %s", err) + } + + defer func() { + if err = context.Close(); err != nil { + t.Fatalf("Error when closing tar context: %s", err) + } + }() + + r := strings.NewReader(testCase.dockerfile) + result, err := parser.Parse(r) + + if err != nil { + t.Fatalf("Error when parsing Dockerfile: %s", err) + } + + options := &types.ImageBuildOptions{ + BuildArgs: make(map[string]*string), + } + + b := &Builder{ + options: options, + Stdout: ioutil.Discard, + buildArgs: newBuildArgs(options.BuildArgs), + } + + shlex := NewShellLex(parser.DefaultEscapeToken) + n := result.AST + state := &dispatchState{runConfig: &container.Config{}} + opts := dispatchOptions{ + state: state, + stepMsg: formatStep(0, len(n.Children)), + node: n.Children[0], + shlex: shlex, + source: context, + } + state, err = b.dispatch(opts) + + if err == nil { + t.Fatalf("No error when executing test %s", testCase.name) + } + + if !strings.Contains(err.Error(), testCase.expectedError) { + t.Fatalf("Wrong error message. Should be \"%s\". 
Got \"%s\"", testCase.expectedError, err.Error()) + } + +} diff --git a/components/engine/builder/dockerfile/evaluator_unix.go b/components/engine/builder/dockerfile/evaluator_unix.go new file mode 100644 index 0000000000..28fd5b156b --- /dev/null +++ b/components/engine/builder/dockerfile/evaluator_unix.go @@ -0,0 +1,9 @@ +// +build !windows + +package dockerfile + +// platformSupports is a short-term function to give users a quality error +// message if a Dockerfile uses a command not supported on the platform. +func platformSupports(command string) error { + return nil +} diff --git a/components/engine/builder/dockerfile/evaluator_windows.go b/components/engine/builder/dockerfile/evaluator_windows.go new file mode 100644 index 0000000000..72483a2ec8 --- /dev/null +++ b/components/engine/builder/dockerfile/evaluator_windows.go @@ -0,0 +1,13 @@ +package dockerfile + +import "fmt" + +// platformSupports is gives users a quality error message if a Dockerfile uses +// a command not supported on the platform. +func platformSupports(command string) error { + switch command { + case "stopsignal": + return fmt.Errorf("The daemon on this platform does not support the command '%s'", command) + } + return nil +} diff --git a/components/engine/builder/dockerfile/imagecontext.go b/components/engine/builder/dockerfile/imagecontext.go new file mode 100644 index 0000000000..f89cc8d9ce --- /dev/null +++ b/components/engine/builder/dockerfile/imagecontext.go @@ -0,0 +1,192 @@ +package dockerfile + +import ( + "strconv" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/builder" + "github.com/docker/docker/builder/remotecontext" + "github.com/pkg/errors" + "golang.org/x/net/context" +) + +type buildStage struct { + id string + config *container.Config +} + +func newBuildStageFromImage(image builder.Image) *buildStage { + return &buildStage{id: image.ImageID(), config: image.RunConfig()} +} + +func (b *buildStage) ImageID() string { + return b.id +} + +func (b *buildStage) RunConfig() *container.Config { + return b.config +} + +func (b *buildStage) update(imageID string, runConfig *container.Config) { + b.id = imageID + b.config = runConfig +} + +var _ builder.Image = &buildStage{} + +// buildStages tracks each stage of a build so they can be retrieved by index +// or by name. 
+type buildStages struct { + sequence []*buildStage + byName map[string]*buildStage +} + +func newBuildStages() *buildStages { + return &buildStages{byName: make(map[string]*buildStage)} +} + +func (s *buildStages) getByName(name string) (builder.Image, bool) { + stage, ok := s.byName[strings.ToLower(name)] + return stage, ok +} + +func (s *buildStages) get(indexOrName string) (builder.Image, error) { + index, err := strconv.Atoi(indexOrName) + if err == nil { + if err := s.validateIndex(index); err != nil { + return nil, err + } + return s.sequence[index], nil + } + if im, ok := s.byName[strings.ToLower(indexOrName)]; ok { + return im, nil + } + return nil, nil +} + +func (s *buildStages) validateIndex(i int) error { + if i < 0 || i >= len(s.sequence)-1 { + if i == len(s.sequence)-1 { + return errors.New("refers to current build stage") + } + return errors.New("index out of bounds") + } + return nil +} + +func (s *buildStages) add(name string, image builder.Image) error { + stage := newBuildStageFromImage(image) + name = strings.ToLower(name) + if len(name) > 0 { + if _, ok := s.byName[name]; ok { + return errors.Errorf("duplicate name %s", name) + } + s.byName[name] = stage + } + s.sequence = append(s.sequence, stage) + return nil +} + +func (s *buildStages) update(imageID string, runConfig *container.Config) { + s.sequence[len(s.sequence)-1].update(imageID, runConfig) +} + +type getAndMountFunc func(string) (builder.Image, builder.ReleaseableLayer, error) + +// imageSources mounts images and provides a cache for mounted images. It tracks +// all images so they can be unmounted at the end of the build. +type imageSources struct { + byImageID map[string]*imageMount + getImage getAndMountFunc + cache pathCache // TODO: remove +} + +func newImageSources(ctx context.Context, options builderOptions) *imageSources { + getAndMount := func(idOrRef string) (builder.Image, builder.ReleaseableLayer, error) { + return options.Backend.GetImageAndReleasableLayer(ctx, idOrRef, backend.GetImageAndLayerOptions{ + ForcePull: options.Options.PullParent, + AuthConfig: options.Options.AuthConfigs, + Output: options.ProgressWriter.Output, + }) + } + + return &imageSources{ + byImageID: make(map[string]*imageMount), + getImage: getAndMount, + } +} + +func (m *imageSources) Get(idOrRef string) (*imageMount, error) { + if im, ok := m.byImageID[idOrRef]; ok { + return im, nil + } + + image, layer, err := m.getImage(idOrRef) + if err != nil { + return nil, err + } + im := newImageMount(image, layer) + m.byImageID[image.ImageID()] = im + return im, nil +} + +func (m *imageSources) Unmount() (retErr error) { + for _, im := range m.byImageID { + if err := im.unmount(); err != nil { + logrus.Error(err) + retErr = err + } + } + return +} + +// imageMount is a reference to an image that can be used as a builder.Source +type imageMount struct { + image builder.Image + source builder.Source + layer builder.ReleaseableLayer +} + +func newImageMount(image builder.Image, layer builder.ReleaseableLayer) *imageMount { + im := &imageMount{image: image, layer: layer} + return im +} + +func (im *imageMount) Source() (builder.Source, error) { + if im.source == nil { + if im.layer == nil { + return nil, errors.Errorf("empty context") + } + mountPath, err := im.layer.Mount() + if err != nil { + return nil, errors.Wrapf(err, "failed to mount %s", im.image.ImageID()) + } + source, err := remotecontext.NewLazyContext(mountPath) + if err != nil { + return nil, errors.Wrapf(err, "failed to create lazycontext for %s", mountPath) + } + 
im.source = source + } + return im.source, nil +} + +func (im *imageMount) unmount() error { + if im.layer == nil { + return nil + } + if err := im.layer.Release(); err != nil { + return errors.Wrapf(err, "failed to unmount previous build image %s", im.image.ImageID()) + } + return nil +} + +func (im *imageMount) Image() builder.Image { + return im.image +} + +func (im *imageMount) ImageID() string { + return im.image.ImageID() +} diff --git a/components/engine/builder/dockerfile/internals.go b/components/engine/builder/dockerfile/internals.go new file mode 100644 index 0000000000..42fe897b9e --- /dev/null +++ b/components/engine/builder/dockerfile/internals.go @@ -0,0 +1,351 @@ +package dockerfile + +// internals for handling commands. Covers many areas and a lot of +// non-contiguous functionality. Please read the comments. + +import ( + "crypto/sha256" + "encoding/hex" + "fmt" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/api/types/container" + containerpkg "github.com/docker/docker/container" + "github.com/docker/docker/pkg/jsonmessage" + "github.com/docker/docker/pkg/stringid" + "github.com/pkg/errors" +) + +func (b *Builder) commit(dispatchState *dispatchState, comment string) error { + if b.disableCommit { + return nil + } + if !dispatchState.hasFromImage() { + return errors.New("Please provide a source image with `from` prior to commit") + } + + runConfigWithCommentCmd := copyRunConfig(dispatchState.runConfig, withCmdComment(comment)) + hit, err := b.probeCache(dispatchState, runConfigWithCommentCmd) + if err != nil || hit { + return err + } + id, err := b.create(runConfigWithCommentCmd) + if err != nil { + return err + } + + return b.commitContainer(dispatchState, id, runConfigWithCommentCmd) +} + +// TODO: see if any args can be dropped +func (b *Builder) commitContainer(dispatchState *dispatchState, id string, containerConfig *container.Config) error { + if b.disableCommit { + return nil + } + + commitCfg := &backend.ContainerCommitConfig{ + ContainerCommitConfig: types.ContainerCommitConfig{ + Author: dispatchState.maintainer, + Pause: true, + // TODO: this should be done by Commit() + Config: copyRunConfig(dispatchState.runConfig), + }, + ContainerConfig: containerConfig, + } + + // Commit the container + imageID, err := b.docker.Commit(id, commitCfg) + if err != nil { + return err + } + + dispatchState.imageID = imageID + b.buildStages.update(imageID, dispatchState.runConfig) + return nil +} + +func (b *Builder) performCopy(state *dispatchState, inst copyInstruction) error { + srcHash := getSourceHashFromInfos(inst.infos) + + // TODO: should this have been using origPaths instead of srcHash in the comment? 
+	runConfigWithCommentCmd := copyRunConfig(
+		state.runConfig,
+		withCmdCommentString(fmt.Sprintf("%s %s in %s ", inst.cmdName, srcHash, inst.dest)))
+	if hit, err := b.probeCache(state, runConfigWithCommentCmd); err != nil || hit {
+		return err
+	}
+
+	container, err := b.docker.ContainerCreate(types.ContainerCreateConfig{
+		Config: runConfigWithCommentCmd,
+		// Set a log config to override any default value set on the daemon
+		HostConfig: &container.HostConfig{LogConfig: defaultLogConfig},
+	})
+	if err != nil {
+		return err
+	}
+	b.tmpContainers[container.ID] = struct{}{}
+
+	// Twiddle the destination when it's a relative path - meaning, make it
+	// relative to the WORKINGDIR
+	dest, err := normaliseDest(inst.cmdName, state.runConfig.WorkingDir, inst.dest)
+	if err != nil {
+		return err
+	}
+
+	for _, info := range inst.infos {
+		if err := b.docker.CopyOnBuild(container.ID, dest, info.root, info.path, inst.allowLocalDecompression); err != nil {
+			return err
+		}
+	}
+	return b.commitContainer(state, container.ID, runConfigWithCommentCmd)
+}
+
+// For backwards compat, if there's just one info then use it as the
+// cache look-up string, otherwise hash 'em all into one
+func getSourceHashFromInfos(infos []copyInfo) string {
+	if len(infos) == 1 {
+		return infos[0].hash
+	}
+	var hashes []string
+	for _, info := range infos {
+		hashes = append(hashes, info.hash)
+	}
+	return hashStringSlice("multi", hashes)
+}
+
+func hashStringSlice(prefix string, slice []string) string {
+	hasher := sha256.New()
+	hasher.Write([]byte(strings.Join(slice, ",")))
+	return prefix + ":" + hex.EncodeToString(hasher.Sum(nil))
+}
+
+type runConfigModifier func(*container.Config)
+
+func copyRunConfig(runConfig *container.Config, modifiers ...runConfigModifier) *container.Config {
+	copy := *runConfig
+	for _, modifier := range modifiers {
+		modifier(&copy)
+	}
+	return &copy
+}
+
+func withCmd(cmd []string) runConfigModifier {
+	return func(runConfig *container.Config) {
+		runConfig.Cmd = cmd
+	}
+}
+
+// withCmdComment sets Cmd to a nop comment string. See withCmdCommentString for
+// why there are two almost identical versions of this.
+func withCmdComment(comment string) runConfigModifier {
+	return func(runConfig *container.Config) {
+		runConfig.Cmd = append(getShell(runConfig), "#(nop) ", comment)
+	}
+}
+
+// withCmdCommentString exists to maintain compatibility with older versions.
+// A few instructions (workdir, copy, add) used a nop comment that is a single arg
+// whereas all the other instructions used a two arg comment string. This
+// function implements the single arg version.
+func withCmdCommentString(comment string) runConfigModifier {
+	return func(runConfig *container.Config) {
+		runConfig.Cmd = append(getShell(runConfig), "#(nop) "+comment)
+	}
+}
+
+func withEnv(env []string) runConfigModifier {
+	return func(runConfig *container.Config) {
+		runConfig.Env = env
+	}
+}
+
+// withEntrypointOverride sets an entrypoint on runConfig if the command is
+// not empty. The entrypoint is left unmodified if command is empty.
+//
+// The dockerfile RUN instruction expects to run without an entrypoint
+// so the runConfig entrypoint needs to be modified accordingly. ContainerCreate
+// will change a []string{""} entrypoint to nil, so we probe the cache with the
+// nil entrypoint.
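+//
+// For example, a shell-form RUN is created with an Entrypoint of [""] but
+// probed against the cache with a nil Entrypoint, as exercised by
+// TestRunWithBuildArgs earlier in this patch.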
+func withEntrypointOverride(cmd []string, entrypoint []string) runConfigModifier {
+	return func(runConfig *container.Config) {
+		if len(cmd) > 0 {
+			runConfig.Entrypoint = entrypoint
+		}
+	}
+}
+
+// getShell is a helper function which gets the right shell for prefixing the
+// shell-form of RUN, ENTRYPOINT and CMD instructions
+func getShell(c *container.Config) []string {
+	if len(c.Shell) == 0 {
+		return append([]string{}, defaultShell[:]...)
+	}
+	return append([]string{}, c.Shell[:]...)
+}
+
+// probeCache checks if a cache match can be found for the current build
+// instruction. If an image is found, probeCache returns `(true, nil)`.
+// If no image is found, it returns `(false, nil)`.
+// If there is any error, it returns `(false, err)`.
+func (b *Builder) probeCache(dispatchState *dispatchState, runConfig *container.Config) (bool, error) {
+	c := b.imageCache
+	if c == nil || b.options.NoCache || b.cacheBusted {
+		return false, nil
+	}
+	cache, err := c.GetCache(dispatchState.imageID, runConfig)
+	if err != nil {
+		return false, err
+	}
+	if len(cache) == 0 {
+		logrus.Debugf("[BUILDER] Cache miss: %s", runConfig.Cmd)
+		b.cacheBusted = true
+		return false, nil
+	}
+
+	fmt.Fprint(b.Stdout, " ---> Using cache\n")
+	logrus.Debugf("[BUILDER] Use cached version: %s", runConfig.Cmd)
+	dispatchState.imageID = string(cache)
+	b.buildStages.update(dispatchState.imageID, runConfig)
+
+	return true, nil
+}
+
+func (b *Builder) create(runConfig *container.Config) (string, error) {
+	resources := container.Resources{
+		CgroupParent: b.options.CgroupParent,
+		CPUShares:    b.options.CPUShares,
+		CPUPeriod:    b.options.CPUPeriod,
+		CPUQuota:     b.options.CPUQuota,
+		CpusetCpus:   b.options.CPUSetCPUs,
+		CpusetMems:   b.options.CPUSetMems,
+		Memory:       b.options.Memory,
+		MemorySwap:   b.options.MemorySwap,
+		Ulimits:      b.options.Ulimits,
+	}
+
+	// TODO: why not embed a hostconfig in builder?
+ hostConfig := &container.HostConfig{ + SecurityOpt: b.options.SecurityOpt, + Isolation: b.options.Isolation, + ShmSize: b.options.ShmSize, + Resources: resources, + NetworkMode: container.NetworkMode(b.options.NetworkMode), + // Set a log config to override any default value set on the daemon + LogConfig: defaultLogConfig, + ExtraHosts: b.options.ExtraHosts, + } + + // Create the container + c, err := b.docker.ContainerCreate(types.ContainerCreateConfig{ + Config: runConfig, + HostConfig: hostConfig, + }) + if err != nil { + return "", err + } + for _, warning := range c.Warnings { + fmt.Fprintf(b.Stdout, " ---> [Warning] %s\n", warning) + } + + b.tmpContainers[c.ID] = struct{}{} + fmt.Fprintf(b.Stdout, " ---> Running in %s\n", stringid.TruncateID(c.ID)) + return c.ID, nil +} + +var errCancelled = errors.New("build cancelled") + +func (b *Builder) run(cID string, cmd []string) (err error) { + attached := make(chan struct{}) + errCh := make(chan error) + go func() { + errCh <- b.docker.ContainerAttachRaw(cID, nil, b.Stdout, b.Stderr, true, attached) + }() + + select { + case err := <-errCh: + return err + case <-attached: + } + + finished := make(chan struct{}) + cancelErrCh := make(chan error, 1) + go func() { + select { + case <-b.clientCtx.Done(): + logrus.Debugln("Build cancelled, killing and removing container:", cID) + b.docker.ContainerKill(cID, 0) + b.removeContainer(cID) + cancelErrCh <- errCancelled + case <-finished: + cancelErrCh <- nil + } + }() + + if err := b.docker.ContainerStart(cID, nil, "", ""); err != nil { + close(finished) + if cancelErr := <-cancelErrCh; cancelErr != nil { + logrus.Debugf("Build cancelled (%v) and got an error from ContainerStart: %v", + cancelErr, err) + } + return err + } + + // Block on reading output from container, stop on err or chan closed + if err := <-errCh; err != nil { + close(finished) + if cancelErr := <-cancelErrCh; cancelErr != nil { + logrus.Debugf("Build cancelled (%v) and got an error from errCh: %v", + cancelErr, err) + } + return err + } + + waitC, err := b.docker.ContainerWait(b.clientCtx, cID, containerpkg.WaitConditionNotRunning) + if err != nil { + // Unable to begin waiting for container. 
+		close(finished)
+		if cancelErr := <-cancelErrCh; cancelErr != nil {
+			logrus.Debugf("Build cancelled (%v) and unable to begin ContainerWait: %v", cancelErr, err)
+		}
+		return err
+	}
+
+	if status := <-waitC; status.ExitCode() != 0 {
+		close(finished)
+		if cancelErr := <-cancelErrCh; cancelErr != nil {
+			logrus.Debugf("Build cancelled (%v) and got a non-zero code from ContainerWait: %d", cancelErr, status.ExitCode())
+		}
+		// TODO: change error type, because jsonmessage.JSONError assumes HTTP
+		return &jsonmessage.JSONError{
+			Message: fmt.Sprintf("The command '%s' returned a non-zero code: %d", strings.Join(cmd, " "), status.ExitCode()),
+			Code:    status.ExitCode(),
+		}
+	}
+	close(finished)
+	return <-cancelErrCh
+}
+
+func (b *Builder) removeContainer(c string) error {
+	rmConfig := &types.ContainerRmConfig{
+		ForceRemove:  true,
+		RemoveVolume: true,
+	}
+	if err := b.docker.ContainerRm(c, rmConfig); err != nil {
+		fmt.Fprintf(b.Stdout, "Error removing intermediate container %s: %v\n", stringid.TruncateID(c), err)
+		return err
+	}
+	return nil
+}
+
+func (b *Builder) clearTmp() {
+	for c := range b.tmpContainers {
+		if err := b.removeContainer(c); err != nil {
+			return
+		}
+		delete(b.tmpContainers, c)
+		fmt.Fprintf(b.Stdout, "Removing intermediate container %s\n", stringid.TruncateID(c))
+	}
+}
diff --git a/components/engine/builder/dockerfile/internals_test.go b/components/engine/builder/dockerfile/internals_test.go
new file mode 100644
index 0000000000..4cba2c9842
--- /dev/null
+++ b/components/engine/builder/dockerfile/internals_test.go
@@ -0,0 +1,130 @@
+package dockerfile
+
+import (
+	"fmt"
+	"testing"
+
+	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/api/types/backend"
+	"github.com/docker/docker/api/types/container"
+	"github.com/docker/docker/builder"
+	"github.com/docker/docker/builder/remotecontext"
+	"github.com/docker/docker/pkg/archive"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+func TestEmptyDockerfile(t *testing.T) {
+	contextDir, cleanup := createTestTempDir(t, "", "builder-dockerfile-test")
+	defer cleanup()
+
+	createTestTempFile(t, contextDir, builder.DefaultDockerfileName, "", 0777)
+
+	readAndCheckDockerfile(t, "emptyDockerfile", contextDir, "", "the Dockerfile (Dockerfile) cannot be empty")
+}
+
+func TestSymlinkDockerfile(t *testing.T) {
+	contextDir, cleanup := createTestTempDir(t, "", "builder-dockerfile-test")
+	defer cleanup()
+
+	createTestSymlink(t, contextDir, builder.DefaultDockerfileName, "/etc/passwd")
+
+	// The reason the error is "Cannot locate specified Dockerfile" is because
+	// in the builder, the symlink is resolved within the context, therefore
+	// Dockerfile -> /etc/passwd becomes etc/passwd from the context which is
+	// a nonexistent file.
+ expectedError := fmt.Sprintf("Cannot locate specified Dockerfile: %s", builder.DefaultDockerfileName) + + readAndCheckDockerfile(t, "symlinkDockerfile", contextDir, builder.DefaultDockerfileName, expectedError) +} + +func TestDockerfileOutsideTheBuildContext(t *testing.T) { + contextDir, cleanup := createTestTempDir(t, "", "builder-dockerfile-test") + defer cleanup() + + expectedError := "Forbidden path outside the build context: ../../Dockerfile ()" + + readAndCheckDockerfile(t, "DockerfileOutsideTheBuildContext", contextDir, "../../Dockerfile", expectedError) +} + +func TestNonExistingDockerfile(t *testing.T) { + contextDir, cleanup := createTestTempDir(t, "", "builder-dockerfile-test") + defer cleanup() + + expectedError := "Cannot locate specified Dockerfile: Dockerfile" + + readAndCheckDockerfile(t, "NonExistingDockerfile", contextDir, "Dockerfile", expectedError) +} + +func readAndCheckDockerfile(t *testing.T, testName, contextDir, dockerfilePath, expectedError string) { + tarStream, err := archive.Tar(contextDir, archive.Uncompressed) + require.NoError(t, err) + + defer func() { + if err = tarStream.Close(); err != nil { + t.Fatalf("Error when closing tar stream: %s", err) + } + }() + + if dockerfilePath == "" { // handled in BuildWithContext + dockerfilePath = builder.DefaultDockerfileName + } + + config := backend.BuildConfig{ + Options: &types.ImageBuildOptions{Dockerfile: dockerfilePath}, + Source: tarStream, + } + _, _, err = remotecontext.Detect(config) + assert.EqualError(t, err, expectedError) +} + +func TestCopyRunConfig(t *testing.T) { + defaultEnv := []string{"foo=1"} + defaultCmd := []string{"old"} + + var testcases = []struct { + doc string + modifiers []runConfigModifier + expected *container.Config + }{ + { + doc: "Set the command", + modifiers: []runConfigModifier{withCmd([]string{"new"})}, + expected: &container.Config{ + Cmd: []string{"new"}, + Env: defaultEnv, + }, + }, + { + doc: "Set the command to a comment", + modifiers: []runConfigModifier{withCmdComment("comment")}, + expected: &container.Config{ + Cmd: append(defaultShell, "#(nop) ", "comment"), + Env: defaultEnv, + }, + }, + { + doc: "Set the command and env", + modifiers: []runConfigModifier{ + withCmd([]string{"new"}), + withEnv([]string{"one", "two"}), + }, + expected: &container.Config{ + Cmd: []string{"new"}, + Env: []string{"one", "two"}, + }, + }, + } + + for _, testcase := range testcases { + runConfig := &container.Config{ + Cmd: defaultCmd, + Env: defaultEnv, + } + runConfigCopy := copyRunConfig(runConfig, testcase.modifiers...) + assert.Equal(t, testcase.expected, runConfigCopy, testcase.doc) + // Assert the original was not modified + assert.NotEqual(t, runConfig, runConfigCopy, testcase.doc) + } + +} diff --git a/components/engine/builder/dockerfile/internals_unix.go b/components/engine/builder/dockerfile/internals_unix.go new file mode 100644 index 0000000000..17102cb443 --- /dev/null +++ b/components/engine/builder/dockerfile/internals_unix.go @@ -0,0 +1,42 @@ +// +build !windows + +package dockerfile + +import ( + "os" + "path/filepath" + "strings" + + "github.com/docker/docker/pkg/system" +) + +// normaliseDest normalises the destination of a COPY/ADD command in a +// platform semantically consistent way. 
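+// For example, with a working directory of /app:
+//	sub/  --> /app/sub/
+//	/abs  --> /abs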
+func normaliseDest(cmdName, workingDir, requested string) (string, error) {
+	dest := filepath.FromSlash(requested)
+	endsInSlash := strings.HasSuffix(requested, string(os.PathSeparator))
+	if !system.IsAbs(requested) {
+		dest = filepath.Join(string(os.PathSeparator), filepath.FromSlash(workingDir), dest)
+		// Make sure we preserve any trailing slash
+		if endsInSlash {
+			dest += string(os.PathSeparator)
+		}
+	}
+	return dest, nil
+}
+
+func containsWildcards(name string) bool {
+	for i := 0; i < len(name); i++ {
+		ch := name[i]
+		if ch == '\\' {
+			i++
+		} else if ch == '*' || ch == '?' || ch == '[' {
+			return true
+		}
+	}
+	return false
+}
+
+func validateCopySourcePath(imageSource *imageMount, origPath string) error {
+	return nil
+}
diff --git a/components/engine/builder/dockerfile/internals_windows.go b/components/engine/builder/dockerfile/internals_windows.go
new file mode 100644
index 0000000000..81a0386251
--- /dev/null
+++ b/components/engine/builder/dockerfile/internals_windows.go
@@ -0,0 +1,95 @@
+package dockerfile
+
+import (
+	"fmt"
+	"os"
+	"path/filepath"
+	"strings"
+
+	"github.com/docker/docker/pkg/system"
+	"github.com/pkg/errors"
+)
+
+// normaliseDest normalises the destination of a COPY/ADD command in a
+// platform semantically consistent way.
+func normaliseDest(cmdName, workingDir, requested string) (string, error) {
+	dest := filepath.FromSlash(requested)
+	endsInSlash := strings.HasSuffix(dest, string(os.PathSeparator))
+
+	// We are guaranteed that the working directory is already consistent.
+	// However, Windows also has, for now, the limitation that ADD/COPY can
+	// only be done to the system drive, not any drives that might be present
+	// as a result of a bind mount.
+	//
+	// So... if the path requested is Linux-style absolute (/foo or \\foo),
+	// we assume it is the system drive. If it is a Windows-style absolute
+	// (DRIVE:\\foo), error if DRIVE is not C. And finally, ensure we
+	// strip any configured working directory's drive letter so that it
+	// can be subsequently legitimately converted to a Windows volume-style
+	// pathname.
+
+	// Not a typo - filepath.IsAbs, not system.IsAbs on this next check as
+	// we only want to validate where the DriveColon part has been supplied.
+	if filepath.IsAbs(dest) {
+		if strings.ToUpper(string(dest[0])) != "C" {
+			return "", fmt.Errorf("Windows does not support %s with a destination not on the system drive (C:)", cmdName)
+		}
+		dest = dest[2:] // Strip the drive letter
+	}
+
+	// Cannot handle relative where WorkingDir is not the system drive.
+	if len(workingDir) > 0 {
+		if ((len(workingDir) > 1) && !system.IsAbs(workingDir[2:])) || (len(workingDir) == 1) {
+			return "", fmt.Errorf("Current WorkingDir %s is not platform consistent", workingDir)
+		}
+		if !system.IsAbs(dest) {
+			if string(workingDir[0]) != "C" {
+				return "", fmt.Errorf("Windows does not support %s with relative paths when WORKDIR is not the system drive", cmdName)
+			}
+			dest = filepath.Join(string(os.PathSeparator), workingDir[2:], dest)
+			// Make sure we preserve any trailing slash
+			if endsInSlash {
+				dest += string(os.PathSeparator)
+			}
+		}
+	}
+	return dest, nil
+}
+
+func containsWildcards(name string) bool {
+	for i := 0; i < len(name); i++ {
+		ch := name[i]
+		if ch == '*' || ch == '?' || ch == '[' {
+			return true
+		}
+	}
+	return false
+}
+
+var pathBlacklist = map[string]bool{
+	"c:\\":         true,
+	"c:\\windows":  true,
+}
+
+func validateCopySourcePath(imageSource *imageMount, origPath string) error {
+	// validate windows paths from other images
+	if imageSource == nil {
+		return nil
+	}
+	origPath = filepath.FromSlash(origPath)
+	p := strings.ToLower(filepath.Clean(origPath))
+	if !filepath.IsAbs(p) {
+		if filepath.VolumeName(p) != "" {
+			if p[len(p)-2:] == ":." { // case where clean returns weird c:. paths
+				p = p[:len(p)-1]
+			}
+			p += "\\"
+		} else {
+			p = filepath.Join("c:\\", p)
+		}
+	}
+	if _, blacklisted := pathBlacklist[p]; blacklisted {
+		return errors.New("copy from c:\\ or c:\\windows is not allowed on windows")
+	}
+	return nil
+}
diff --git a/components/engine/builder/dockerfile/internals_windows_test.go b/components/engine/builder/dockerfile/internals_windows_test.go
new file mode 100644
index 0000000000..868a6671a3
--- /dev/null
+++ b/components/engine/builder/dockerfile/internals_windows_test.go
@@ -0,0 +1,51 @@
+// +build windows
+
+package dockerfile
+
+import "testing"
+
+func TestNormaliseDest(t *testing.T) {
+	tests := []struct{ current, requested, expected, etext string }{
+		{``, `D:\`, ``, `Windows does not support TEST with a destination not on the system drive (C:)`},
+		{``, `e:/`, ``, `Windows does not support TEST with a destination not on the system drive (C:)`},
+		{`invalid`, `./c1`, ``, `Current WorkingDir invalid is not platform consistent`},
+		{`C:`, ``, ``, `Current WorkingDir C: is not platform consistent`},
+		{`C`, ``, ``, `Current WorkingDir C is not platform consistent`},
+		{`D:\`, `.`, ``, "Windows does not support TEST with relative paths when WORKDIR is not the system drive"},
+		{``, `D`, `D`, ``},
+		{``, `./a1`, `.\a1`, ``},
+		{``, `.\b1`, `.\b1`, ``},
+		{``, `/`, `\`, ``},
+		{``, `\`, `\`, ``},
+		{``, `c:/`, `\`, ``},
+		{``, `c:\`, `\`, ``},
+		{``, `.`, `.`, ``},
+		{`C:\wdd`, `./a1`, `\wdd\a1`, ``},
+		{`C:\wde`, `.\b1`, `\wde\b1`, ``},
+		{`C:\wdf`, `/`, `\`, ``},
+		{`C:\wdg`, `\`, `\`, ``},
+		{`C:\wdh`, `c:/`, `\`, ``},
+		{`C:\wdi`, `c:\`, `\`, ``},
+		{`C:\wdj`, `.`, `\wdj`, ``},
+		{`C:\wdk`, `foo/bar`, `\wdk\foo\bar`, ``},
+		{`C:\wdl`, `foo\bar`, `\wdl\foo\bar`, ``},
+		{`C:\wdm`, `foo/bar/`, `\wdm\foo\bar\`, ``},
+		{`C:\wdn`, `foo\bar/`, `\wdn\foo\bar\`, ``},
+	}
+	for _, i := range tests {
+		got, err := normaliseDest("TEST", i.current, i.requested)
+		if err != nil && i.etext == "" {
+			t.Fatalf("TestNormaliseDest Got unexpected error %q for %s %s. ", err.Error(), i.current, i.requested)
+		}
+		if i.etext != "" && ((err == nil) || (err != nil && err.Error() != i.etext)) {
+			if err == nil {
+				t.Fatalf("TestNormaliseDest Expected an error for %s %s but didn't get one", i.current, i.requested)
+			} else {
+				t.Fatalf("TestNormaliseDest Wrong error text for %s %s - %s", i.current, i.requested, err.Error())
+			}
+		}
+		if i.etext == "" && got != i.expected {
+			t.Fatalf("TestNormaliseDest Expected %q for %q and %q. Got %q", i.expected, i.current, i.requested, got)
Got %q", i.expected, i.current, i.requested, got) + } + } +} diff --git a/components/engine/builder/dockerfile/metrics.go b/components/engine/builder/dockerfile/metrics.go new file mode 100644 index 0000000000..5aa953aa79 --- /dev/null +++ b/components/engine/builder/dockerfile/metrics.go @@ -0,0 +1,44 @@ +package dockerfile + +import ( + "github.com/docker/go-metrics" +) + +var ( + buildsTriggered metrics.Counter + buildsFailed metrics.LabeledCounter +) + +// Build metrics prometheus messages, these values must be initialized before +// using them. See the example below in the "builds_failed" metric definition. +const ( + metricsDockerfileSyntaxError = "dockerfile_syntax_error" + metricsDockerfileEmptyError = "dockerfile_empty_error" + metricsCommandNotSupportedError = "command_not_supported_error" + metricsErrorProcessingCommandsError = "error_processing_commands_error" + metricsBuildTargetNotReachableError = "build_target_not_reachable_error" + metricsMissingOnbuildArgumentsError = "missing_onbuild_arguments_error" + metricsUnknownInstructionError = "unknown_instruction_error" + metricsBuildCanceled = "build_canceled" +) + +func init() { + buildMetrics := metrics.NewNamespace("builder", "", nil) + + buildsTriggered = buildMetrics.NewCounter("builds_triggered", "Number of triggered image builds") + buildsFailed = buildMetrics.NewLabeledCounter("builds_failed", "Number of failed image builds", "reason") + for _, r := range []string{ + metricsDockerfileSyntaxError, + metricsDockerfileEmptyError, + metricsCommandNotSupportedError, + metricsErrorProcessingCommandsError, + metricsBuildTargetNotReachableError, + metricsMissingOnbuildArgumentsError, + metricsUnknownInstructionError, + metricsBuildCanceled, + } { + buildsFailed.WithValues(r) + } + + metrics.Register(buildMetrics) +} diff --git a/components/engine/builder/dockerfile/mockbackend_test.go b/components/engine/builder/dockerfile/mockbackend_test.go new file mode 100644 index 0000000000..3c273c71bc --- /dev/null +++ b/components/engine/builder/dockerfile/mockbackend_test.go @@ -0,0 +1,109 @@ +package dockerfile + +import ( + "io" + + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/builder" + containerpkg "github.com/docker/docker/container" + "github.com/docker/docker/image" + "golang.org/x/net/context" +) + +// MockBackend implements the builder.Backend interface for unit testing +type MockBackend struct { + containerCreateFunc func(config types.ContainerCreateConfig) (container.ContainerCreateCreatedBody, error) + commitFunc func(string, *backend.ContainerCommitConfig) (string, error) + getImageFunc func(string) (builder.Image, builder.ReleaseableLayer, error) +} + +func (m *MockBackend) TagImageWithReference(image.ID, reference.Named) error { + return nil +} + +func (m *MockBackend) ContainerAttachRaw(cID string, stdin io.ReadCloser, stdout, stderr io.Writer, stream bool, attached chan struct{}) error { + return nil +} + +func (m *MockBackend) ContainerCreate(config types.ContainerCreateConfig) (container.ContainerCreateCreatedBody, error) { + if m.containerCreateFunc != nil { + return m.containerCreateFunc(config) + } + return container.ContainerCreateCreatedBody{}, nil +} + +func (m *MockBackend) ContainerRm(name string, config *types.ContainerRmConfig) error { + return nil +} + +func (m *MockBackend) Commit(cID string, cfg *backend.ContainerCommitConfig) (string, error) { + 
+	if m.commitFunc != nil {
+		return m.commitFunc(cID, cfg)
+	}
+	return "", nil
+}
+
+func (m *MockBackend) ContainerKill(containerID string, sig uint64) error {
+	return nil
+}
+
+func (m *MockBackend) ContainerStart(containerID string, hostConfig *container.HostConfig, checkpoint string, checkpointDir string) error {
+	return nil
+}
+
+func (m *MockBackend) ContainerWait(ctx context.Context, containerID string, condition containerpkg.WaitCondition) (<-chan containerpkg.StateStatus, error) {
+	return nil, nil
+}
+
+func (m *MockBackend) ContainerCreateWorkdir(containerID string) error {
+	return nil
+}
+
+func (m *MockBackend) CopyOnBuild(containerID string, destPath string, srcRoot string, srcPath string, decompress bool) error {
+	return nil
+}
+
+func (m *MockBackend) GetImageAndReleasableLayer(ctx context.Context, refOrID string, opts backend.GetImageAndLayerOptions) (builder.Image, builder.ReleaseableLayer, error) {
+	if m.getImageFunc != nil {
+		return m.getImageFunc(refOrID)
+	}
+
+	return &mockImage{id: "theid"}, &mockLayer{}, nil
+}
+
+type mockImage struct {
+	id     string
+	config *container.Config
+}
+
+func (i *mockImage) ImageID() string {
+	return i.id
+}
+
+func (i *mockImage) RunConfig() *container.Config {
+	return i.config
+}
+
+type mockImageCache struct {
+	getCacheFunc func(parentID string, cfg *container.Config) (string, error)
+}
+
+func (mic *mockImageCache) GetCache(parentID string, cfg *container.Config) (string, error) {
+	if mic.getCacheFunc != nil {
+		return mic.getCacheFunc(parentID, cfg)
+	}
+	return "", nil
+}
+
+type mockLayer struct{}
+
+func (l *mockLayer) Release() error {
+	return nil
+}
+
+func (l *mockLayer) Mount() (string, error) {
+	return "mountPath", nil
+}
diff --git a/components/engine/builder/dockerfile/parser/dumper/main.go b/components/engine/builder/dockerfile/parser/dumper/main.go
new file mode 100644
index 0000000000..ea6205073d
--- /dev/null
+++ b/components/engine/builder/dockerfile/parser/dumper/main.go
@@ -0,0 +1,32 @@
+package main
+
+import (
+	"fmt"
+	"os"
+
+	"github.com/docker/docker/builder/dockerfile/parser"
+)
+
+func main() {
+	var f *os.File
+	var err error
+
+	if len(os.Args) < 2 {
+		fmt.Println("please supply filename(s)")
+		os.Exit(1)
+	}
+
+	for _, fn := range os.Args[1:] {
+		f, err = os.Open(fn)
+		if err != nil {
+			panic(err)
+		}
+		defer f.Close()
+
+		result, err := parser.Parse(f)
+		if err != nil {
+			panic(err)
+		}
+		fmt.Println(result.AST.Dump())
+	}
+}
diff --git a/components/engine/builder/dockerfile/parser/json_test.go b/components/engine/builder/dockerfile/parser/json_test.go
new file mode 100644
index 0000000000..d4489191da
--- /dev/null
+++ b/components/engine/builder/dockerfile/parser/json_test.go
@@ -0,0 +1,59 @@
+package parser
+
+import (
+	"testing"
+)
+
+var invalidJSONArraysOfStrings = []string{
+	`["a",42,"b"]`,
+	`["a",123.456,"b"]`,
+	`["a",{},"b"]`,
+	`["a",{"c": "d"},"b"]`,
+	`["a",["c"],"b"]`,
+	`["a",true,"b"]`,
+	`["a",false,"b"]`,
+	`["a",null,"b"]`,
+}
+
+var validJSONArraysOfStrings = map[string][]string{
+	`[]`:           {},
+	`[""]`:         {""},
+	`["a"]`:        {"a"},
+	`["a","b"]`:    {"a", "b"},
+	`[ "a", "b" ]`: {"a", "b"},
+	`[	"a",	"b"	]`: {"a", "b"}, // tab-separated; must be a distinct key from the space-separated entry above
+	`	[	"a",	"b"	]	`: {"a", "b"}, // leading and trailing tabs
+	`["abc 123", "♥", "☃", "\" \\ \/ \b \f \n \r \t \u0000"]`: {"abc 123", "♥", "☃", "\" \\ / \b \f \n \r \t \u0000"},
+}
+
+func TestJSONArraysOfStrings(t *testing.T) {
+	for json, expected := range validJSONArraysOfStrings {
+		d := NewDefaultDirective()
+
+		if node, _, err := parseJSON(json, d); err != nil {
+			t.Fatalf("%q should be a 
valid JSON array of strings, but wasn't! (err: %q)", json, err) + } else { + i := 0 + for node != nil { + if i >= len(expected) { + t.Fatalf("expected result is shorter than parsed result (%d vs %d+) in %q", len(expected), i+1, json) + } + if node.Value != expected[i] { + t.Fatalf("expected %q (not %q) in %q at pos %d", expected[i], node.Value, json, i) + } + node = node.Next + i++ + } + if i != len(expected) { + t.Fatalf("expected result is longer than parsed result (%d vs %d) in %q", len(expected), i+1, json) + } + } + } + for _, json := range invalidJSONArraysOfStrings { + d := NewDefaultDirective() + + if _, _, err := parseJSON(json, d); err != errDockerfileNotStringArray { + t.Fatalf("%q should be an invalid JSON array of strings, but wasn't!", json) + } + } +} diff --git a/components/engine/builder/dockerfile/parser/line_parsers.go b/components/engine/builder/dockerfile/parser/line_parsers.go new file mode 100644 index 0000000000..d0e182e8e3 --- /dev/null +++ b/components/engine/builder/dockerfile/parser/line_parsers.go @@ -0,0 +1,399 @@ +package parser + +// line parsers are dispatch calls that parse a single unit of text into a +// Node object which contains the whole statement. Dockerfiles have varied +// (but not usually unique, see ONBUILD for a unique example) parsing rules +// per-command, and these unify the processing in a way that makes it +// manageable. + +import ( + "encoding/json" + "errors" + "fmt" + "sort" + "strings" + "unicode" + "unicode/utf8" + + "github.com/docker/docker/builder/dockerfile/command" +) + +var ( + errDockerfileNotStringArray = errors.New("When using JSON array syntax, arrays must be comprised of strings only.") +) + +const ( + commandLabel = "LABEL" +) + +// ignore the current argument. This will still leave a command parsed, but +// will not incorporate the arguments into the ast. +func parseIgnore(rest string, d *Directive) (*Node, map[string]bool, error) { + return &Node{}, nil, nil +} + +// used for onbuild. Could potentially be used for anything that represents a +// statement with sub-statements. +// +// ONBUILD RUN foo bar -> (onbuild (run foo bar)) +// +func parseSubCommand(rest string, d *Directive) (*Node, map[string]bool, error) { + if rest == "" { + return nil, nil, nil + } + + child, err := newNodeFromLine(rest, d) + if err != nil { + return nil, nil, err + } + + return &Node{Children: []*Node{child}}, nil, nil +} + +// helper to parse words (i.e space delimited or quoted strings) in a statement. +// The quotes are preserved as part of this function and they are stripped later +// as part of processWords(). 
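+//
+// For example, with the default escape token (cases taken from TestParseWords
+// in parser_test.go):
+//
+//	parseWords(`foo bar 'abc xyz'`, d) => {"foo", "bar", "'abc xyz'"}
+//	parseWords(`foo\ bar`, d)          => {`foo\ bar`}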
+func parseWords(rest string, d *Directive) []string { + const ( + inSpaces = iota // looking for start of a word + inWord + inQuote + ) + + words := []string{} + phase := inSpaces + word := "" + quote := '\000' + blankOK := false + var ch rune + var chWidth int + + for pos := 0; pos <= len(rest); pos += chWidth { + if pos != len(rest) { + ch, chWidth = utf8.DecodeRuneInString(rest[pos:]) + } + + if phase == inSpaces { // Looking for start of word + if pos == len(rest) { // end of input + break + } + if unicode.IsSpace(ch) { // skip spaces + continue + } + phase = inWord // found it, fall through + } + if (phase == inWord || phase == inQuote) && (pos == len(rest)) { + if blankOK || len(word) > 0 { + words = append(words, word) + } + break + } + if phase == inWord { + if unicode.IsSpace(ch) { + phase = inSpaces + if blankOK || len(word) > 0 { + words = append(words, word) + } + word = "" + blankOK = false + continue + } + if ch == '\'' || ch == '"' { + quote = ch + blankOK = true + phase = inQuote + } + if ch == d.escapeToken { + if pos+chWidth == len(rest) { + continue // just skip an escape token at end of line + } + // If we're not quoted and we see an escape token, then always just + // add the escape token plus the char to the word, even if the char + // is a quote. + word += string(ch) + pos += chWidth + ch, chWidth = utf8.DecodeRuneInString(rest[pos:]) + } + word += string(ch) + continue + } + if phase == inQuote { + if ch == quote { + phase = inWord + } + // The escape token is special except for ' quotes - can't escape anything for ' + if ch == d.escapeToken && quote != '\'' { + if pos+chWidth == len(rest) { + phase = inWord + continue // just skip the escape token at end + } + pos += chWidth + word += string(ch) + ch, chWidth = utf8.DecodeRuneInString(rest[pos:]) + } + word += string(ch) + } + } + + return words +} + +// parse environment like statements. Note that this does *not* handle +// variable interpolation, which will be handled in the evaluator. +func parseNameVal(rest string, key string, d *Directive) (*Node, error) { + // This is kind of tricky because we need to support the old + // variant: KEY name value + // as well as the new one: KEY name=value ... + // The trigger to know which one is being used will be whether we hit + // a space or = first. space ==> old, "=" ==> new + + words := parseWords(rest, d) + if len(words) == 0 { + return nil, nil + } + + // Old format (KEY name value) + if !strings.Contains(words[0], "=") { + parts := tokenWhitespace.Split(rest, 2) + if len(parts) < 2 { + return nil, fmt.Errorf(key + " must have two arguments") + } + return newKeyValueNode(parts[0], parts[1]), nil + } + + var rootNode *Node + var prevNode *Node + for _, word := range words { + if !strings.Contains(word, "=") { + return nil, fmt.Errorf("Syntax error - can't find = in %q. 
Must be of the form: name=value", word) + } + + parts := strings.SplitN(word, "=", 2) + node := newKeyValueNode(parts[0], parts[1]) + rootNode, prevNode = appendKeyValueNode(node, rootNode, prevNode) + } + + return rootNode, nil +} + +func newKeyValueNode(key, value string) *Node { + return &Node{ + Value: key, + Next: &Node{Value: value}, + } +} + +func appendKeyValueNode(node, rootNode, prevNode *Node) (*Node, *Node) { + if rootNode == nil { + rootNode = node + } + if prevNode != nil { + prevNode.Next = node + } + + prevNode = node.Next + return rootNode, prevNode +} + +func parseEnv(rest string, d *Directive) (*Node, map[string]bool, error) { + node, err := parseNameVal(rest, "ENV", d) + return node, nil, err +} + +func parseLabel(rest string, d *Directive) (*Node, map[string]bool, error) { + node, err := parseNameVal(rest, commandLabel, d) + return node, nil, err +} + +// NodeFromLabels returns a Node for the injected labels +func NodeFromLabels(labels map[string]string) *Node { + keys := []string{} + for key := range labels { + keys = append(keys, key) + } + // Sort the label to have a repeatable order + sort.Strings(keys) + + labelPairs := []string{} + var rootNode *Node + var prevNode *Node + for _, key := range keys { + value := labels[key] + labelPairs = append(labelPairs, fmt.Sprintf("%q='%s'", key, value)) + // Value must be single quoted to prevent env variable expansion + // See https://github.com/docker/docker/issues/26027 + node := newKeyValueNode(key, "'"+value+"'") + rootNode, prevNode = appendKeyValueNode(node, rootNode, prevNode) + } + + return &Node{ + Value: command.Label, + Original: commandLabel + " " + strings.Join(labelPairs, " "), + Next: rootNode, + } +} + +// parses a statement containing one or more keyword definition(s) and/or +// value assignments, like `name1 name2= name3="" name4=value`. +// Note that this is a stricter format than the old format of assignment, +// allowed by parseNameVal(), in a way that this only allows assignment of the +// form `keyword=[]` like `name2=`, `name3=""`, and `name4=value` above. +// In addition, a keyword definition alone is of the form `keyword` like `name1` +// above. And the assignments `name2=` and `name3=""` are equivalent and +// assign an empty value to the respective keywords. +func parseNameOrNameVal(rest string, d *Directive) (*Node, map[string]bool, error) { + words := parseWords(rest, d) + if len(words) == 0 { + return nil, nil, nil + } + + var ( + rootnode *Node + prevNode *Node + ) + for i, word := range words { + node := &Node{} + node.Value = word + if i == 0 { + rootnode = node + } else { + prevNode.Next = node + } + prevNode = node + } + + return rootnode, nil, nil +} + +// parses a whitespace-delimited set of arguments. The result is effectively a +// linked list of string arguments. +func parseStringsWhitespaceDelimited(rest string, d *Directive) (*Node, map[string]bool, error) { + if rest == "" { + return nil, nil, nil + } + + node := &Node{} + rootnode := node + prevnode := node + for _, str := range tokenWhitespace.Split(rest, -1) { // use regexp + prevnode = node + node.Value = str + node.Next = &Node{} + node = node.Next + } + + // XXX to get around regexp.Split *always* providing an empty string at the + // end due to how our loop is constructed, nil out the last node in the + // chain. + prevnode.Next = nil + + return rootnode, nil, nil +} + +// parseString just wraps the string in quotes and returns a working node. 
+func parseString(rest string, d *Directive) (*Node, map[string]bool, error) {
+	if rest == "" {
+		return nil, nil, nil
+	}
+	n := &Node{}
+	n.Value = rest
+	return n, nil, nil
+}
+
+// parseJSON converts JSON arrays to an AST.
+func parseJSON(rest string, d *Directive) (*Node, map[string]bool, error) {
+	rest = strings.TrimLeftFunc(rest, unicode.IsSpace)
+	if !strings.HasPrefix(rest, "[") {
+		return nil, nil, fmt.Errorf(`Error parsing "%s" as a JSON array`, rest)
+	}
+
+	var myJSON []interface{}
+	if err := json.NewDecoder(strings.NewReader(rest)).Decode(&myJSON); err != nil {
+		return nil, nil, err
+	}
+
+	var top, prev *Node
+	for _, str := range myJSON {
+		s, ok := str.(string)
+		if !ok {
+			return nil, nil, errDockerfileNotStringArray
+		}
+
+		node := &Node{Value: s}
+		if prev == nil {
+			top = node
+		} else {
+			prev.Next = node
+		}
+		prev = node
+	}
+
+	return top, map[string]bool{"json": true}, nil
+}
+
+// parseMaybeJSON determines if the argument appears to be a JSON array. If
+// so, passes to parseJSON; if not, returns the raw argument as a single
+// node.
+func parseMaybeJSON(rest string, d *Directive) (*Node, map[string]bool, error) {
+	if rest == "" {
+		return nil, nil, nil
+	}
+
+	node, attrs, err := parseJSON(rest, d)
+
+	if err == nil {
+		return node, attrs, nil
+	}
+	if err == errDockerfileNotStringArray {
+		return nil, nil, err
+	}
+
+	node = &Node{}
+	node.Value = rest
+	return node, nil, nil
+}
+
+// parseMaybeJSONToList determines if the argument appears to be a JSON array. If
+// so, passes to parseJSON; if not, attempts to parse it as a whitespace-
+// delimited string.
+func parseMaybeJSONToList(rest string, d *Directive) (*Node, map[string]bool, error) {
+	node, attrs, err := parseJSON(rest, d)
+
+	if err == nil {
+		return node, attrs, nil
+	}
+	if err == errDockerfileNotStringArray {
+		return nil, nil, err
+	}
+
+	return parseStringsWhitespaceDelimited(rest, d)
+}
+
+// parseHealthConfig handles the HEALTHCHECK command. It is like
+// parseMaybeJSON, but has an extra leading type argument.
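+// For example, `HEALTHCHECK CMD curl -f http://localhost/` is split into the
+// type "CMD" and the rest of the line, which is then handed to
+// parseMaybeJSON; the curl command is only an illustration.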
+func parseHealthConfig(rest string, d *Directive) (*Node, map[string]bool, error) { + // Find end of first argument + var sep int + for ; sep < len(rest); sep++ { + if unicode.IsSpace(rune(rest[sep])) { + break + } + } + next := sep + for ; next < len(rest); next++ { + if !unicode.IsSpace(rune(rest[next])) { + break + } + } + + if sep == 0 { + return nil, nil, nil + } + + typ := rest[:sep] + cmd, attrs, err := parseMaybeJSON(rest[next:], d) + if err != nil { + return nil, nil, err + } + + return &Node{Value: typ, Next: cmd}, attrs, err +} diff --git a/components/engine/builder/dockerfile/parser/line_parsers_test.go b/components/engine/builder/dockerfile/parser/line_parsers_test.go new file mode 100644 index 0000000000..cd6d57af3a --- /dev/null +++ b/components/engine/builder/dockerfile/parser/line_parsers_test.go @@ -0,0 +1,66 @@ +package parser + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestParseNameValOldFormat(t *testing.T) { + directive := Directive{} + node, err := parseNameVal("foo bar", "LABEL", &directive) + assert.NoError(t, err) + + expected := &Node{ + Value: "foo", + Next: &Node{Value: "bar"}, + } + assert.Equal(t, expected, node) +} + +func TestParseNameValNewFormat(t *testing.T) { + directive := Directive{} + node, err := parseNameVal("foo=bar thing=star", "LABEL", &directive) + assert.NoError(t, err) + + expected := &Node{ + Value: "foo", + Next: &Node{ + Value: "bar", + Next: &Node{ + Value: "thing", + Next: &Node{ + Value: "star", + }, + }, + }, + } + assert.Equal(t, expected, node) +} + +func TestNodeFromLabels(t *testing.T) { + labels := map[string]string{ + "foo": "bar", + "weird": "first' second", + } + expected := &Node{ + Value: "label", + Original: `LABEL "foo"='bar' "weird"='first' second'`, + Next: &Node{ + Value: "foo", + Next: &Node{ + Value: "'bar'", + Next: &Node{ + Value: "weird", + Next: &Node{ + Value: "'first' second'", + }, + }, + }, + }, + } + + node := NodeFromLabels(labels) + assert.Equal(t, expected, node) + +} diff --git a/components/engine/builder/dockerfile/parser/parser.go b/components/engine/builder/dockerfile/parser/parser.go new file mode 100644 index 0000000000..0729fa539d --- /dev/null +++ b/components/engine/builder/dockerfile/parser/parser.go @@ -0,0 +1,294 @@ +// Package parser implements a parser and parse tree dumper for Dockerfiles. +package parser + +import ( + "bufio" + "bytes" + "fmt" + "io" + "regexp" + "strconv" + "strings" + "unicode" + + "github.com/docker/docker/builder/dockerfile/command" + "github.com/pkg/errors" +) + +// Node is a structure used to represent a parse tree. +// +// In the node there are three fields, Value, Next, and Children. Value is the +// current token's string value. Next is always the next non-child token, and +// children contains all the children. Here's an example: +// +// (value next (child child-next child-next-next) next-next) +// +// This data structure is frankly pretty lousy for handling complex languages, +// but lucky for us the Dockerfile isn't very complicated. This structure +// works a little more effectively than a "proper" parse tree for our needs. 
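+//
+// For example, going by the parse functions in line_parsers.go, `ENV foo=bar`
+// produces a node whose Value is "env" with a Next chain holding "foo" and
+// "bar", while ONBUILD wraps its parsed sub-instruction in Children.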
+//
+type Node struct {
+	Value      string          // actual content
+	Next       *Node           // the next item in the current sexp
+	Children   []*Node         // the children of this sexp
+	Attributes map[string]bool // special attributes for this node
+	Original   string          // original line used before parsing
+	Flags      []string        // only top Node should have this set
+	StartLine  int             // the line in the original dockerfile where the node begins
+	endLine    int             // the line in the original dockerfile where the node ends
+}
+
+// Dump dumps the AST defined by `node` as a list of sexps.
+// Returns a string suitable for printing.
+func (node *Node) Dump() string {
+	str := ""
+	str += node.Value
+
+	if len(node.Flags) > 0 {
+		str += fmt.Sprintf(" %q", node.Flags)
+	}
+
+	for _, n := range node.Children {
+		str += "(" + n.Dump() + ")\n"
+	}
+
+	for n := node.Next; n != nil; n = n.Next {
+		if len(n.Children) > 0 {
+			str += " " + n.Dump()
+		} else {
+			str += " " + strconv.Quote(n.Value)
+		}
+	}
+
+	return strings.TrimSpace(str)
+}
+
+func (node *Node) lines(start, end int) {
+	node.StartLine = start
+	node.endLine = end
+}
+
+// AddChild adds a new child node, and updates line information
+func (node *Node) AddChild(child *Node, startLine, endLine int) {
+	child.lines(startLine, endLine)
+	if node.StartLine < 0 {
+		node.StartLine = startLine
+	}
+	node.endLine = endLine
+	node.Children = append(node.Children, child)
+}
+
+var (
+	dispatch           map[string]func(string, *Directive) (*Node, map[string]bool, error)
+	tokenWhitespace    = regexp.MustCompile(`[\t\v\f\r ]+`)
+	tokenEscapeCommand = regexp.MustCompile(`^#[ \t]*escape[ \t]*=[ \t]*(?P<escapechar>.).*$`)
+	tokenComment       = regexp.MustCompile(`^#.*$`)
+)
+
+// DefaultEscapeToken is the default escape token
+const DefaultEscapeToken = '\\'
+
+// Directive is the structure used during a build run to hold the state of
+// parsing directives.
+type Directive struct {
+	escapeToken           rune           // Current escape token
+	lineContinuationRegex *regexp.Regexp // Current line continuation regex
+	processingComplete    bool           // Whether we are done looking for directives
+	escapeSeen            bool           // Whether the escape directive has been seen
+}
+
+// setEscapeToken sets the default token for escaping characters in a Dockerfile.
+func (d *Directive) setEscapeToken(s string) error {
+	if s != "`" && s != "\\" {
+		return fmt.Errorf("invalid ESCAPE '%s'. Must be ` or \\", s)
+	}
+	d.escapeToken = rune(s[0])
+	d.lineContinuationRegex = regexp.MustCompile(`\` + s + `[ \t]*$`)
+	return nil
+}
+
+// processLine looks for a parser directive of the form '# escape=<char>'.
+// Parser directives must precede any builder instruction or other comments,
+// and cannot be repeated.
+func (d *Directive) processLine(line string) error {
+	if d.processingComplete {
+		return nil
+	}
+	// Processing is finished after the first call
+	defer func() { d.processingComplete = true }()
+
+	tecMatch := tokenEscapeCommand.FindStringSubmatch(strings.ToLower(line))
+	if len(tecMatch) == 0 {
+		return nil
+	}
+	if d.escapeSeen {
+		return errors.New("only one escape parser directive can be used")
+	}
+	for i, n := range tokenEscapeCommand.SubexpNames() {
+		if n == "escapechar" {
+			d.escapeSeen = true
+			return d.setEscapeToken(tecMatch[i])
+		}
+	}
+	return nil
+}
+
+// NewDefaultDirective returns a new Directive with the default escape token
+func NewDefaultDirective() *Directive {
+	directive := Directive{}
+	directive.setEscapeToken(string(DefaultEscapeToken))
+	return &directive
+}
+
+func init() {
+	// Dispatch table; see line_parsers.go for the parse functions.
+ // The command is parsed and mapped to the line parser. The line parser + // receives the arguments but not the command, and returns an AST after + // reformulating the arguments according to the rules in the parser + // functions. Errors are propagated up by Parse() and the resulting AST can + // be incorporated directly into the existing AST as a next. + dispatch = map[string]func(string, *Directive) (*Node, map[string]bool, error){ + command.Add: parseMaybeJSONToList, + command.Arg: parseNameOrNameVal, + command.Cmd: parseMaybeJSON, + command.Copy: parseMaybeJSONToList, + command.Entrypoint: parseMaybeJSON, + command.Env: parseEnv, + command.Expose: parseStringsWhitespaceDelimited, + command.From: parseStringsWhitespaceDelimited, + command.Healthcheck: parseHealthConfig, + command.Label: parseLabel, + command.Maintainer: parseString, + command.Onbuild: parseSubCommand, + command.Run: parseMaybeJSON, + command.Shell: parseMaybeJSON, + command.StopSignal: parseString, + command.User: parseString, + command.Volume: parseMaybeJSONToList, + command.Workdir: parseString, + } +} + +// newNodeFromLine splits the line into parts, and dispatches to a function +// based on the command and command arguments. A Node is created from the +// result of the dispatch. +func newNodeFromLine(line string, directive *Directive) (*Node, error) { + cmd, flags, args, err := splitCommand(line) + if err != nil { + return nil, err + } + + fn := dispatch[cmd] + // Ignore invalid Dockerfile instructions + if fn == nil { + fn = parseIgnore + } + next, attrs, err := fn(args, directive) + if err != nil { + return nil, err + } + + return &Node{ + Value: cmd, + Original: line, + Flags: flags, + Next: next, + Attributes: attrs, + }, nil +} + +// Result is the result of parsing a Dockerfile +type Result struct { + AST *Node + EscapeToken rune +} + +// Parse reads lines from a Reader, parses the lines into an AST and returns +// the AST and escape token +func Parse(rwc io.Reader) (*Result, error) { + d := NewDefaultDirective() + currentLine := 0 + root := &Node{StartLine: -1} + scanner := bufio.NewScanner(rwc) + + var err error + for scanner.Scan() { + bytes := scanner.Bytes() + switch currentLine { + case 0: + bytes, err = processFirstLine(d, bytes) + if err != nil { + return nil, err + } + default: + bytes = processLine(bytes, true) + } + currentLine++ + + startLine := currentLine + line, isEndOfLine := trimContinuationCharacter(string(bytes), d) + if isEndOfLine && line == "" { + continue + } + + for !isEndOfLine && scanner.Scan() { + bytes := processLine(scanner.Bytes(), false) + currentLine++ + + // TODO: warn this is being deprecated/removed + if isEmptyContinuationLine(bytes) { + continue + } + + continuationLine := string(bytes) + continuationLine, isEndOfLine = trimContinuationCharacter(continuationLine, d) + line += continuationLine + } + + child, err := newNodeFromLine(line, d) + if err != nil { + return nil, err + } + root.AddChild(child, startLine, currentLine) + } + + return &Result{AST: root, EscapeToken: d.escapeToken}, nil +} + +func trimComments(src []byte) []byte { + return tokenComment.ReplaceAll(src, []byte{}) +} + +func trimWhitespace(src []byte) []byte { + return bytes.TrimLeftFunc(src, unicode.IsSpace) +} + +func isEmptyContinuationLine(line []byte) bool { + return len(trimComments(trimWhitespace(line))) == 0 +} + +var utf8bom = []byte{0xEF, 0xBB, 0xBF} + +func trimContinuationCharacter(line string, d *Directive) (string, bool) { + if d.lineContinuationRegex.MatchString(line) { + line = 
d.lineContinuationRegex.ReplaceAllString(line, "") + return line, false + } + return line, true +} + +// TODO: remove stripLeftWhitespace after deprecation period. It seems silly +// to preserve whitespace on continuation lines. Why is that done? +func processLine(token []byte, stripLeftWhitespace bool) []byte { + if stripLeftWhitespace { + token = trimWhitespace(token) + } + return trimComments(token) +} + +func processFirstLine(d *Directive, token []byte) ([]byte, error) { + token = bytes.TrimPrefix(token, utf8bom) + token = trimWhitespace(token) + err := d.processLine(string(token)) + return trimComments(token), err +} diff --git a/components/engine/builder/dockerfile/parser/parser_test.go b/components/engine/builder/dockerfile/parser/parser_test.go new file mode 100644 index 0000000000..99b614ba9f --- /dev/null +++ b/components/engine/builder/dockerfile/parser/parser_test.go @@ -0,0 +1,135 @@ +package parser + +import ( + "bytes" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "runtime" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +const testDir = "testfiles" +const negativeTestDir = "testfiles-negative" +const testFileLineInfo = "testfile-line/Dockerfile" + +func getDirs(t *testing.T, dir string) []string { + f, err := os.Open(dir) + require.NoError(t, err) + defer f.Close() + + dirs, err := f.Readdirnames(0) + require.NoError(t, err) + return dirs +} + +func TestTestNegative(t *testing.T) { + for _, dir := range getDirs(t, negativeTestDir) { + dockerfile := filepath.Join(negativeTestDir, dir, "Dockerfile") + + df, err := os.Open(dockerfile) + require.NoError(t, err) + defer df.Close() + + _, err = Parse(df) + assert.Error(t, err) + } +} + +func TestTestData(t *testing.T) { + for _, dir := range getDirs(t, testDir) { + dockerfile := filepath.Join(testDir, dir, "Dockerfile") + resultfile := filepath.Join(testDir, dir, "result") + + df, err := os.Open(dockerfile) + require.NoError(t, err) + defer df.Close() + + result, err := Parse(df) + require.NoError(t, err) + + content, err := ioutil.ReadFile(resultfile) + require.NoError(t, err) + + if runtime.GOOS == "windows" { + // CRLF --> CR to match Unix behavior + content = bytes.Replace(content, []byte{'\x0d', '\x0a'}, []byte{'\x0a'}, -1) + } + + assert.Contains(t, result.AST.Dump()+"\n", string(content), "In "+dockerfile) + } +} + +func TestParseWords(t *testing.T) { + tests := []map[string][]string{ + { + "input": {"foo"}, + "expect": {"foo"}, + }, + { + "input": {"foo bar"}, + "expect": {"foo", "bar"}, + }, + { + "input": {"foo\\ bar"}, + "expect": {"foo\\ bar"}, + }, + { + "input": {"foo=bar"}, + "expect": {"foo=bar"}, + }, + { + "input": {"foo bar 'abc xyz'"}, + "expect": {"foo", "bar", "'abc xyz'"}, + }, + { + "input": {`foo bar "abc xyz"`}, + "expect": {"foo", "bar", `"abc xyz"`}, + }, + { + "input": {"àöû"}, + "expect": {"àöû"}, + }, + { + "input": {`föo bàr "âbc xÿz"`}, + "expect": {"föo", "bàr", `"âbc xÿz"`}, + }, + } + + for _, test := range tests { + words := parseWords(test["input"][0], NewDefaultDirective()) + assert.Equal(t, test["expect"], words) + } +} + +func TestLineInformation(t *testing.T) { + df, err := os.Open(testFileLineInfo) + require.NoError(t, err) + defer df.Close() + + result, err := Parse(df) + require.NoError(t, err) + + ast := result.AST + if ast.StartLine != 5 || ast.endLine != 31 { + fmt.Fprintf(os.Stderr, "Wrong root line information: expected(%d-%d), actual(%d-%d)\n", 5, 31, ast.StartLine, ast.endLine) + t.Fatal("Root line information doesn't match 
result.") + } + assert.Len(t, ast.Children, 3) + expected := [][]int{ + {5, 5}, + {11, 12}, + {17, 31}, + } + for i, child := range ast.Children { + if child.StartLine != expected[i][0] || child.endLine != expected[i][1] { + t.Logf("Wrong line information for child %d: expected(%d-%d), actual(%d-%d)\n", + i, expected[i][0], expected[i][1], child.StartLine, child.endLine) + t.Fatal("Root line information doesn't match result.") + } + } +} diff --git a/components/engine/builder/dockerfile/parser/split_command.go b/components/engine/builder/dockerfile/parser/split_command.go new file mode 100644 index 0000000000..171f454f6d --- /dev/null +++ b/components/engine/builder/dockerfile/parser/split_command.go @@ -0,0 +1,118 @@ +package parser + +import ( + "strings" + "unicode" +) + +// splitCommand takes a single line of text and parses out the cmd and args, +// which are used for dispatching to more exact parsing functions. +func splitCommand(line string) (string, []string, string, error) { + var args string + var flags []string + + // Make sure we get the same results irrespective of leading/trailing spaces + cmdline := tokenWhitespace.Split(strings.TrimSpace(line), 2) + cmd := strings.ToLower(cmdline[0]) + + if len(cmdline) == 2 { + var err error + args, flags, err = extractBuilderFlags(cmdline[1]) + if err != nil { + return "", nil, "", err + } + } + + return cmd, flags, strings.TrimSpace(args), nil +} + +func extractBuilderFlags(line string) (string, []string, error) { + // Parses the BuilderFlags and returns the remaining part of the line + + const ( + inSpaces = iota // looking for start of a word + inWord + inQuote + ) + + words := []string{} + phase := inSpaces + word := "" + quote := '\000' + blankOK := false + var ch rune + + for pos := 0; pos <= len(line); pos++ { + if pos != len(line) { + ch = rune(line[pos]) + } + + if phase == inSpaces { // Looking for start of word + if pos == len(line) { // end of input + break + } + if unicode.IsSpace(ch) { // skip spaces + continue + } + + // Only keep going if the next word starts with -- + if ch != '-' || pos+1 == len(line) || rune(line[pos+1]) != '-' { + return line[pos:], words, nil + } + + phase = inWord // found something with "--", fall through + } + if (phase == inWord || phase == inQuote) && (pos == len(line)) { + if word != "--" && (blankOK || len(word) > 0) { + words = append(words, word) + } + break + } + if phase == inWord { + if unicode.IsSpace(ch) { + phase = inSpaces + if word == "--" { + return line[pos:], words, nil + } + if blankOK || len(word) > 0 { + words = append(words, word) + } + word = "" + blankOK = false + continue + } + if ch == '\'' || ch == '"' { + quote = ch + blankOK = true + phase = inQuote + continue + } + if ch == '\\' { + if pos+1 == len(line) { + continue // just skip \ at end + } + pos++ + ch = rune(line[pos]) + } + word += string(ch) + continue + } + if phase == inQuote { + if ch == quote { + phase = inWord + continue + } + if ch == '\\' { + if pos+1 == len(line) { + phase = inWord + continue // just skip \ at end + } + pos++ + ch = rune(line[pos]) + } + word += string(ch) + } + } + + return "", words, nil +} diff --git a/components/engine/builder/dockerfile/parser/testfile-line/Dockerfile b/components/engine/builder/dockerfile/parser/testfile-line/Dockerfile new file mode 100644 index 0000000000..c7601c9f69 --- /dev/null +++ b/components/engine/builder/dockerfile/parser/testfile-line/Dockerfile @@ -0,0 +1,35 @@ +# ESCAPE=\ + + + +FROM brimstone/ubuntu:14.04 + + +# TORUN -v 
/var/run/docker.sock:/var/run/docker.sock + + +ENV GOPATH \ +/go + + + +# Install the packages we need, clean up after them and us +RUN apt-get update \ + && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean \ + + + && apt-get install -y --no-install-recommends git golang ca-certificates \ + && apt-get clean \ + && rm -rf /var/lib/apt/lists \ + + && go get -v github.com/brimstone/consuldock \ + && mv $GOPATH/bin/consuldock /usr/local/bin/consuldock \ + + && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \ + && apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \ + && rm /tmp/dpkg.* \ + && rm -rf $GOPATH + + + + diff --git a/components/engine/builder/dockerfile/parser/testfiles-negative/env_no_value/Dockerfile b/components/engine/builder/dockerfile/parser/testfiles-negative/env_no_value/Dockerfile new file mode 100644 index 0000000000..1d65578794 --- /dev/null +++ b/components/engine/builder/dockerfile/parser/testfiles-negative/env_no_value/Dockerfile @@ -0,0 +1,3 @@ +FROM busybox + +ENV PATH diff --git a/components/engine/builder/dockerfile/parser/testfiles-negative/shykes-nested-json/Dockerfile b/components/engine/builder/dockerfile/parser/testfiles-negative/shykes-nested-json/Dockerfile new file mode 100644 index 0000000000..d1be4596c7 --- /dev/null +++ b/components/engine/builder/dockerfile/parser/testfiles-negative/shykes-nested-json/Dockerfile @@ -0,0 +1 @@ +CMD [ "echo", [ "nested json" ] ] diff --git a/components/engine/builder/dockerfile/parser/testfiles/ADD-COPY-with-JSON/Dockerfile b/components/engine/builder/dockerfile/parser/testfiles/ADD-COPY-with-JSON/Dockerfile new file mode 100644 index 0000000000..035b4e8bb5 --- /dev/null +++ b/components/engine/builder/dockerfile/parser/testfiles/ADD-COPY-with-JSON/Dockerfile @@ -0,0 +1,11 @@ +FROM ubuntu:14.04 +LABEL maintainer Seongyeol Lim + +COPY . /go/src/github.com/docker/docker +ADD . / +ADD null / +COPY nullfile /tmp +ADD [ "vimrc", "/tmp" ] +COPY [ "bashrc", "/tmp" ] +COPY [ "test file", "/tmp" ] +ADD [ "test file", "/tmp/test file" ] diff --git a/components/engine/builder/dockerfile/parser/testfiles/ADD-COPY-with-JSON/result b/components/engine/builder/dockerfile/parser/testfiles/ADD-COPY-with-JSON/result new file mode 100644 index 0000000000..d1f71ecc5a --- /dev/null +++ b/components/engine/builder/dockerfile/parser/testfiles/ADD-COPY-with-JSON/result @@ -0,0 +1,10 @@ +(from "ubuntu:14.04") +(label "maintainer" "Seongyeol Lim ") +(copy "." "/go/src/github.com/docker/docker") +(add "." 
"/") +(add "null" "/") +(copy "nullfile" "/tmp") +(add "vimrc" "/tmp") +(copy "bashrc" "/tmp") +(copy "test file" "/tmp") +(add "test file" "/tmp/test file") diff --git a/components/engine/builder/dockerfile/parser/testfiles/brimstone-consuldock/Dockerfile b/components/engine/builder/dockerfile/parser/testfiles/brimstone-consuldock/Dockerfile new file mode 100644 index 0000000000..9c0952acb0 --- /dev/null +++ b/components/engine/builder/dockerfile/parser/testfiles/brimstone-consuldock/Dockerfile @@ -0,0 +1,26 @@ +#escape=\ +FROM brimstone/ubuntu:14.04 + +LABEL maintainer brimstone@the.narro.ws + +# TORUN -v /var/run/docker.sock:/var/run/docker.sock + +ENV GOPATH /go + +# Set our command +ENTRYPOINT ["/usr/local/bin/consuldock"] + +# Install the packages we need, clean up after them and us +RUN apt-get update \ + && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean \ + && apt-get install -y --no-install-recommends git golang ca-certificates \ + && apt-get clean \ + && rm -rf /var/lib/apt/lists \ + + && go get -v github.com/brimstone/consuldock \ + && mv $GOPATH/bin/consuldock /usr/local/bin/consuldock \ + + && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \ + && apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \ + && rm /tmp/dpkg.* \ + && rm -rf $GOPATH diff --git a/components/engine/builder/dockerfile/parser/testfiles/brimstone-consuldock/result b/components/engine/builder/dockerfile/parser/testfiles/brimstone-consuldock/result new file mode 100644 index 0000000000..3b45db62b4 --- /dev/null +++ b/components/engine/builder/dockerfile/parser/testfiles/brimstone-consuldock/result @@ -0,0 +1,5 @@ +(from "brimstone/ubuntu:14.04") +(label "maintainer" "brimstone@the.narro.ws") +(env "GOPATH" "/go") +(entrypoint "/usr/local/bin/consuldock") +(run "apt-get update \t&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean && apt-get install -y --no-install-recommends git golang ca-certificates && apt-get clean && rm -rf /var/lib/apt/lists \t&& go get -v github.com/brimstone/consuldock && mv $GOPATH/bin/consuldock /usr/local/bin/consuldock \t&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \t&& apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \t&& rm /tmp/dpkg.* \t&& rm -rf $GOPATH") diff --git a/components/engine/builder/dockerfile/parser/testfiles/brimstone-docker-consul/Dockerfile b/components/engine/builder/dockerfile/parser/testfiles/brimstone-docker-consul/Dockerfile new file mode 100644 index 0000000000..25ae352166 --- /dev/null +++ b/components/engine/builder/dockerfile/parser/testfiles/brimstone-docker-consul/Dockerfile @@ -0,0 +1,52 @@ +FROM brimstone/ubuntu:14.04 + +CMD [] + +ENTRYPOINT ["/usr/bin/consul", "agent", "-server", "-data-dir=/consul", "-client=0.0.0.0", "-ui-dir=/webui"] + +EXPOSE 8500 8600 8400 8301 8302 + +RUN apt-get update \ + && apt-get install -y unzip wget \ + && apt-get clean \ + && rm -rf /var/lib/apt/lists + +RUN cd /tmp \ + && wget https://dl.bintray.com/mitchellh/consul/0.3.1_web_ui.zip \ + -O web_ui.zip \ + && unzip web_ui.zip \ + && mv dist /webui \ + && rm web_ui.zip + +RUN apt-get update \ + && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean \ + && apt-get install -y --no-install-recommends unzip wget \ + && apt-get clean \ + && rm -rf /var/lib/apt/lists \ + + && cd /tmp \ + && wget https://dl.bintray.com/mitchellh/consul/0.3.1_web_ui.zip \ + -O web_ui.zip \ + && unzip web_ui.zip \ + && mv dist /webui \ + && rm web_ui.zip \ + + && dpkg -l | awk '/^ii/ {print $2}' > 
/tmp/dpkg.dirty \ + && apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \ + && rm /tmp/dpkg.* + +ENV GOPATH /go + +RUN apt-get update \ + && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean \ + && apt-get install -y --no-install-recommends git golang ca-certificates build-essential \ + && apt-get clean \ + && rm -rf /var/lib/apt/lists \ + + && go get -v github.com/hashicorp/consul \ + && mv $GOPATH/bin/consul /usr/bin/consul \ + + && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \ + && apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \ + && rm /tmp/dpkg.* \ + && rm -rf $GOPATH diff --git a/components/engine/builder/dockerfile/parser/testfiles/brimstone-docker-consul/result b/components/engine/builder/dockerfile/parser/testfiles/brimstone-docker-consul/result new file mode 100644 index 0000000000..16492e516a --- /dev/null +++ b/components/engine/builder/dockerfile/parser/testfiles/brimstone-docker-consul/result @@ -0,0 +1,9 @@ +(from "brimstone/ubuntu:14.04") +(cmd) +(entrypoint "/usr/bin/consul" "agent" "-server" "-data-dir=/consul" "-client=0.0.0.0" "-ui-dir=/webui") +(expose "8500" "8600" "8400" "8301" "8302") +(run "apt-get update && apt-get install -y unzip wget \t&& apt-get clean \t&& rm -rf /var/lib/apt/lists") +(run "cd /tmp && wget https://dl.bintray.com/mitchellh/consul/0.3.1_web_ui.zip -O web_ui.zip && unzip web_ui.zip && mv dist /webui && rm web_ui.zip") +(run "apt-get update \t&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean && apt-get install -y --no-install-recommends unzip wget && apt-get clean && rm -rf /var/lib/apt/lists && cd /tmp && wget https://dl.bintray.com/mitchellh/consul/0.3.1_web_ui.zip -O web_ui.zip && unzip web_ui.zip && mv dist /webui && rm web_ui.zip \t&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \t&& apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \t&& rm /tmp/dpkg.*") +(env "GOPATH" "/go") +(run "apt-get update \t&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean && apt-get install -y --no-install-recommends git golang ca-certificates build-essential && apt-get clean && rm -rf /var/lib/apt/lists \t&& go get -v github.com/hashicorp/consul \t&& mv $GOPATH/bin/consul /usr/bin/consul \t&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \t&& apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \t&& rm /tmp/dpkg.* \t&& rm -rf $GOPATH") diff --git a/components/engine/builder/dockerfile/parser/testfiles/continue-at-eof/Dockerfile b/components/engine/builder/dockerfile/parser/testfiles/continue-at-eof/Dockerfile new file mode 100644 index 0000000000..a8ec369ad1 --- /dev/null +++ b/components/engine/builder/dockerfile/parser/testfiles/continue-at-eof/Dockerfile @@ -0,0 +1,3 @@ +FROM alpine:3.5 + +RUN something \ \ No newline at end of file diff --git a/components/engine/builder/dockerfile/parser/testfiles/continue-at-eof/result b/components/engine/builder/dockerfile/parser/testfiles/continue-at-eof/result new file mode 100644 index 0000000000..14e4f09321 --- /dev/null +++ b/components/engine/builder/dockerfile/parser/testfiles/continue-at-eof/result @@ -0,0 +1,2 @@ +(from "alpine:3.5") +(run "something") diff --git a/components/engine/builder/dockerfile/parser/testfiles/continueIndent/Dockerfile b/components/engine/builder/dockerfile/parser/testfiles/continueIndent/Dockerfile new file mode 100644 index 0000000000..42b324e77b --- /dev/null +++ 
b/components/engine/builder/dockerfile/parser/testfiles/continueIndent/Dockerfile @@ -0,0 +1,36 @@ +FROM ubuntu:14.04 + +RUN echo hello\ + world\ + goodnight \ + moon\ + light\ +ning +RUN echo hello \ + world +RUN echo hello \ +world +RUN echo hello \ +goodbye\ +frog +RUN echo hello \ +world +RUN echo hi \ + \ + world \ +\ + good\ +\ +night +RUN echo goodbye\ +frog +RUN echo good\ +bye\ +frog + +RUN echo hello \ +# this is a comment + +# this is a comment with a blank line surrounding it + +this is some more useful stuff diff --git a/components/engine/builder/dockerfile/parser/testfiles/continueIndent/result b/components/engine/builder/dockerfile/parser/testfiles/continueIndent/result new file mode 100644 index 0000000000..268ae073c8 --- /dev/null +++ b/components/engine/builder/dockerfile/parser/testfiles/continueIndent/result @@ -0,0 +1,10 @@ +(from "ubuntu:14.04") +(run "echo hello world goodnight moon lightning") +(run "echo hello world") +(run "echo hello world") +(run "echo hello goodbyefrog") +(run "echo hello world") +(run "echo hi world goodnight") +(run "echo goodbyefrog") +(run "echo goodbyefrog") +(run "echo hello this is some more useful stuff") diff --git a/components/engine/builder/dockerfile/parser/testfiles/cpuguy83-nagios/Dockerfile b/components/engine/builder/dockerfile/parser/testfiles/cpuguy83-nagios/Dockerfile new file mode 100644 index 0000000000..8ccb71a578 --- /dev/null +++ b/components/engine/builder/dockerfile/parser/testfiles/cpuguy83-nagios/Dockerfile @@ -0,0 +1,54 @@ +FROM cpuguy83/ubuntu +ENV NAGIOS_HOME /opt/nagios +ENV NAGIOS_USER nagios +ENV NAGIOS_GROUP nagios +ENV NAGIOS_CMDUSER nagios +ENV NAGIOS_CMDGROUP nagios +ENV NAGIOSADMIN_USER nagiosadmin +ENV NAGIOSADMIN_PASS nagios +ENV APACHE_RUN_USER nagios +ENV APACHE_RUN_GROUP nagios +ENV NAGIOS_TIMEZONE UTC + +RUN sed -i 's/universe/universe multiverse/' /etc/apt/sources.list +RUN apt-get update && apt-get install -y iputils-ping netcat build-essential snmp snmpd snmp-mibs-downloader php5-cli apache2 libapache2-mod-php5 runit bc postfix bsd-mailx +RUN ( egrep -i "^${NAGIOS_GROUP}" /etc/group || groupadd $NAGIOS_GROUP ) && ( egrep -i "^${NAGIOS_CMDGROUP}" /etc/group || groupadd $NAGIOS_CMDGROUP ) +RUN ( id -u $NAGIOS_USER || useradd --system $NAGIOS_USER -g $NAGIOS_GROUP -d $NAGIOS_HOME ) && ( id -u $NAGIOS_CMDUSER || useradd --system -d $NAGIOS_HOME -g $NAGIOS_CMDGROUP $NAGIOS_CMDUSER ) + +ADD http://downloads.sourceforge.net/project/nagios/nagios-3.x/nagios-3.5.1/nagios-3.5.1.tar.gz?r=http%3A%2F%2Fwww.nagios.org%2Fdownload%2Fcore%2Fthanks%2F%3Ft%3D1398863696&ts=1398863718&use_mirror=superb-dca3 /tmp/nagios.tar.gz +RUN cd /tmp && tar -zxvf nagios.tar.gz && cd nagios && ./configure --prefix=${NAGIOS_HOME} --exec-prefix=${NAGIOS_HOME} --enable-event-broker --with-nagios-command-user=${NAGIOS_CMDUSER} --with-command-group=${NAGIOS_CMDGROUP} --with-nagios-user=${NAGIOS_USER} --with-nagios-group=${NAGIOS_GROUP} && make all && make install && make install-config && make install-commandmode && cp sample-config/httpd.conf /etc/apache2/conf.d/nagios.conf +ADD http://www.nagios-plugins.org/download/nagios-plugins-1.5.tar.gz /tmp/ +RUN cd /tmp && tar -zxvf nagios-plugins-1.5.tar.gz && cd nagios-plugins-1.5 && ./configure --prefix=${NAGIOS_HOME} && make && make install + +RUN sed -i.bak 's/.*\=www\-data//g' /etc/apache2/envvars +RUN export DOC_ROOT="DocumentRoot $(echo $NAGIOS_HOME/share)"; sed -i "s,DocumentRoot.*,$DOC_ROOT," /etc/apache2/sites-enabled/000-default + +RUN ln -s ${NAGIOS_HOME}/bin/nagios 
/usr/local/bin/nagios && mkdir -p /usr/share/snmp/mibs && chmod 0755 /usr/share/snmp/mibs && touch /usr/share/snmp/mibs/.foo + +RUN echo "use_timezone=$NAGIOS_TIMEZONE" >> ${NAGIOS_HOME}/etc/nagios.cfg && echo "SetEnv TZ \"${NAGIOS_TIMEZONE}\"" >> /etc/apache2/conf.d/nagios.conf + +RUN mkdir -p ${NAGIOS_HOME}/etc/conf.d && mkdir -p ${NAGIOS_HOME}/etc/monitor && ln -s /usr/share/snmp/mibs ${NAGIOS_HOME}/libexec/mibs +RUN echo "cfg_dir=${NAGIOS_HOME}/etc/conf.d" >> ${NAGIOS_HOME}/etc/nagios.cfg +RUN echo "cfg_dir=${NAGIOS_HOME}/etc/monitor" >> ${NAGIOS_HOME}/etc/nagios.cfg +RUN download-mibs && echo "mibs +ALL" > /etc/snmp/snmp.conf + +RUN sed -i 's,/bin/mail,/usr/bin/mail,' /opt/nagios/etc/objects/commands.cfg && \ + sed -i 's,/usr/usr,/usr,' /opt/nagios/etc/objects/commands.cfg +RUN cp /etc/services /var/spool/postfix/etc/ + +RUN mkdir -p /etc/sv/nagios && mkdir -p /etc/sv/apache && rm -rf /etc/sv/getty-5 && mkdir -p /etc/sv/postfix +ADD nagios.init /etc/sv/nagios/run +ADD apache.init /etc/sv/apache/run +ADD postfix.init /etc/sv/postfix/run +ADD postfix.stop /etc/sv/postfix/finish + +ADD start.sh /usr/local/bin/start_nagios + +ENV APACHE_LOCK_DIR /var/run +ENV APACHE_LOG_DIR /var/log/apache2 + +EXPOSE 80 + +VOLUME ["/opt/nagios/var", "/opt/nagios/etc", "/opt/nagios/libexec", "/var/log/apache2", "/usr/share/snmp/mibs"] + +CMD ["/usr/local/bin/start_nagios"] diff --git a/components/engine/builder/dockerfile/parser/testfiles/cpuguy83-nagios/result b/components/engine/builder/dockerfile/parser/testfiles/cpuguy83-nagios/result new file mode 100644 index 0000000000..25dd3ddfe5 --- /dev/null +++ b/components/engine/builder/dockerfile/parser/testfiles/cpuguy83-nagios/result @@ -0,0 +1,40 @@ +(from "cpuguy83/ubuntu") +(env "NAGIOS_HOME" "/opt/nagios") +(env "NAGIOS_USER" "nagios") +(env "NAGIOS_GROUP" "nagios") +(env "NAGIOS_CMDUSER" "nagios") +(env "NAGIOS_CMDGROUP" "nagios") +(env "NAGIOSADMIN_USER" "nagiosadmin") +(env "NAGIOSADMIN_PASS" "nagios") +(env "APACHE_RUN_USER" "nagios") +(env "APACHE_RUN_GROUP" "nagios") +(env "NAGIOS_TIMEZONE" "UTC") +(run "sed -i 's/universe/universe multiverse/' /etc/apt/sources.list") +(run "apt-get update && apt-get install -y iputils-ping netcat build-essential snmp snmpd snmp-mibs-downloader php5-cli apache2 libapache2-mod-php5 runit bc postfix bsd-mailx") +(run "( egrep -i \"^${NAGIOS_GROUP}\" /etc/group || groupadd $NAGIOS_GROUP ) && ( egrep -i \"^${NAGIOS_CMDGROUP}\" /etc/group || groupadd $NAGIOS_CMDGROUP )") +(run "( id -u $NAGIOS_USER || useradd --system $NAGIOS_USER -g $NAGIOS_GROUP -d $NAGIOS_HOME ) && ( id -u $NAGIOS_CMDUSER || useradd --system -d $NAGIOS_HOME -g $NAGIOS_CMDGROUP $NAGIOS_CMDUSER )") +(add "http://downloads.sourceforge.net/project/nagios/nagios-3.x/nagios-3.5.1/nagios-3.5.1.tar.gz?r=http%3A%2F%2Fwww.nagios.org%2Fdownload%2Fcore%2Fthanks%2F%3Ft%3D1398863696&ts=1398863718&use_mirror=superb-dca3" "/tmp/nagios.tar.gz") +(run "cd /tmp && tar -zxvf nagios.tar.gz && cd nagios && ./configure --prefix=${NAGIOS_HOME} --exec-prefix=${NAGIOS_HOME} --enable-event-broker --with-nagios-command-user=${NAGIOS_CMDUSER} --with-command-group=${NAGIOS_CMDGROUP} --with-nagios-user=${NAGIOS_USER} --with-nagios-group=${NAGIOS_GROUP} && make all && make install && make install-config && make install-commandmode && cp sample-config/httpd.conf /etc/apache2/conf.d/nagios.conf") +(add "http://www.nagios-plugins.org/download/nagios-plugins-1.5.tar.gz" "/tmp/") +(run "cd /tmp && tar -zxvf nagios-plugins-1.5.tar.gz && cd nagios-plugins-1.5 && ./configure 
--prefix=${NAGIOS_HOME} && make && make install") +(run "sed -i.bak 's/.*\\=www\\-data//g' /etc/apache2/envvars") +(run "export DOC_ROOT=\"DocumentRoot $(echo $NAGIOS_HOME/share)\"; sed -i \"s,DocumentRoot.*,$DOC_ROOT,\" /etc/apache2/sites-enabled/000-default") +(run "ln -s ${NAGIOS_HOME}/bin/nagios /usr/local/bin/nagios && mkdir -p /usr/share/snmp/mibs && chmod 0755 /usr/share/snmp/mibs && touch /usr/share/snmp/mibs/.foo") +(run "echo \"use_timezone=$NAGIOS_TIMEZONE\" >> ${NAGIOS_HOME}/etc/nagios.cfg && echo \"SetEnv TZ \\\"${NAGIOS_TIMEZONE}\\\"\" >> /etc/apache2/conf.d/nagios.conf") +(run "mkdir -p ${NAGIOS_HOME}/etc/conf.d && mkdir -p ${NAGIOS_HOME}/etc/monitor && ln -s /usr/share/snmp/mibs ${NAGIOS_HOME}/libexec/mibs") +(run "echo \"cfg_dir=${NAGIOS_HOME}/etc/conf.d\" >> ${NAGIOS_HOME}/etc/nagios.cfg") +(run "echo \"cfg_dir=${NAGIOS_HOME}/etc/monitor\" >> ${NAGIOS_HOME}/etc/nagios.cfg") +(run "download-mibs && echo \"mibs +ALL\" > /etc/snmp/snmp.conf") +(run "sed -i 's,/bin/mail,/usr/bin/mail,' /opt/nagios/etc/objects/commands.cfg && sed -i 's,/usr/usr,/usr,' /opt/nagios/etc/objects/commands.cfg") +(run "cp /etc/services /var/spool/postfix/etc/") +(run "mkdir -p /etc/sv/nagios && mkdir -p /etc/sv/apache && rm -rf /etc/sv/getty-5 && mkdir -p /etc/sv/postfix") +(add "nagios.init" "/etc/sv/nagios/run") +(add "apache.init" "/etc/sv/apache/run") +(add "postfix.init" "/etc/sv/postfix/run") +(add "postfix.stop" "/etc/sv/postfix/finish") +(add "start.sh" "/usr/local/bin/start_nagios") +(env "APACHE_LOCK_DIR" "/var/run") +(env "APACHE_LOG_DIR" "/var/log/apache2") +(expose "80") +(volume "/opt/nagios/var" "/opt/nagios/etc" "/opt/nagios/libexec" "/var/log/apache2" "/usr/share/snmp/mibs") +(cmd "/usr/local/bin/start_nagios") diff --git a/components/engine/builder/dockerfile/parser/testfiles/docker/Dockerfile b/components/engine/builder/dockerfile/parser/testfiles/docker/Dockerfile new file mode 100644 index 0000000000..5153453ff3 --- /dev/null +++ b/components/engine/builder/dockerfile/parser/testfiles/docker/Dockerfile @@ -0,0 +1,102 @@ +# This file describes the standard way to build Docker, using docker +# +# Usage: +# +# # Assemble the full dev environment. This is slow the first time. +# docker build -t docker . +# +# # Mount your source in an interactive container for quick testing: +# docker run -v `pwd`:/go/src/github.com/docker/docker --privileged -i -t docker bash +# +# # Run the test suite: +# docker run --privileged docker hack/make.sh test-unit test-integration-cli test-docker-py +# +# # Publish a release: +# docker run --privileged \ +# -e AWS_S3_BUCKET=baz \ +# -e AWS_ACCESS_KEY=foo \ +# -e AWS_SECRET_KEY=bar \ +# -e GPG_PASSPHRASE=gloubiboulga \ +# docker hack/release.sh +# +# Note: AppArmor used to mess with privileged mode, but this is no longer +# the case. Therefore, you don't have to disable it anymore. 
+# + +FROM ubuntu:14.04 +LABEL maintainer Tianon Gravi (@tianon) + +# Packaged dependencies +RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -yq \ + apt-utils \ + aufs-tools \ + automake \ + btrfs-tools \ + build-essential \ + curl \ + dpkg-sig \ + git \ + iptables \ + libapparmor-dev \ + libcap-dev \ + mercurial \ + pandoc \ + parallel \ + reprepro \ + ruby1.9.1 \ + ruby1.9.1-dev \ + s3cmd=1.1.0* \ + --no-install-recommends + +# Get lvm2 source for compiling statically +RUN git clone --no-checkout https://git.fedorahosted.org/git/lvm2.git /usr/local/lvm2 && cd /usr/local/lvm2 && git checkout -q v2_02_103 +# see https://git.fedorahosted.org/cgit/lvm2.git/refs/tags for release tags +# note: we don't use "git clone -b" above because it then spews big nasty warnings about 'detached HEAD' state that we can't silence as easily as we can silence them using "git checkout" directly + +# Compile and install lvm2 +RUN cd /usr/local/lvm2 && ./configure --enable-static_link && make device-mapper && make install_device-mapper +# see https://git.fedorahosted.org/cgit/lvm2.git/tree/INSTALL + +# Install Go +RUN curl -sSL https://golang.org/dl/go1.3.src.tar.gz | tar -v -C /usr/local -xz +ENV PATH /usr/local/go/bin:$PATH +ENV GOPATH /go:/go/src/github.com/docker/docker/vendor +RUN cd /usr/local/go/src && ./make.bash --no-clean 2>&1 + +# Compile Go for cross compilation +ENV DOCKER_CROSSPLATFORMS \ + linux/386 linux/arm \ + darwin/amd64 darwin/386 \ + freebsd/amd64 freebsd/386 freebsd/arm +# (set an explicit GOARM of 5 for maximum compatibility) +ENV GOARM 5 +RUN cd /usr/local/go/src && bash -xc 'for platform in $DOCKER_CROSSPLATFORMS; do GOOS=${platform%/*} GOARCH=${platform##*/} ./make.bash --no-clean 2>&1; done' + +# Grab Go's cover tool for dead-simple code coverage testing +RUN go get golang.org/x/tools/cmd/cover + +# TODO replace FPM with some very minimal debhelper stuff +RUN gem install --no-rdoc --no-ri fpm --version 1.0.2 + +# Get the "busybox" image source so we can build locally instead of pulling +RUN git clone -b buildroot-2014.02 https://github.com/jpetazzo/docker-busybox.git /docker-busybox + +# Setup s3cmd config +RUN /bin/echo -e '[default]\naccess_key=$AWS_ACCESS_KEY\nsecret_key=$AWS_SECRET_KEY' > /.s3cfg + +# Set user.email so crosbymichael's in-container merge commits go smoothly +RUN git config --global user.email 'docker-dummy@example.com' + +# Add an unprivileged user to be used for tests which need it +RUN groupadd -r docker +RUN useradd --create-home --gid docker unprivilegeduser + +VOLUME /var/lib/docker +WORKDIR /go/src/github.com/docker/docker +ENV DOCKER_BUILDTAGS apparmor selinux + +# Wrap all commands in the "docker-in-docker" script to allow nested containers +ENTRYPOINT ["hack/dind"] + +# Upload docker source +COPY . 
/go/src/github.com/docker/docker diff --git a/components/engine/builder/dockerfile/parser/testfiles/docker/result b/components/engine/builder/dockerfile/parser/testfiles/docker/result new file mode 100644 index 0000000000..0c2f229916 --- /dev/null +++ b/components/engine/builder/dockerfile/parser/testfiles/docker/result @@ -0,0 +1,24 @@ +(from "ubuntu:14.04") +(label "maintainer" "Tianon Gravi (@tianon)") +(run "apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -yq \tapt-utils \taufs-tools \tautomake \tbtrfs-tools \tbuild-essential \tcurl \tdpkg-sig \tgit \tiptables \tlibapparmor-dev \tlibcap-dev \tmercurial \tpandoc \tparallel \treprepro \truby1.9.1 \truby1.9.1-dev \ts3cmd=1.1.0* \t--no-install-recommends") +(run "git clone --no-checkout https://git.fedorahosted.org/git/lvm2.git /usr/local/lvm2 && cd /usr/local/lvm2 && git checkout -q v2_02_103") +(run "cd /usr/local/lvm2 && ./configure --enable-static_link && make device-mapper && make install_device-mapper") +(run "curl -sSL https://golang.org/dl/go1.3.src.tar.gz | tar -v -C /usr/local -xz") +(env "PATH" "/usr/local/go/bin:$PATH") +(env "GOPATH" "/go:/go/src/github.com/docker/docker/vendor") +(run "cd /usr/local/go/src && ./make.bash --no-clean 2>&1") +(env "DOCKER_CROSSPLATFORMS" "linux/386 linux/arm \tdarwin/amd64 darwin/386 \tfreebsd/amd64 freebsd/386 freebsd/arm") +(env "GOARM" "5") +(run "cd /usr/local/go/src && bash -xc 'for platform in $DOCKER_CROSSPLATFORMS; do GOOS=${platform%/*} GOARCH=${platform##*/} ./make.bash --no-clean 2>&1; done'") +(run "go get golang.org/x/tools/cmd/cover") +(run "gem install --no-rdoc --no-ri fpm --version 1.0.2") +(run "git clone -b buildroot-2014.02 https://github.com/jpetazzo/docker-busybox.git /docker-busybox") +(run "/bin/echo -e '[default]\\naccess_key=$AWS_ACCESS_KEY\\nsecret_key=$AWS_SECRET_KEY' > /.s3cfg") +(run "git config --global user.email 'docker-dummy@example.com'") +(run "groupadd -r docker") +(run "useradd --create-home --gid docker unprivilegeduser") +(volume "/var/lib/docker") +(workdir "/go/src/github.com/docker/docker") +(env "DOCKER_BUILDTAGS" "apparmor selinux") +(entrypoint "hack/dind") +(copy "." 
"/go/src/github.com/docker/docker") diff --git a/components/engine/builder/dockerfile/parser/testfiles/env/Dockerfile b/components/engine/builder/dockerfile/parser/testfiles/env/Dockerfile new file mode 100644 index 0000000000..08fa18acec --- /dev/null +++ b/components/engine/builder/dockerfile/parser/testfiles/env/Dockerfile @@ -0,0 +1,23 @@ +FROM ubuntu +ENV name value +ENV name=value +ENV name=value name2=value2 +ENV name="value value1" +ENV name=value\ value2 +ENV name="value'quote space'value2" +ENV name='value"double quote"value2' +ENV name=value\ value2 name2=value2\ value3 +ENV name="a\"b" +ENV name="a\'b" +ENV name='a\'b' +ENV name='a\'b'' +ENV name='a\"b' +ENV name="''" +# don't put anything after the next line - it must be the last line of the +# Dockerfile and it must end with \ +ENV name=value \ + name1=value1 \ + name2="value2a \ + value2b" \ + name3="value3a\n\"value3b\"" \ + name4="value4a\\nvalue4b" \ diff --git a/components/engine/builder/dockerfile/parser/testfiles/env/result b/components/engine/builder/dockerfile/parser/testfiles/env/result new file mode 100644 index 0000000000..ba0a6dd7cb --- /dev/null +++ b/components/engine/builder/dockerfile/parser/testfiles/env/result @@ -0,0 +1,16 @@ +(from "ubuntu") +(env "name" "value") +(env "name" "value") +(env "name" "value" "name2" "value2") +(env "name" "\"value value1\"") +(env "name" "value\\ value2") +(env "name" "\"value'quote space'value2\"") +(env "name" "'value\"double quote\"value2'") +(env "name" "value\\ value2" "name2" "value2\\ value3") +(env "name" "\"a\\\"b\"") +(env "name" "\"a\\'b\"") +(env "name" "'a\\'b'") +(env "name" "'a\\'b''") +(env "name" "'a\\\"b'") +(env "name" "\"''\"") +(env "name" "value" "name1" "value1" "name2" "\"value2a value2b\"" "name3" "\"value3a\\n\\\"value3b\\\"\"" "name4" "\"value4a\\\\nvalue4b\"") diff --git a/components/engine/builder/dockerfile/parser/testfiles/escape-after-comment/Dockerfile b/components/engine/builder/dockerfile/parser/testfiles/escape-after-comment/Dockerfile new file mode 100644 index 0000000000..18e9a474f2 --- /dev/null +++ b/components/engine/builder/dockerfile/parser/testfiles/escape-after-comment/Dockerfile @@ -0,0 +1,9 @@ +# Comment here. Should not be looking for the following parser directive. +# Hence the following line will be ignored, and the subsequent backslash +# continuation will be the default. +# escape = ` + +FROM image +LABEL maintainer foo@bar.com +ENV GOPATH \ +\go \ No newline at end of file diff --git a/components/engine/builder/dockerfile/parser/testfiles/escape-after-comment/result b/components/engine/builder/dockerfile/parser/testfiles/escape-after-comment/result new file mode 100644 index 0000000000..9ab119c414 --- /dev/null +++ b/components/engine/builder/dockerfile/parser/testfiles/escape-after-comment/result @@ -0,0 +1,3 @@ +(from "image") +(label "maintainer" "foo@bar.com") +(env "GOPATH" "\\go") diff --git a/components/engine/builder/dockerfile/parser/testfiles/escape-nonewline/Dockerfile b/components/engine/builder/dockerfile/parser/testfiles/escape-nonewline/Dockerfile new file mode 100644 index 0000000000..366ee3c36b --- /dev/null +++ b/components/engine/builder/dockerfile/parser/testfiles/escape-nonewline/Dockerfile @@ -0,0 +1,7 @@ +# escape = `` +# There is no white space line after the directives. This still succeeds, but goes +# against best practices. 
+FROM image +LABEL maintainer foo@bar.com +ENV GOPATH ` +\go \ No newline at end of file diff --git a/components/engine/builder/dockerfile/parser/testfiles/escape-nonewline/result b/components/engine/builder/dockerfile/parser/testfiles/escape-nonewline/result new file mode 100644 index 0000000000..9ab119c414 --- /dev/null +++ b/components/engine/builder/dockerfile/parser/testfiles/escape-nonewline/result @@ -0,0 +1,3 @@ +(from "image") +(label "maintainer" "foo@bar.com") +(env "GOPATH" "\\go") diff --git a/components/engine/builder/dockerfile/parser/testfiles/escape/Dockerfile b/components/engine/builder/dockerfile/parser/testfiles/escape/Dockerfile new file mode 100644 index 0000000000..a515af152d --- /dev/null +++ b/components/engine/builder/dockerfile/parser/testfiles/escape/Dockerfile @@ -0,0 +1,6 @@ +#escape = ` + +FROM image +LABEL maintainer foo@bar.com +ENV GOPATH ` +\go \ No newline at end of file diff --git a/components/engine/builder/dockerfile/parser/testfiles/escape/result b/components/engine/builder/dockerfile/parser/testfiles/escape/result new file mode 100644 index 0000000000..9ab119c414 --- /dev/null +++ b/components/engine/builder/dockerfile/parser/testfiles/escape/result @@ -0,0 +1,3 @@ +(from "image") +(label "maintainer" "foo@bar.com") +(env "GOPATH" "\\go") diff --git a/components/engine/builder/dockerfile/parser/testfiles/escapes/Dockerfile b/components/engine/builder/dockerfile/parser/testfiles/escapes/Dockerfile new file mode 100644 index 0000000000..03062394ae --- /dev/null +++ b/components/engine/builder/dockerfile/parser/testfiles/escapes/Dockerfile @@ -0,0 +1,14 @@ +FROM ubuntu:14.04 +LABEL maintainer Erik \\Hollensbe \" + +RUN apt-get \update && \ + apt-get \"install znc -y +ADD \conf\\" /.znc + +RUN foo \ + +bar \ + +baz + +CMD [ "\/usr\\\"/bin/znc", "-f", "-r" ] diff --git a/components/engine/builder/dockerfile/parser/testfiles/escapes/result b/components/engine/builder/dockerfile/parser/testfiles/escapes/result new file mode 100644 index 0000000000..98e3e3b737 --- /dev/null +++ b/components/engine/builder/dockerfile/parser/testfiles/escapes/result @@ -0,0 +1,6 @@ +(from "ubuntu:14.04") +(label "maintainer" "Erik \\\\Hollensbe \\\"") +(run "apt-get \\update && apt-get \\\"install znc -y") +(add "\\conf\\\\\"" "/.znc") +(run "foo bar baz") +(cmd "/usr\\\"/bin/znc" "-f" "-r") diff --git a/components/engine/builder/dockerfile/parser/testfiles/flags/Dockerfile b/components/engine/builder/dockerfile/parser/testfiles/flags/Dockerfile new file mode 100644 index 0000000000..2418e0f069 --- /dev/null +++ b/components/engine/builder/dockerfile/parser/testfiles/flags/Dockerfile @@ -0,0 +1,10 @@ +FROM scratch +COPY foo /tmp/ +COPY --user=me foo /tmp/ +COPY --doit=true foo /tmp/ +COPY --user=me --doit=true foo /tmp/ +COPY --doit=true -- foo /tmp/ +COPY -- foo /tmp/ +CMD --doit [ "a", "b" ] +CMD --doit=true -- [ "a", "b" ] +CMD --doit -- [ ] diff --git a/components/engine/builder/dockerfile/parser/testfiles/flags/result b/components/engine/builder/dockerfile/parser/testfiles/flags/result new file mode 100644 index 0000000000..4578f4cba4 --- /dev/null +++ b/components/engine/builder/dockerfile/parser/testfiles/flags/result @@ -0,0 +1,10 @@ +(from "scratch") +(copy "foo" "/tmp/") +(copy ["--user=me"] "foo" "/tmp/") +(copy ["--doit=true"] "foo" "/tmp/") +(copy ["--user=me" "--doit=true"] "foo" "/tmp/") +(copy ["--doit=true"] "foo" "/tmp/") +(copy "foo" "/tmp/") +(cmd ["--doit"] "a" "b") +(cmd ["--doit=true"] "a" "b") +(cmd ["--doit"]) diff --git 
a/components/engine/builder/dockerfile/parser/testfiles/health/Dockerfile b/components/engine/builder/dockerfile/parser/testfiles/health/Dockerfile new file mode 100644 index 0000000000..081e442882 --- /dev/null +++ b/components/engine/builder/dockerfile/parser/testfiles/health/Dockerfile @@ -0,0 +1,10 @@ +FROM debian +ADD check.sh main.sh /app/ +CMD /app/main.sh +HEALTHCHECK +HEALTHCHECK --interval=5s --timeout=3s --retries=3 \ + CMD /app/check.sh --quiet +HEALTHCHECK CMD +HEALTHCHECK CMD a b +HEALTHCHECK --timeout=3s CMD ["foo"] +HEALTHCHECK CONNECT TCP 7000 diff --git a/components/engine/builder/dockerfile/parser/testfiles/health/result b/components/engine/builder/dockerfile/parser/testfiles/health/result new file mode 100644 index 0000000000..092924f88c --- /dev/null +++ b/components/engine/builder/dockerfile/parser/testfiles/health/result @@ -0,0 +1,9 @@ +(from "debian") +(add "check.sh" "main.sh" "/app/") +(cmd "/app/main.sh") +(healthcheck) +(healthcheck ["--interval=5s" "--timeout=3s" "--retries=3"] "CMD" "/app/check.sh --quiet") +(healthcheck "CMD") +(healthcheck "CMD" "a b") +(healthcheck ["--timeout=3s"] "CMD" "foo") +(healthcheck "CONNECT" "TCP 7000") diff --git a/components/engine/builder/dockerfile/parser/testfiles/influxdb/Dockerfile b/components/engine/builder/dockerfile/parser/testfiles/influxdb/Dockerfile new file mode 100644 index 0000000000..587fb9b54b --- /dev/null +++ b/components/engine/builder/dockerfile/parser/testfiles/influxdb/Dockerfile @@ -0,0 +1,15 @@ +FROM ubuntu:14.04 + +RUN apt-get update && apt-get install wget -y +RUN wget http://s3.amazonaws.com/influxdb/influxdb_latest_amd64.deb +RUN dpkg -i influxdb_latest_amd64.deb +RUN rm -r /opt/influxdb/shared + +VOLUME /opt/influxdb/shared + +CMD /usr/bin/influxdb --pidfile /var/run/influxdb.pid -config /opt/influxdb/shared/config.toml + +EXPOSE 8083 +EXPOSE 8086 +EXPOSE 8090 +EXPOSE 8099 diff --git a/components/engine/builder/dockerfile/parser/testfiles/influxdb/result b/components/engine/builder/dockerfile/parser/testfiles/influxdb/result new file mode 100644 index 0000000000..0998e87e63 --- /dev/null +++ b/components/engine/builder/dockerfile/parser/testfiles/influxdb/result @@ -0,0 +1,11 @@ +(from "ubuntu:14.04") +(run "apt-get update && apt-get install wget -y") +(run "wget http://s3.amazonaws.com/influxdb/influxdb_latest_amd64.deb") +(run "dpkg -i influxdb_latest_amd64.deb") +(run "rm -r /opt/influxdb/shared") +(volume "/opt/influxdb/shared") +(cmd "/usr/bin/influxdb --pidfile /var/run/influxdb.pid -config /opt/influxdb/shared/config.toml") +(expose "8083") +(expose "8086") +(expose "8090") +(expose "8099") diff --git a/components/engine/builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string-double/Dockerfile b/components/engine/builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string-double/Dockerfile new file mode 100644 index 0000000000..39fe27d99c --- /dev/null +++ b/components/engine/builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string-double/Dockerfile @@ -0,0 +1 @@ +CMD "[\"echo\", \"Phew, I just managed to escaped those double quotes\"]" diff --git a/components/engine/builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string-double/result b/components/engine/builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string-double/result new file mode 100644 index 0000000000..afc220c2a7 --- /dev/null +++ b/components/engine/builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string-double/result @@ 
-0,0 +1 @@ +(cmd "\"[\\\"echo\\\", \\\"Phew, I just managed to escaped those double quotes\\\"]\"") diff --git a/components/engine/builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string/Dockerfile b/components/engine/builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string/Dockerfile new file mode 100644 index 0000000000..eaae081a06 --- /dev/null +++ b/components/engine/builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string/Dockerfile @@ -0,0 +1 @@ +CMD '["echo", "Well, JSON in a string is JSON too?"]' diff --git a/components/engine/builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string/result b/components/engine/builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string/result new file mode 100644 index 0000000000..484804e2b2 --- /dev/null +++ b/components/engine/builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string/result @@ -0,0 +1 @@ +(cmd "'[\"echo\", \"Well, JSON in a string is JSON too?\"]'") diff --git a/components/engine/builder/dockerfile/parser/testfiles/jeztah-invalid-json-single-quotes/Dockerfile b/components/engine/builder/dockerfile/parser/testfiles/jeztah-invalid-json-single-quotes/Dockerfile new file mode 100644 index 0000000000..c3ac63c07a --- /dev/null +++ b/components/engine/builder/dockerfile/parser/testfiles/jeztah-invalid-json-single-quotes/Dockerfile @@ -0,0 +1 @@ +CMD ['echo','single quotes are invalid JSON'] diff --git a/components/engine/builder/dockerfile/parser/testfiles/jeztah-invalid-json-single-quotes/result b/components/engine/builder/dockerfile/parser/testfiles/jeztah-invalid-json-single-quotes/result new file mode 100644 index 0000000000..6147891207 --- /dev/null +++ b/components/engine/builder/dockerfile/parser/testfiles/jeztah-invalid-json-single-quotes/result @@ -0,0 +1 @@ +(cmd "['echo','single quotes are invalid JSON']") diff --git a/components/engine/builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-bracket/Dockerfile b/components/engine/builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-bracket/Dockerfile new file mode 100644 index 0000000000..5fd4afa522 --- /dev/null +++ b/components/engine/builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-bracket/Dockerfile @@ -0,0 +1 @@ +CMD ["echo", "Please, close the brackets when you're done" diff --git a/components/engine/builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-bracket/result b/components/engine/builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-bracket/result new file mode 100644 index 0000000000..1ffbb8ff85 --- /dev/null +++ b/components/engine/builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-bracket/result @@ -0,0 +1 @@ +(cmd "[\"echo\", \"Please, close the brackets when you're done\"") diff --git a/components/engine/builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-string/Dockerfile b/components/engine/builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-string/Dockerfile new file mode 100644 index 0000000000..30cc4bb48f --- /dev/null +++ b/components/engine/builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-string/Dockerfile @@ -0,0 +1 @@ +CMD ["echo", "look ma, no quote!] 
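All of the jeztah-invalid-json fixtures above and below exercise the same fallback: when the bracketed form after CMD is not valid JSON (single quotes, an unterminated bracket, an unterminated string), the parser does not fail but keeps the whole line as a single shell-form string, as the matching result files show. A minimal sketch of observing this through the parser package (illustrative; the Result.AST and Node fields are assumed from their use elsewhere in this diff):

```go
package main

import (
	"fmt"
	"strings"

	"github.com/docker/docker/builder/dockerfile/parser"
)

func main() {
	// Single quotes are not valid JSON, so this CMD falls back to shell form.
	res, err := parser.Parse(strings.NewReader("CMD ['echo','single quotes are invalid JSON']\n"))
	if err != nil {
		panic(err)
	}
	cmd := res.AST.Children[0]
	// In shell form the "json" attribute stays unset and the arguments are
	// carried as one literal string, exactly as dumped in the result fixtures.
	fmt.Println(cmd.Attributes["json"], cmd.Next.Value)
}
```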
diff --git a/components/engine/builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-string/result b/components/engine/builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-string/result new file mode 100644 index 0000000000..32048147b5 --- /dev/null +++ b/components/engine/builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-string/result @@ -0,0 +1 @@ +(cmd "[\"echo\", \"look ma, no quote!]") diff --git a/components/engine/builder/dockerfile/parser/testfiles/json/Dockerfile b/components/engine/builder/dockerfile/parser/testfiles/json/Dockerfile new file mode 100644 index 0000000000..a586917110 --- /dev/null +++ b/components/engine/builder/dockerfile/parser/testfiles/json/Dockerfile @@ -0,0 +1,8 @@ +CMD [] +CMD [""] +CMD ["a"] +CMD ["a","b"] +CMD [ "a", "b" ] +CMD [ "a", "b" ] +CMD [ "a", "b" ] +CMD ["abc 123", "♥", "☃", "\" \\ \/ \b \f \n \r \t \u0000"] diff --git a/components/engine/builder/dockerfile/parser/testfiles/json/result b/components/engine/builder/dockerfile/parser/testfiles/json/result new file mode 100644 index 0000000000..c6553e6e1a --- /dev/null +++ b/components/engine/builder/dockerfile/parser/testfiles/json/result @@ -0,0 +1,8 @@ +(cmd) +(cmd "") +(cmd "a") +(cmd "a" "b") +(cmd "a" "b") +(cmd "a" "b") +(cmd "a" "b") +(cmd "abc 123" "♥" "☃" "\" \\ / \b \f \n \r \t \x00") diff --git a/components/engine/builder/dockerfile/parser/testfiles/kartar-entrypoint-oddities/Dockerfile b/components/engine/builder/dockerfile/parser/testfiles/kartar-entrypoint-oddities/Dockerfile new file mode 100644 index 0000000000..728ec9a787 --- /dev/null +++ b/components/engine/builder/dockerfile/parser/testfiles/kartar-entrypoint-oddities/Dockerfile @@ -0,0 +1,7 @@ +FROM ubuntu:14.04 +LABEL maintainer James Turnbull "james@example.com" +ENV REFRESHED_AT 2014-06-01 +RUN apt-get update +RUN apt-get -y install redis-server redis-tools +EXPOSE 6379 +ENTRYPOINT [ "/usr/bin/redis-server" ] diff --git a/components/engine/builder/dockerfile/parser/testfiles/kartar-entrypoint-oddities/result b/components/engine/builder/dockerfile/parser/testfiles/kartar-entrypoint-oddities/result new file mode 100644 index 0000000000..e774bc4f97 --- /dev/null +++ b/components/engine/builder/dockerfile/parser/testfiles/kartar-entrypoint-oddities/result @@ -0,0 +1,7 @@ +(from "ubuntu:14.04") +(label "maintainer" "James Turnbull \"james@example.com\"") +(env "REFRESHED_AT" "2014-06-01") +(run "apt-get update") +(run "apt-get -y install redis-server redis-tools") +(expose "6379") +(entrypoint "/usr/bin/redis-server") diff --git a/components/engine/builder/dockerfile/parser/testfiles/lk4d4-the-edge-case-generator/Dockerfile b/components/engine/builder/dockerfile/parser/testfiles/lk4d4-the-edge-case-generator/Dockerfile new file mode 100644 index 0000000000..27f28cb921 --- /dev/null +++ b/components/engine/builder/dockerfile/parser/testfiles/lk4d4-the-edge-case-generator/Dockerfile @@ -0,0 +1,48 @@ +FROM busybox:buildroot-2014.02 + +LABEL maintainer docker + +ONBUILD RUN ["echo", "test"] +ONBUILD RUN echo test +ONBUILD COPY . 
/ + + +# RUN Commands \ +# linebreak in comment \ +RUN ["ls", "-la"] +RUN ["echo", "'1234'"] +RUN echo "1234" +RUN echo 1234 +RUN echo '1234' && \ + echo "456" && \ + echo 789 +RUN sh -c 'echo root:testpass \ + > /tmp/passwd' +RUN mkdir -p /test /test2 /test3/test + +# ENV \ +ENV SCUBA 1 DUBA 3 +ENV SCUBA "1 DUBA 3" + +# CMD \ +CMD ["echo", "test"] +CMD echo test +CMD echo "test" +CMD echo 'test' +CMD echo 'test' | wc - + +#EXPOSE\ +EXPOSE 3000 +EXPOSE 9000 5000 6000 + +USER docker +USER docker:root + +VOLUME ["/test"] +VOLUME ["/test", "/test2"] +VOLUME /test3 + +WORKDIR /test + +ADD . / +COPY . copy diff --git a/components/engine/builder/dockerfile/parser/testfiles/lk4d4-the-edge-case-generator/result b/components/engine/builder/dockerfile/parser/testfiles/lk4d4-the-edge-case-generator/result new file mode 100644 index 0000000000..8a499ff948 --- /dev/null +++ b/components/engine/builder/dockerfile/parser/testfiles/lk4d4-the-edge-case-generator/result @@ -0,0 +1,29 @@ +(from "busybox:buildroot-2014.02") +(label "maintainer" "docker ") +(onbuild (run "echo" "test")) +(onbuild (run "echo test")) +(onbuild (copy "." "/")) +(run "ls" "-la") +(run "echo" "'1234'") +(run "echo \"1234\"") +(run "echo 1234") +(run "echo '1234' && echo \"456\" && echo 789") +(run "sh -c 'echo root:testpass > /tmp/passwd'") +(run "mkdir -p /test /test2 /test3/test") +(env "SCUBA" "1 DUBA 3") +(env "SCUBA" "\"1 DUBA 3\"") +(cmd "echo" "test") +(cmd "echo test") +(cmd "echo \"test\"") +(cmd "echo 'test'") +(cmd "echo 'test' | wc -") +(expose "3000") +(expose "9000" "5000" "6000") +(user "docker") +(user "docker:root") +(volume "/test") +(volume "/test" "/test2") +(volume "/test3") +(workdir "/test") +(add "." "/") +(copy "." "copy") diff --git a/components/engine/builder/dockerfile/parser/testfiles/mail/Dockerfile b/components/engine/builder/dockerfile/parser/testfiles/mail/Dockerfile new file mode 100644 index 0000000000..f64c1168c1 --- /dev/null +++ b/components/engine/builder/dockerfile/parser/testfiles/mail/Dockerfile @@ -0,0 +1,16 @@ +FROM ubuntu:14.04 + +RUN apt-get update -qy && apt-get install mutt offlineimap vim-nox abook elinks curl tmux cron zsh -y +ADD .muttrc / +ADD .offlineimaprc / +ADD .tmux.conf / +ADD mutt /.mutt +ADD vim /.vim +ADD vimrc /.vimrc +ADD crontab /etc/crontab +RUN chmod 644 /etc/crontab +RUN mkdir /Mail +RUN mkdir /.offlineimap +RUN echo "export TERM=screen-256color" >/.zshenv + +CMD setsid cron; tmux -2 diff --git a/components/engine/builder/dockerfile/parser/testfiles/mail/result b/components/engine/builder/dockerfile/parser/testfiles/mail/result new file mode 100644 index 0000000000..a0efcf04b6 --- /dev/null +++ b/components/engine/builder/dockerfile/parser/testfiles/mail/result @@ -0,0 +1,14 @@ +(from "ubuntu:14.04") +(run "apt-get update -qy && apt-get install mutt offlineimap vim-nox abook elinks curl tmux cron zsh -y") +(add ".muttrc" "/") +(add ".offlineimaprc" "/") +(add ".tmux.conf" "/") +(add "mutt" "/.mutt") +(add "vim" "/.vim") +(add "vimrc" "/.vimrc") +(add "crontab" "/etc/crontab") +(run "chmod 644 /etc/crontab") +(run "mkdir /Mail") +(run "mkdir /.offlineimap") +(run "echo \"export TERM=screen-256color\" >/.zshenv") +(cmd "setsid cron; tmux -2") diff --git a/components/engine/builder/dockerfile/parser/testfiles/multiple-volumes/Dockerfile b/components/engine/builder/dockerfile/parser/testfiles/multiple-volumes/Dockerfile new file mode 100644 index 0000000000..57bb5976a3 --- /dev/null +++ b/components/engine/builder/dockerfile/parser/testfiles/multiple-volumes/Dockerfile 
@@ -0,0 +1,3 @@ +FROM foo + +VOLUME /opt/nagios/var /opt/nagios/etc /opt/nagios/libexec /var/log/apache2 /usr/share/snmp/mibs diff --git a/components/engine/builder/dockerfile/parser/testfiles/multiple-volumes/result b/components/engine/builder/dockerfile/parser/testfiles/multiple-volumes/result new file mode 100644 index 0000000000..18dbdeeaa0 --- /dev/null +++ b/components/engine/builder/dockerfile/parser/testfiles/multiple-volumes/result @@ -0,0 +1,2 @@ +(from "foo") +(volume "/opt/nagios/var" "/opt/nagios/etc" "/opt/nagios/libexec" "/var/log/apache2" "/usr/share/snmp/mibs") diff --git a/components/engine/builder/dockerfile/parser/testfiles/mumble/Dockerfile b/components/engine/builder/dockerfile/parser/testfiles/mumble/Dockerfile new file mode 100644 index 0000000000..5b9ec06a6c --- /dev/null +++ b/components/engine/builder/dockerfile/parser/testfiles/mumble/Dockerfile @@ -0,0 +1,7 @@ +FROM ubuntu:14.04 + +RUN apt-get update && apt-get install libcap2-bin mumble-server -y + +ADD ./mumble-server.ini /etc/mumble-server.ini + +CMD /usr/sbin/murmurd diff --git a/components/engine/builder/dockerfile/parser/testfiles/mumble/result b/components/engine/builder/dockerfile/parser/testfiles/mumble/result new file mode 100644 index 0000000000..a0036a943e --- /dev/null +++ b/components/engine/builder/dockerfile/parser/testfiles/mumble/result @@ -0,0 +1,4 @@ +(from "ubuntu:14.04") +(run "apt-get update && apt-get install libcap2-bin mumble-server -y") +(add "./mumble-server.ini" "/etc/mumble-server.ini") +(cmd "/usr/sbin/murmurd") diff --git a/components/engine/builder/dockerfile/parser/testfiles/nginx/Dockerfile b/components/engine/builder/dockerfile/parser/testfiles/nginx/Dockerfile new file mode 100644 index 0000000000..0a35e2c6b2 --- /dev/null +++ b/components/engine/builder/dockerfile/parser/testfiles/nginx/Dockerfile @@ -0,0 +1,14 @@ +FROM ubuntu:14.04 +LABEL maintainer Erik Hollensbe + +RUN apt-get update && apt-get install nginx-full -y +RUN rm -rf /etc/nginx +ADD etc /etc/nginx +RUN chown -R root:root /etc/nginx +RUN /usr/sbin/nginx -qt +RUN mkdir /www + +CMD ["/usr/sbin/nginx"] + +VOLUME /www +EXPOSE 80 diff --git a/components/engine/builder/dockerfile/parser/testfiles/nginx/result b/components/engine/builder/dockerfile/parser/testfiles/nginx/result new file mode 100644 index 0000000000..a895fadbbe --- /dev/null +++ b/components/engine/builder/dockerfile/parser/testfiles/nginx/result @@ -0,0 +1,11 @@ +(from "ubuntu:14.04") +(label "maintainer" "Erik Hollensbe ") +(run "apt-get update && apt-get install nginx-full -y") +(run "rm -rf /etc/nginx") +(add "etc" "/etc/nginx") +(run "chown -R root:root /etc/nginx") +(run "/usr/sbin/nginx -qt") +(run "mkdir /www") +(cmd "/usr/sbin/nginx") +(volume "/www") +(expose "80") diff --git a/components/engine/builder/dockerfile/parser/testfiles/tf2/Dockerfile b/components/engine/builder/dockerfile/parser/testfiles/tf2/Dockerfile new file mode 100644 index 0000000000..72b79bdd7d --- /dev/null +++ b/components/engine/builder/dockerfile/parser/testfiles/tf2/Dockerfile @@ -0,0 +1,23 @@ +FROM ubuntu:12.04 + +EXPOSE 27015 +EXPOSE 27005 +EXPOSE 26901 +EXPOSE 27020 + +RUN apt-get update && apt-get install libc6-dev-i386 curl unzip -y +RUN mkdir -p /steam +RUN curl http://media.steampowered.com/client/steamcmd_linux.tar.gz | tar vxz -C /steam +ADD ./script /steam/script +RUN /steam/steamcmd.sh +runscript /steam/script +RUN curl http://mirror.pointysoftware.net/alliedmodders/mmsource-1.10.0-linux.tar.gz | tar vxz -C /steam/tf2/tf +RUN curl 
http://mirror.pointysoftware.net/alliedmodders/sourcemod-1.5.3-linux.tar.gz | tar vxz -C /steam/tf2/tf +ADD ./server.cfg /steam/tf2/tf/cfg/server.cfg +ADD ./ctf_2fort.cfg /steam/tf2/tf/cfg/ctf_2fort.cfg +ADD ./sourcemod.cfg /steam/tf2/tf/cfg/sourcemod/sourcemod.cfg +RUN rm -r /steam/tf2/tf/addons/sourcemod/configs +ADD ./configs /steam/tf2/tf/addons/sourcemod/configs +RUN mkdir -p /steam/tf2/tf/addons/sourcemod/translations/en +RUN cp /steam/tf2/tf/addons/sourcemod/translations/*.txt /steam/tf2/tf/addons/sourcemod/translations/en + +CMD cd /steam/tf2 && ./srcds_run -port 27015 +ip 0.0.0.0 +map ctf_2fort -autoupdate -steam_dir /steam -steamcmd_script /steam/script +tf_bot_quota 12 +tf_bot_quota_mode fill diff --git a/components/engine/builder/dockerfile/parser/testfiles/tf2/result b/components/engine/builder/dockerfile/parser/testfiles/tf2/result new file mode 100644 index 0000000000..d4f94cd8be --- /dev/null +++ b/components/engine/builder/dockerfile/parser/testfiles/tf2/result @@ -0,0 +1,20 @@ +(from "ubuntu:12.04") +(expose "27015") +(expose "27005") +(expose "26901") +(expose "27020") +(run "apt-get update && apt-get install libc6-dev-i386 curl unzip -y") +(run "mkdir -p /steam") +(run "curl http://media.steampowered.com/client/steamcmd_linux.tar.gz | tar vxz -C /steam") +(add "./script" "/steam/script") +(run "/steam/steamcmd.sh +runscript /steam/script") +(run "curl http://mirror.pointysoftware.net/alliedmodders/mmsource-1.10.0-linux.tar.gz | tar vxz -C /steam/tf2/tf") +(run "curl http://mirror.pointysoftware.net/alliedmodders/sourcemod-1.5.3-linux.tar.gz | tar vxz -C /steam/tf2/tf") +(add "./server.cfg" "/steam/tf2/tf/cfg/server.cfg") +(add "./ctf_2fort.cfg" "/steam/tf2/tf/cfg/ctf_2fort.cfg") +(add "./sourcemod.cfg" "/steam/tf2/tf/cfg/sourcemod/sourcemod.cfg") +(run "rm -r /steam/tf2/tf/addons/sourcemod/configs") +(add "./configs" "/steam/tf2/tf/addons/sourcemod/configs") +(run "mkdir -p /steam/tf2/tf/addons/sourcemod/translations/en") +(run "cp /steam/tf2/tf/addons/sourcemod/translations/*.txt /steam/tf2/tf/addons/sourcemod/translations/en") +(cmd "cd /steam/tf2 && ./srcds_run -port 27015 +ip 0.0.0.0 +map ctf_2fort -autoupdate -steam_dir /steam -steamcmd_script /steam/script +tf_bot_quota 12 +tf_bot_quota_mode fill") diff --git a/components/engine/builder/dockerfile/parser/testfiles/weechat/Dockerfile b/components/engine/builder/dockerfile/parser/testfiles/weechat/Dockerfile new file mode 100644 index 0000000000..4842088166 --- /dev/null +++ b/components/engine/builder/dockerfile/parser/testfiles/weechat/Dockerfile @@ -0,0 +1,9 @@ +FROM ubuntu:14.04 + +RUN apt-get update -qy && apt-get install tmux zsh weechat-curses -y + +ADD .weechat /.weechat +ADD .tmux.conf / +RUN echo "export TERM=screen-256color" >/.zshenv + +CMD zsh -c weechat diff --git a/components/engine/builder/dockerfile/parser/testfiles/weechat/result b/components/engine/builder/dockerfile/parser/testfiles/weechat/result new file mode 100644 index 0000000000..c3abb4c54f --- /dev/null +++ b/components/engine/builder/dockerfile/parser/testfiles/weechat/result @@ -0,0 +1,6 @@ +(from "ubuntu:14.04") +(run "apt-get update -qy && apt-get install tmux zsh weechat-curses -y") +(add ".weechat" "/.weechat") +(add ".tmux.conf" "/") +(run "echo \"export TERM=screen-256color\" >/.zshenv") +(cmd "zsh -c weechat") diff --git a/components/engine/builder/dockerfile/parser/testfiles/znc/Dockerfile b/components/engine/builder/dockerfile/parser/testfiles/znc/Dockerfile new file mode 100644 index 0000000000..626b126d8a --- /dev/null +++ 
b/components/engine/builder/dockerfile/parser/testfiles/znc/Dockerfile @@ -0,0 +1,7 @@ +FROM ubuntu:14.04 +LABEL maintainer Erik Hollensbe + +RUN apt-get update && apt-get install znc -y +ADD conf /.znc + +CMD [ "/usr/bin/znc", "-f", "-r" ] diff --git a/components/engine/builder/dockerfile/parser/testfiles/znc/result b/components/engine/builder/dockerfile/parser/testfiles/znc/result new file mode 100644 index 0000000000..bfc7f65135 --- /dev/null +++ b/components/engine/builder/dockerfile/parser/testfiles/znc/result @@ -0,0 +1,5 @@ +(from "ubuntu:14.04") +(label "maintainer" "Erik Hollensbe ") +(run "apt-get update && apt-get install znc -y") +(add "conf" "/.znc") +(cmd "/usr/bin/znc" "-f" "-r") diff --git a/components/engine/builder/dockerfile/shell_parser.go b/components/engine/builder/dockerfile/shell_parser.go new file mode 100644 index 0000000000..b72ac291d9 --- /dev/null +++ b/components/engine/builder/dockerfile/shell_parser.go @@ -0,0 +1,344 @@ +package dockerfile + +import ( + "bytes" + "strings" + "text/scanner" + "unicode" + + "github.com/pkg/errors" +) + +// ShellLex performs shell word splitting and variable expansion. +// +// ShellLex takes a string and an array of env variables and +// processes all quotes (" and ') as well as $xxx and ${xxx} env variable +// tokens. It tries to mimic the way bash processes words. +// It doesn't support all flavors of ${xx:...} formats, but new ones can +// be added by adding code to the "special ${} format processing" section. +type ShellLex struct { + escapeToken rune +} + +// NewShellLex creates a new ShellLex which uses escapeToken to escape quotes. +func NewShellLex(escapeToken rune) *ShellLex { + return &ShellLex{escapeToken: escapeToken} +} + +// ProcessWord will use the 'env' list of environment variables, +// and replace any env var references in 'word'. +func (s *ShellLex) ProcessWord(word string, env []string) (string, error) { + word, _, err := s.process(word, env) + return word, err +} + +// ProcessWords will use the 'env' list of environment variables, +// and replace any env var references in 'word'; it will also +// return a slice of strings which represents 'word' +// split up based on spaces, taking quotes into account. Note that +// this splitting is done **after** the env var substitutions are done. +// Note: each word is trimmed to remove leading and trailing spaces (unless +// it is quoted), but ProcessWord retains spaces between words.
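+// +// For example (an illustrative sketch, not part of the original source; the expected output follows the wordsTest fixture below): +// +// shlex := NewShellLex('\\') +// words, _ := shlex.ProcessWords(`a "b c" ${X}`, []string{"X=1"}) +// // words == []string{"a", "b c", "1"}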
+func (s *ShellLex) ProcessWords(word string, env []string) ([]string, error) { + _, words, err := s.process(word, env) + return words, err +} + +func (s *ShellLex) process(word string, env []string) (string, []string, error) { + sw := &shellWord{ + envs: env, + escapeToken: s.escapeToken, + } + sw.scanner.Init(strings.NewReader(word)) + return sw.process(word) +} + +type shellWord struct { + scanner scanner.Scanner + envs []string + escapeToken rune +} + +func (sw *shellWord) process(source string) (string, []string, error) { + word, words, err := sw.processStopOn(scanner.EOF) + if err != nil { + err = errors.Wrapf(err, "failed to process %q", source) + } + return word, words, err +} + +type wordsStruct struct { + word string + words []string + inWord bool +} + +func (w *wordsStruct) addChar(ch rune) { + if unicode.IsSpace(ch) && w.inWord { + if len(w.word) != 0 { + w.words = append(w.words, w.word) + w.word = "" + w.inWord = false + } + } else if !unicode.IsSpace(ch) { + w.addRawChar(ch) + } +} + +func (w *wordsStruct) addRawChar(ch rune) { + w.word += string(ch) + w.inWord = true +} + +func (w *wordsStruct) addString(str string) { + var scan scanner.Scanner + scan.Init(strings.NewReader(str)) + for scan.Peek() != scanner.EOF { + w.addChar(scan.Next()) + } +} + +func (w *wordsStruct) addRawString(str string) { + w.word += str + w.inWord = true +} + +func (w *wordsStruct) getWords() []string { + if len(w.word) > 0 { + w.words = append(w.words, w.word) + + // Just in case we're called again by mistake + w.word = "" + w.inWord = false + } + return w.words +} + +// Process the word, starting at 'pos', and stop when we get to the +// end of the word or the 'stopChar' character +func (sw *shellWord) processStopOn(stopChar rune) (string, []string, error) { + var result bytes.Buffer + var words wordsStruct + + var charFuncMapping = map[rune]func() (string, error){ + '\'': sw.processSingleQuote, + '"': sw.processDoubleQuote, + '$': sw.processDollar, + } + + for sw.scanner.Peek() != scanner.EOF { + ch := sw.scanner.Peek() + + if stopChar != scanner.EOF && ch == stopChar { + sw.scanner.Next() + break + } + if fn, ok := charFuncMapping[ch]; ok { + // Call special processing func for certain chars + tmp, err := fn() + if err != nil { + return "", []string{}, err + } + result.WriteString(tmp) + + if ch == rune('$') { + words.addString(tmp) + } else { + words.addRawString(tmp) + } + } else { + // Not special, just add it to the result + ch = sw.scanner.Next() + + if ch == sw.escapeToken { + // '\' (default escape token, but ` allowed) escapes, except end of line + ch = sw.scanner.Next() + + if ch == scanner.EOF { + break + } + + words.addRawChar(ch) + } else { + words.addChar(ch) + } + + result.WriteRune(ch) + } + } + + return result.String(), words.getWords(), nil +} + +func (sw *shellWord) processSingleQuote() (string, error) { + // All chars between single quotes are taken as-is + // Note, you can't escape ' + // + // From the "sh" man page: + // Single Quotes + // Enclosing characters in single quotes preserves the literal meaning of + // all the characters (except single quotes, making it impossible to put + // single-quotes in a single-quoted string). 
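+ // + // For example (illustrative): the input 'a $b \' yields the literal string a $b \ unchanged; no variable expansion and no escape handling happens inside the quotes.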
+ + var result bytes.Buffer + + sw.scanner.Next() + + for { + ch := sw.scanner.Next() + switch ch { + case scanner.EOF: + return "", errors.New("unexpected end of statement while looking for matching single-quote") + case '\'': + return result.String(), nil + } + result.WriteRune(ch) + } +} + +func (sw *shellWord) processDoubleQuote() (string, error) { + // All chars up to the next " are taken as-is, even ', except any $ chars + // But you can escape " with a \ (or ` if escape token set accordingly) + // + // From the "sh" man page: + // Double Quotes + // Enclosing characters within double quotes preserves the literal meaning + // of all characters except dollarsign ($), backquote (`), and backslash + // (\). The backslash inside double quotes is historically weird, and + // serves to quote only the following characters: + // $ ` " \ . + // Otherwise it remains literal. + + var result bytes.Buffer + + sw.scanner.Next() + + for { + switch sw.scanner.Peek() { + case scanner.EOF: + return "", errors.New("unexpected end of statement while looking for matching double-quote") + case '"': + sw.scanner.Next() + return result.String(), nil + case '$': + value, err := sw.processDollar() + if err != nil { + return "", err + } + result.WriteString(value) + default: + ch := sw.scanner.Next() + if ch == sw.escapeToken { + switch sw.scanner.Peek() { + case scanner.EOF: + // Ignore \ at end of word + continue + case '"', '$', sw.escapeToken: + // These chars can be escaped, all other \'s are left as-is + // Note: for now don't do anything special with ` chars. + // Not sure what to do with them anyway since we're not going + // to execute the text in there (not now anyway). + ch = sw.scanner.Next() + } + } + result.WriteRune(ch) + } + } +} + +func (sw *shellWord) processDollar() (string, error) { + sw.scanner.Next() + + // $xxx case + if sw.scanner.Peek() != '{' { + name := sw.processName() + if name == "" { + return "$", nil + } + return sw.getEnv(name), nil + } + + sw.scanner.Next() + name := sw.processName() + ch := sw.scanner.Peek() + if ch == '}' { + // Normal ${xx} case + sw.scanner.Next() + return sw.getEnv(name), nil + } + if ch == ':' { + // Special ${xx:...} format processing + // Yes it allows for recursive $'s in the ... 
spot + + sw.scanner.Next() // skip over : + modifier := sw.scanner.Next() + + word, _, err := sw.processStopOn('}') + if err != nil { + return "", err + } + + // Grab the current value of the variable in question so we + // can use it to determine what to do based on the modifier + newValue := sw.getEnv(name) + + switch modifier { + case '+': + if newValue != "" { + newValue = word + } + return newValue, nil + + case '-': + if newValue == "" { + newValue = word + } + return newValue, nil + + default: + return "", errors.Errorf("unsupported modifier (%c) in substitution", modifier) + } + } + return "", errors.Errorf("missing ':' in substitution") +} + +func (sw *shellWord) processName() string { + // Read in a name (alphanumeric or _) + // If it starts with a digit then the name is just that single digit (e.g. $1) + var name bytes.Buffer + + for sw.scanner.Peek() != scanner.EOF { + ch := sw.scanner.Peek() + if name.Len() == 0 && unicode.IsDigit(ch) { + ch = sw.scanner.Next() + return string(ch) + } + if !unicode.IsLetter(ch) && !unicode.IsDigit(ch) && ch != '_' { + break + } + ch = sw.scanner.Next() + name.WriteRune(ch) + } + + return name.String() +} + +func (sw *shellWord) getEnv(name string) string { + for _, env := range sw.envs { + i := strings.Index(env, "=") + if i < 0 { + if equalEnvKeys(name, env) { + // Should probably never get here, but just in case treat + // it like "var" and "var=" are the same + return "" + } + continue + } + compareName := env[:i] + if !equalEnvKeys(name, compareName) { + continue + } + return env[i+1:] + } + return "" +} diff --git a/components/engine/builder/dockerfile/shell_parser_test.go b/components/engine/builder/dockerfile/shell_parser_test.go new file mode 100644 index 0000000000..c4f7e0efd4 --- /dev/null +++ b/components/engine/builder/dockerfile/shell_parser_test.go @@ -0,0 +1,151 @@ +package dockerfile + +import ( + "bufio" + "os" + "runtime" + "strings" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestShellParser4EnvVars(t *testing.T) { + fn := "envVarTest" + lineCount := 0 + + file, err := os.Open(fn) + assert.NoError(t, err) + defer file.Close() + + shlex := NewShellLex('\\') + scanner := bufio.NewScanner(file) + envs := []string{"PWD=/home", "SHELL=bash", "KOREAN=한국어"} + for scanner.Scan() { + line := scanner.Text() + lineCount++ + + // Trim comments and blank lines + i := strings.Index(line, "#") + if i >= 0 { + line = line[:i] + } + line = strings.TrimSpace(line) + + if line == "" { + continue + } + + words := strings.Split(line, "|") + assert.Len(t, words, 3) + + platform := strings.TrimSpace(words[0]) + source := strings.TrimSpace(words[1]) + expected := strings.TrimSpace(words[2]) + + // Key W=Windows; A=All; U=Unix + if platform != "W" && platform != "A" && platform != "U" { + t.Fatalf("Invalid tag %s at line %d of %s. 
Must be W, A or U", platform, lineCount, fn) + } + + if ((platform == "W" || platform == "A") && runtime.GOOS == "windows") || + ((platform == "U" || platform == "A") && runtime.GOOS != "windows") { + newWord, err := shlex.ProcessWord(source, envs) + if expected == "error" { + assert.Error(t, err) + } else { + assert.NoError(t, err) + assert.Equal(t, newWord, expected) + } + } + } +} + +func TestShellParser4Words(t *testing.T) { + fn := "wordsTest" + + file, err := os.Open(fn) + if err != nil { + t.Fatalf("Can't open '%s': %s", err, fn) + } + defer file.Close() + + shlex := NewShellLex('\\') + envs := []string{} + scanner := bufio.NewScanner(file) + lineNum := 0 + for scanner.Scan() { + line := scanner.Text() + lineNum = lineNum + 1 + + if strings.HasPrefix(line, "#") { + continue + } + + if strings.HasPrefix(line, "ENV ") { + line = strings.TrimLeft(line[3:], " ") + envs = append(envs, line) + continue + } + + words := strings.Split(line, "|") + if len(words) != 2 { + t.Fatalf("Error in '%s'(line %d) - should be exactly one | in: %q", fn, lineNum, line) + } + test := strings.TrimSpace(words[0]) + expected := strings.Split(strings.TrimLeft(words[1], " "), ",") + + result, err := shlex.ProcessWords(test, envs) + + if err != nil { + result = []string{"error"} + } + + if len(result) != len(expected) { + t.Fatalf("Error on line %d. %q was suppose to result in %q, but got %q instead", lineNum, test, expected, result) + } + for i, w := range expected { + if w != result[i] { + t.Fatalf("Error on line %d. %q was suppose to result in %q, but got %q instead", lineNum, test, expected, result) + } + } + } +} + +func TestGetEnv(t *testing.T) { + sw := &shellWord{envs: nil} + + sw.envs = []string{} + if sw.getEnv("foo") != "" { + t.Fatal("2 - 'foo' should map to ''") + } + + sw.envs = []string{"foo"} + if sw.getEnv("foo") != "" { + t.Fatal("3 - 'foo' should map to ''") + } + + sw.envs = []string{"foo="} + if sw.getEnv("foo") != "" { + t.Fatal("4 - 'foo' should map to ''") + } + + sw.envs = []string{"foo=bar"} + if sw.getEnv("foo") != "bar" { + t.Fatal("5 - 'foo' should map to 'bar'") + } + + sw.envs = []string{"foo=bar", "car=hat"} + if sw.getEnv("foo") != "bar" { + t.Fatal("6 - 'foo' should map to 'bar'") + } + if sw.getEnv("car") != "hat" { + t.Fatal("7 - 'car' should map to 'hat'") + } + + // Make sure we grab the first 'car' in the list + sw.envs = []string{"foo=bar", "car=hat", "car=bike"} + if sw.getEnv("car") != "hat" { + t.Fatal("8 - 'car' should map to 'hat'") + } +} diff --git a/components/engine/builder/dockerfile/support.go b/components/engine/builder/dockerfile/support.go new file mode 100644 index 0000000000..e87588910b --- /dev/null +++ b/components/engine/builder/dockerfile/support.go @@ -0,0 +1,19 @@ +package dockerfile + +import "strings" + +// handleJSONArgs parses command passed to CMD, ENTRYPOINT, RUN and SHELL instruction in Dockerfile +// for exec form it returns untouched args slice +// for shell form it returns concatenated args as the first element of a slice +func handleJSONArgs(args []string, attributes map[string]bool) []string { + if len(args) == 0 { + return []string{} + } + + if attributes != nil && attributes["json"] { + return args + } + + // literal string command, not an exec array + return []string{strings.Join(args, " ")} +} diff --git a/components/engine/builder/dockerfile/support_test.go b/components/engine/builder/dockerfile/support_test.go new file mode 100644 index 0000000000..7cc6fe9dcb --- /dev/null +++ b/components/engine/builder/dockerfile/support_test.go 
@@ -0,0 +1,65 @@ +package dockerfile + +import "testing" + +type testCase struct { + name string + args []string + attributes map[string]bool + expected []string +} + +func initTestCases() []testCase { + testCases := []testCase{} + + testCases = append(testCases, testCase{ + name: "empty args", + args: []string{}, + attributes: make(map[string]bool), + expected: []string{}, + }) + + jsonAttributes := make(map[string]bool) + jsonAttributes["json"] = true + + testCases = append(testCases, testCase{ + name: "json attribute with one element", + args: []string{"foo"}, + attributes: jsonAttributes, + expected: []string{"foo"}, + }) + + testCases = append(testCases, testCase{ + name: "json attribute with two elements", + args: []string{"foo", "bar"}, + attributes: jsonAttributes, + expected: []string{"foo", "bar"}, + }) + + testCases = append(testCases, testCase{ + name: "no attributes", + args: []string{"foo", "bar"}, + attributes: nil, + expected: []string{"foo bar"}, + }) + + return testCases +} + +func TestHandleJSONArgs(t *testing.T) { + testCases := initTestCases() + + for _, test := range testCases { + arguments := handleJSONArgs(test.args, test.attributes) + + if len(arguments) != len(test.expected) { + t.Fatalf("In test \"%s\": length of returned slice is incorrect. Expected: %d, got: %d", test.name, len(test.expected), len(arguments)) + } + + for i := range test.expected { + if arguments[i] != test.expected[i] { + t.Fatalf("In test \"%s\": element at position %d is incorrect. Expected: %s, got: %s", test.name, i, test.expected[i], arguments[i]) + } + } + } +} diff --git a/components/engine/builder/dockerfile/utils_test.go b/components/engine/builder/dockerfile/utils_test.go new file mode 100644 index 0000000000..80a3f1babf --- /dev/null +++ b/components/engine/builder/dockerfile/utils_test.go @@ -0,0 +1,50 @@ +package dockerfile + +import ( + "io/ioutil" + "os" + "path/filepath" + "testing" +) + +// createTestTempDir creates a temporary directory for testing. +// It returns the created path and a cleanup function which is meant to be used as a deferred call. +// When an error occurs, it terminates the test. +func createTestTempDir(t *testing.T, dir, prefix string) (string, func()) { + path, err := ioutil.TempDir(dir, prefix) + + if err != nil { + t.Fatalf("Error when creating directory %s with prefix %s: %s", dir, prefix, err) + } + + return path, func() { + err = os.RemoveAll(path) + + if err != nil { + t.Fatalf("Error when removing directory %s: %s", path, err) + } + } +} + +// createTestTempFile creates a temporary file within dir with specific contents and permissions.
+// When an error occurs, it terminates the test +func createTestTempFile(t *testing.T, dir, filename, contents string, perm os.FileMode) string { + filePath := filepath.Join(dir, filename) + err := ioutil.WriteFile(filePath, []byte(contents), perm) + + if err != nil { + t.Fatalf("Error when creating %s file: %s", filename, err) + } + + return filePath +} + +// createTestSymlink creates a symlink file within dir which points to oldname +func createTestSymlink(t *testing.T, dir, filename, oldname string) string { + filePath := filepath.Join(dir, filename) + if err := os.Symlink(oldname, filePath); err != nil { + t.Fatalf("Error when creating %s symlink to %s: %s", filename, oldname, err) + } + + return filePath +} diff --git a/components/engine/builder/dockerfile/wordsTest b/components/engine/builder/dockerfile/wordsTest new file mode 100644 index 0000000000..1fd9f19433 --- /dev/null +++ b/components/engine/builder/dockerfile/wordsTest @@ -0,0 +1,30 @@ +hello | hello +hello${hi}bye | hellobye +ENV hi=hi +hello${hi}bye | hellohibye +ENV space=abc def +hello${space}bye | helloabc,defbye +hello"${space}"bye | helloabc defbye +hello "${space}"bye | hello,abc defbye +ENV leading= ab c +hello${leading}def | hello,ab,cdef +hello"${leading}" def | hello ab c,def +hello"${leading}" | hello ab c +hello${leading} | hello,ab,c +# next line MUST have 3 trailing spaces, don't erase them! +ENV trailing=ab c +hello${trailing} | helloab,c +hello${trailing}d | helloab,c,d +hello"${trailing}"d | helloab c d +# next line MUST have 3 trailing spaces, don't erase them! +hel"lo${trailing}" | helloab c +hello" there " | hello there +hello there | hello,there +hello\ there | hello there +hello" there | error +hello\" there | hello",there +hello"\\there" | hello\there +hello"\there" | hello\there +hello'\\there' | hello\\there +hello'\there' | hello\there +hello'$there' | hello$there diff --git a/components/engine/builder/dockerignore/dockerignore.go b/components/engine/builder/dockerignore/dockerignore.go new file mode 100644 index 0000000000..2db67be799 --- /dev/null +++ b/components/engine/builder/dockerignore/dockerignore.go @@ -0,0 +1,49 @@ +package dockerignore + +import ( + "bufio" + "bytes" + "fmt" + "io" + "path/filepath" + "strings" +) + +// ReadAll reads a .dockerignore file and returns the list of file patterns +// to ignore. Note this will trim whitespace from each line as well +// as use GO's "clean" func to get the shortest/cleanest path for each. 
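+// For example (illustrative): a file containing the lines "# comment", "./foo/../bar" and "baz/" yields the patterns ["bar", "baz"].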
+func ReadAll(reader io.Reader) ([]string, error) { + if reader == nil { + return nil, nil + } + + scanner := bufio.NewScanner(reader) + var excludes []string + currentLine := 0 + + utf8bom := []byte{0xEF, 0xBB, 0xBF} + for scanner.Scan() { + scannedBytes := scanner.Bytes() + // We trim UTF8 BOM + if currentLine == 0 { + scannedBytes = bytes.TrimPrefix(scannedBytes, utf8bom) + } + pattern := string(scannedBytes) + currentLine++ + // Lines starting with # (comments) are ignored before processing + if strings.HasPrefix(pattern, "#") { + continue + } + pattern = strings.TrimSpace(pattern) + if pattern == "" { + continue + } + pattern = filepath.Clean(pattern) + pattern = filepath.ToSlash(pattern) + excludes = append(excludes, pattern) + } + if err := scanner.Err(); err != nil { + return nil, fmt.Errorf("Error reading .dockerignore: %v", err) + } + return excludes, nil +} diff --git a/components/engine/builder/dockerignore/dockerignore_test.go b/components/engine/builder/dockerignore/dockerignore_test.go new file mode 100644 index 0000000000..948f9d886b --- /dev/null +++ b/components/engine/builder/dockerignore/dockerignore_test.go @@ -0,0 +1,57 @@ +package dockerignore + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "testing" +) + +func TestReadAll(t *testing.T) { + tmpDir, err := ioutil.TempDir("", "dockerignore-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpDir) + + di, err := ReadAll(nil) + if err != nil { + t.Fatalf("Expected not to have error, got %v", err) + } + + if diLen := len(di); diLen != 0 { + t.Fatalf("Expected to have zero dockerignore entry, got %d", diLen) + } + + diName := filepath.Join(tmpDir, ".dockerignore") + content := fmt.Sprintf("test1\n/test2\n/a/file/here\n\nlastfile") + err = ioutil.WriteFile(diName, []byte(content), 0777) + if err != nil { + t.Fatal(err) + } + + diFd, err := os.Open(diName) + if err != nil { + t.Fatal(err) + } + defer diFd.Close() + + di, err = ReadAll(diFd) + if err != nil { + t.Fatal(err) + } + + if di[0] != "test1" { + t.Fatal("First element is not test1") + } + if di[1] != "/test2" { + t.Fatal("Second element is not /test2") + } + if di[2] != "/a/file/here" { + t.Fatal("Third element is not /a/file/here") + } + if di[3] != "lastfile" { + t.Fatal("Fourth element is not lastfile") + } +} diff --git a/components/engine/builder/remotecontext/detect.go b/components/engine/builder/remotecontext/detect.go new file mode 100644 index 0000000000..177596093a --- /dev/null +++ b/components/engine/builder/remotecontext/detect.go @@ -0,0 +1,178 @@ +package remotecontext + +import ( + "bufio" + "fmt" + "io" + "os" + "path/filepath" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/builder" + "github.com/docker/docker/builder/dockerfile/parser" + "github.com/docker/docker/builder/dockerignore" + "github.com/docker/docker/pkg/fileutils" + "github.com/docker/docker/pkg/httputils" + "github.com/docker/docker/pkg/symlink" + "github.com/docker/docker/pkg/urlutil" + "github.com/pkg/errors" +) + +// Detect returns a context and dockerfile from remote location or local +// archive. progressReader is only used if remoteURL is actually a URL +// (not empty, and not a Git endpoint). 
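+// For example (illustrative): an empty Options.RemoteContext falls through to the tar archive in config.Source, a git URL is cloned, and a plain HTTP(S) URL is fetched through the progress reader and treated as either a Dockerfile or a tar context depending on its content type.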
+func Detect(config backend.BuildConfig) (remote builder.Source, dockerfile *parser.Result, err error) { + remoteURL := config.Options.RemoteContext + dockerfilePath := config.Options.Dockerfile + + switch { + case remoteURL == "": + remote, dockerfile, err = newArchiveRemote(config.Source, dockerfilePath) + case urlutil.IsGitURL(remoteURL): + remote, dockerfile, err = newGitRemote(remoteURL, dockerfilePath) + case urlutil.IsURL(remoteURL): + remote, dockerfile, err = newURLRemote(remoteURL, dockerfilePath, config.ProgressWriter.ProgressReaderFunc) + default: + err = fmt.Errorf("remoteURL (%s) could not be recognized as URL", remoteURL) + } + return +} + +func newArchiveRemote(rc io.ReadCloser, dockerfilePath string) (builder.Source, *parser.Result, error) { + c, err := MakeTarSumContext(rc) + if err != nil { + return nil, nil, err + } + + return withDockerfileFromContext(c.(modifiableContext), dockerfilePath) +} + +func withDockerfileFromContext(c modifiableContext, dockerfilePath string) (builder.Source, *parser.Result, error) { + df, err := openAt(c, dockerfilePath) + if err != nil { + if os.IsNotExist(err) { + if dockerfilePath == builder.DefaultDockerfileName { + lowercase := strings.ToLower(dockerfilePath) + if _, err := StatAt(c, lowercase); err == nil { + return withDockerfileFromContext(c, lowercase) + } + } + return nil, nil, errors.Errorf("Cannot locate specified Dockerfile: %s", dockerfilePath) // backwards compatible error + } + c.Close() + return nil, nil, err + } + + res, err := readAndParseDockerfile(dockerfilePath, df) + if err != nil { + return nil, nil, err + } + + df.Close() + + if err := removeDockerfile(c, dockerfilePath); err != nil { + c.Close() + return nil, nil, err + } + + return c, res, nil +} + +func newGitRemote(gitURL string, dockerfilePath string) (builder.Source, *parser.Result, error) { + c, err := MakeGitContext(gitURL) // TODO: change this to NewLazyContext + if err != nil { + return nil, nil, err + } + return withDockerfileFromContext(c.(modifiableContext), dockerfilePath) +} + +func newURLRemote(url string, dockerfilePath string, progressReader func(in io.ReadCloser) io.ReadCloser) (builder.Source, *parser.Result, error) { + var dockerfile io.ReadCloser + dockerfileFoundErr := errors.New("found-dockerfile") + c, err := MakeRemoteContext(url, map[string]func(io.ReadCloser) (io.ReadCloser, error){ + httputils.MimeTypes.TextPlain: func(rc io.ReadCloser) (io.ReadCloser, error) { + dockerfile = rc + return nil, dockerfileFoundErr + }, + // fallback handler (tar context) + "": func(rc io.ReadCloser) (io.ReadCloser, error) { + return progressReader(rc), nil + }, + }) + if err != nil { + if err == dockerfileFoundErr { + res, err := parser.Parse(dockerfile) + if err != nil { + return nil, nil, err + } + return nil, res, nil + } + return nil, nil, err + } + + return withDockerfileFromContext(c.(modifiableContext), dockerfilePath) +} + +func removeDockerfile(c modifiableContext, filesToRemove ...string) error { + f, err := openAt(c, ".dockerignore") + // Note that a missing .dockerignore file isn't treated as an error + switch { + case os.IsNotExist(err): + return nil + case err != nil: + return err + } + excludes, err := dockerignore.ReadAll(f) + if err != nil { + return err + } + f.Close() + filesToRemove = append([]string{".dockerignore"}, filesToRemove...) 
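+ // .dockerignore itself is always prepended above, so it is removed from the build context alongside the Dockerfile whenever it matches one of its own exclusion patterns.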
+ for _, fileToRemove := range filesToRemove { + if rm, _ := fileutils.Matches(fileToRemove, excludes); rm { + if err := c.Remove(fileToRemove); err != nil { + logrus.Errorf("failed to remove %s: %v", fileToRemove, err) + } + } + } + return nil +} + +func readAndParseDockerfile(name string, rc io.Reader) (*parser.Result, error) { + br := bufio.NewReader(rc) + if _, err := br.Peek(1); err != nil { + if err == io.EOF { + return nil, errors.Errorf("the Dockerfile (%s) cannot be empty", name) + } + return nil, errors.Wrap(err, "unexpected error reading Dockerfile") + } + return parser.Parse(br) +} + +func openAt(remote builder.Source, path string) (*os.File, error) { + fullPath, err := FullPath(remote, path) + if err != nil { + return nil, err + } + return os.Open(fullPath) +} + +// StatAt is a helper for calling Stat on a path from a source +func StatAt(remote builder.Source, path string) (os.FileInfo, error) { + fullPath, err := FullPath(remote, path) + if err != nil { + return nil, err + } + return os.Stat(fullPath) +} + +// FullPath is a helper for getting a full path for a path from a source +func FullPath(remote builder.Source, path string) (string, error) { + fullPath, err := symlink.FollowSymlinkInScope(filepath.Join(remote.Root(), path), remote.Root()) + if err != nil { + return "", fmt.Errorf("Forbidden path outside the build context: %s (%s)", path, fullPath) // backwards compat with old error + } + return fullPath, nil +} diff --git a/components/engine/builder/remotecontext/detect_test.go b/components/engine/builder/remotecontext/detect_test.go new file mode 100644 index 0000000000..6b47ac2274 --- /dev/null +++ b/components/engine/builder/remotecontext/detect_test.go @@ -0,0 +1,123 @@ +package remotecontext + +import ( + "errors" + "io/ioutil" + "os" + "path/filepath" + "sort" + "testing" + + "github.com/docker/docker/builder" +) + +const ( + dockerfileContents = "FROM busybox" + dockerignoreFilename = ".dockerignore" + testfileContents = "test" +) + +const shouldStayFilename = "should_stay" + +func extractFilenames(files []os.FileInfo) []string { + filenames := make([]string, len(files)) + + for i, file := range files { + filenames[i] = file.Name() + } + + return filenames +} + +func checkDirectory(t *testing.T, dir string, expectedFiles []string) { + files, err := ioutil.ReadDir(dir) + + if err != nil { + t.Fatalf("Could not read directory: %s", err) + } + + if len(files) != len(expectedFiles) { + t.Fatalf("Directory should contain exactly %d file(s), got %d", len(expectedFiles), len(files)) + } + + filenames := extractFilenames(files) + sort.Strings(filenames) + sort.Strings(expectedFiles) + + for i, filename := range filenames { + if filename != expectedFiles[i] { + t.Fatalf("File %s should be in the directory, got: %s", expectedFiles[i], filename) + } + } +} + +func executeProcess(t *testing.T, contextDir string) { + modifiableCtx := &stubRemote{root: contextDir} + + err := removeDockerfile(modifiableCtx, builder.DefaultDockerfileName) + + if err != nil { + t.Fatalf("Error when executing Process: %s", err) + } +} + +func TestProcessShouldRemoveDockerfileDockerignore(t *testing.T) { + contextDir, cleanup := createTestTempDir(t, "", "builder-dockerignore-process-test") + defer cleanup() + + createTestTempFile(t, contextDir, shouldStayFilename, testfileContents, 0777) + createTestTempFile(t, contextDir, dockerignoreFilename, "Dockerfile\n.dockerignore", 0777) + createTestTempFile(t, contextDir, builder.DefaultDockerfileName, dockerfileContents, 0777) + + 
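// Dockerfile and .dockerignore are both listed in the ignore file written above, so removeDockerfile is expected to delete them and keep only should_stay. +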
executeProcess(t, contextDir) + + checkDirectory(t, contextDir, []string{shouldStayFilename}) + +} + +func TestProcessNoDockerignore(t *testing.T) { + contextDir, cleanup := createTestTempDir(t, "", "builder-dockerignore-process-test") + defer cleanup() + + createTestTempFile(t, contextDir, shouldStayFilename, testfileContents, 0777) + createTestTempFile(t, contextDir, builder.DefaultDockerfileName, dockerfileContents, 0777) + + executeProcess(t, contextDir) + + checkDirectory(t, contextDir, []string{shouldStayFilename, builder.DefaultDockerfileName}) + +} + +func TestProcessShouldLeaveAllFiles(t *testing.T) { + contextDir, cleanup := createTestTempDir(t, "", "builder-dockerignore-process-test") + defer cleanup() + + createTestTempFile(t, contextDir, shouldStayFilename, testfileContents, 0777) + createTestTempFile(t, contextDir, builder.DefaultDockerfileName, dockerfileContents, 0777) + createTestTempFile(t, contextDir, dockerignoreFilename, "input1\ninput2", 0777) + + executeProcess(t, contextDir) + + checkDirectory(t, contextDir, []string{shouldStayFilename, builder.DefaultDockerfileName, dockerignoreFilename}) + +} + +// TODO: remove after moving to a separate pkg +type stubRemote struct { + root string +} + +func (r *stubRemote) Hash(path string) (string, error) { + return "", errors.New("not implemented") +} + +func (r *stubRemote) Root() string { + return r.root +} +func (r *stubRemote) Close() error { + return errors.New("not implemented") +} +func (r *stubRemote) Remove(p string) error { + return os.Remove(filepath.Join(r.root, p)) +} diff --git a/components/engine/builder/remotecontext/filehash.go b/components/engine/builder/remotecontext/filehash.go new file mode 100644 index 0000000000..a9b324272b --- /dev/null +++ b/components/engine/builder/remotecontext/filehash.go @@ -0,0 +1,34 @@ +package remotecontext + +import ( + "archive/tar" + "crypto/sha256" + "hash" + "os" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/tarsum" +) + +// NewFileHash returns new hash that is used for the builder cache keys +func NewFileHash(path, name string, fi os.FileInfo) (hash.Hash, error) { + hdr, err := archive.FileInfoHeader(path, name, fi) + if err != nil { + return nil, err + } + tsh := &tarsumHash{hdr: hdr, Hash: sha256.New()} + tsh.Reset() // initialize header + return tsh, nil +} + +type tarsumHash struct { + hash.Hash + hdr *tar.Header +} + +// Reset resets the Hash to its initial state. +func (tsh *tarsumHash) Reset() { + // comply with hash.Hash and reset to the state hash had before any writes + tsh.Hash.Reset() + tarsum.WriteV1Header(tsh.hdr, tsh.Hash) +} diff --git a/components/engine/builder/remotecontext/git.go b/components/engine/builder/remotecontext/git.go new file mode 100644 index 0000000000..4204c9f7bb --- /dev/null +++ b/components/engine/builder/remotecontext/git.go @@ -0,0 +1,29 @@ +package remotecontext + +import ( + "os" + + "github.com/docker/docker/builder" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/gitutils" +) + +// MakeGitContext returns a Context from gitURL that is cloned in a temporary directory. +func MakeGitContext(gitURL string) (builder.Source, error) { + root, err := gitutils.Clone(gitURL) + if err != nil { + return nil, err + } + + c, err := archive.Tar(root, archive.Uncompressed) + if err != nil { + return nil, err + } + + defer func() { + // TODO: print errors? 
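+		// Best-effort cleanup: by the time this deferred function runs,
+		// MakeTarSumContext below has already consumed the tar stream.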
+ c.Close() + os.RemoveAll(root) + }() + return MakeTarSumContext(c) +} diff --git a/components/engine/builder/remotecontext/lazycontext.go b/components/engine/builder/remotecontext/lazycontext.go new file mode 100644 index 0000000000..283d82a7b1 --- /dev/null +++ b/components/engine/builder/remotecontext/lazycontext.go @@ -0,0 +1,101 @@ +package remotecontext + +import ( + "encoding/hex" + "os" + "path/filepath" + "runtime" + "strings" + + "github.com/docker/docker/builder" + "github.com/docker/docker/pkg/pools" + "github.com/pkg/errors" +) + +// NewLazyContext creates a new LazyContext. LazyContext defines a hashed build +// context based on a root directory. Individual files are hashed first time +// they are asked. It is not safe to call methods of LazyContext concurrently. +func NewLazyContext(root string) (builder.Source, error) { + return &lazyContext{ + root: root, + sums: make(map[string]string), + }, nil +} + +type lazyContext struct { + root string + sums map[string]string +} + +func (c *lazyContext) Root() string { + return c.root +} + +func (c *lazyContext) Close() error { + return nil +} + +func (c *lazyContext) Hash(path string) (string, error) { + cleanPath, fullPath, err := normalize(path, c.root) + if err != nil { + return "", err + } + + fi, err := os.Lstat(fullPath) + if err != nil { + return "", err + } + + relPath, err := Rel(c.root, fullPath) + if err != nil { + return "", errors.WithStack(convertPathError(err, cleanPath)) + } + + sum, ok := c.sums[relPath] + if !ok { + sum, err = c.prepareHash(relPath, fi) + if err != nil { + return "", err + } + } + + return sum, nil +} + +func (c *lazyContext) prepareHash(relPath string, fi os.FileInfo) (string, error) { + p := filepath.Join(c.root, relPath) + h, err := NewFileHash(p, relPath, fi) + if err != nil { + return "", errors.Wrapf(err, "failed to create hash for %s", relPath) + } + if fi.Mode().IsRegular() && fi.Size() > 0 { + f, err := os.Open(p) + if err != nil { + return "", errors.Wrapf(err, "failed to open %s", relPath) + } + defer f.Close() + if _, err := pools.Copy(h, f); err != nil { + return "", errors.Wrapf(err, "failed to copy file data for %s", relPath) + } + } + sum := hex.EncodeToString(h.Sum(nil)) + c.sums[relPath] = sum + return sum, nil +} + +// Rel makes a path relative to base path. Same as `filepath.Rel` but can also +// handle UUID paths in windows. +func Rel(basepath, targpath string) (string, error) { + // filepath.Rel can't handle UUID paths in windows + if runtime.GOOS == "windows" { + pfx := basepath + `\` + if strings.HasPrefix(targpath, pfx) { + p := strings.TrimPrefix(targpath, pfx) + if p == "" { + p = "." 
+ } + return p, nil + } + } + return filepath.Rel(basepath, targpath) +} diff --git a/components/engine/builder/remotecontext/remote.go b/components/engine/builder/remotecontext/remote.go new file mode 100644 index 0000000000..6be464fff1 --- /dev/null +++ b/components/engine/builder/remotecontext/remote.go @@ -0,0 +1,116 @@ +package remotecontext + +import ( + "bytes" + "errors" + "fmt" + "io" + "io/ioutil" + "regexp" + + "github.com/docker/docker/builder" + "github.com/docker/docker/pkg/httputils" +) + +// When downloading remote contexts, limit the amount (in bytes) +// to be read from the response body in order to detect its Content-Type +const maxPreambleLength = 100 + +const acceptableRemoteMIME = `(?:application/(?:(?:x\-)?tar|octet\-stream|((?:x\-)?(?:gzip|bzip2?|xz)))|(?:text/plain))` + +var mimeRe = regexp.MustCompile(acceptableRemoteMIME) + +// MakeRemoteContext downloads a context from remoteURL and returns it. +// +// If contentTypeHandlers is non-nil, then the Content-Type header is read along with a maximum of +// maxPreambleLength bytes from the body to help detecting the MIME type. +// Look at acceptableRemoteMIME for more details. +// +// If a match is found, then the body is sent to the contentType handler and a (potentially compressed) tar stream is expected +// to be returned. If no match is found, it is assumed the body is a tar stream (compressed or not). +// In either case, an (assumed) tar stream is passed to MakeTarSumContext whose result is returned. +func MakeRemoteContext(remoteURL string, contentTypeHandlers map[string]func(io.ReadCloser) (io.ReadCloser, error)) (builder.Source, error) { + f, err := httputils.Download(remoteURL) + if err != nil { + return nil, fmt.Errorf("error downloading remote context %s: %v", remoteURL, err) + } + defer f.Body.Close() + + var contextReader io.ReadCloser + if contentTypeHandlers != nil { + contentType := f.Header.Get("Content-Type") + clen := f.ContentLength + + contentType, contextReader, err = inspectResponse(contentType, f.Body, clen) + if err != nil { + return nil, fmt.Errorf("error detecting content type for remote %s: %v", remoteURL, err) + } + defer contextReader.Close() + + // This loop tries to find a content-type handler for the detected content-type. + // If it could not find one from the caller-supplied map, it tries the empty content-type `""` + // which is interpreted as a fallback handler (usually used for raw tar contexts). + for _, ct := range []string{contentType, ""} { + if fn, ok := contentTypeHandlers[ct]; ok { + defer contextReader.Close() + if contextReader, err = fn(contextReader); err != nil { + return nil, err + } + break + } + } + } + + // Pass through - this is a pre-packaged context, presumably + // with a Dockerfile with the right name inside it. + return MakeTarSumContext(contextReader) +} + +// inspectResponse looks into the http response data at r to determine whether its +// content-type is on the list of acceptable content types for remote build contexts. +// This function returns: +// - a string representation of the detected content-type +// - an io.Reader for the response body +// - an error value which will be non-nil either when something goes wrong while +// reading bytes from r or when the detected content-type is not acceptable. 
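+//
+// The preamble bytes consumed for sniffing are stitched back in front of the
+// unread remainder with io.MultiReader, so the returned reader still yields
+// the complete response body.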
+func inspectResponse(ct string, r io.ReadCloser, clen int64) (string, io.ReadCloser, error) { + plen := clen + if plen <= 0 || plen > maxPreambleLength { + plen = maxPreambleLength + } + + preamble := make([]byte, plen, plen) + rlen, err := r.Read(preamble) + if rlen == 0 { + return ct, r, errors.New("empty response") + } + if err != nil && err != io.EOF { + return ct, r, err + } + + preambleR := bytes.NewReader(preamble[:rlen]) + bodyReader := ioutil.NopCloser(io.MultiReader(preambleR, r)) + // Some web servers will use application/octet-stream as the default + // content type for files without an extension (e.g. 'Dockerfile') + // so if we receive this value we better check for text content + contentType := ct + if len(ct) == 0 || ct == httputils.MimeTypes.OctetStream { + contentType, _, err = httputils.DetectContentType(preamble) + if err != nil { + return contentType, bodyReader, err + } + } + + contentType = selectAcceptableMIME(contentType) + var cterr error + if len(contentType) == 0 { + cterr = fmt.Errorf("unsupported Content-Type %q", ct) + contentType = ct + } + + return contentType, bodyReader, cterr +} + +func selectAcceptableMIME(ct string) string { + return mimeRe.FindString(ct) +} diff --git a/components/engine/builder/remotecontext/remote_test.go b/components/engine/builder/remotecontext/remote_test.go new file mode 100644 index 0000000000..af9db4b927 --- /dev/null +++ b/components/engine/builder/remotecontext/remote_test.go @@ -0,0 +1,234 @@ +package remotecontext + +import ( + "bytes" + "io" + "io/ioutil" + "net/http" + "net/http/httptest" + "net/url" + "testing" + + "github.com/docker/docker/builder" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/httputils" +) + +var binaryContext = []byte{0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00} //xz magic + +func TestSelectAcceptableMIME(t *testing.T) { + validMimeStrings := []string{ + "application/x-bzip2", + "application/bzip2", + "application/gzip", + "application/x-gzip", + "application/x-xz", + "application/xz", + "application/tar", + "application/x-tar", + "application/octet-stream", + "text/plain", + } + + invalidMimeStrings := []string{ + "", + "application/octet", + "application/json", + } + + for _, m := range invalidMimeStrings { + if len(selectAcceptableMIME(m)) > 0 { + t.Fatalf("Should not have accepted %q", m) + } + } + + for _, m := range validMimeStrings { + if str := selectAcceptableMIME(m); str == "" { + t.Fatalf("Should have accepted %q", m) + } + } +} + +func TestInspectEmptyResponse(t *testing.T) { + ct := "application/octet-stream" + br := ioutil.NopCloser(bytes.NewReader([]byte(""))) + contentType, bReader, err := inspectResponse(ct, br, 0) + if err == nil { + t.Fatal("Should have generated an error for an empty response") + } + if contentType != "application/octet-stream" { + t.Fatalf("Content type should be 'application/octet-stream' but is %q", contentType) + } + body, err := ioutil.ReadAll(bReader) + if err != nil { + t.Fatal(err) + } + if len(body) != 0 { + t.Fatal("response body should remain empty") + } +} + +func TestInspectResponseBinary(t *testing.T) { + ct := "application/octet-stream" + br := ioutil.NopCloser(bytes.NewReader(binaryContext)) + contentType, bReader, err := inspectResponse(ct, br, int64(len(binaryContext))) + if err != nil { + t.Fatal(err) + } + if contentType != "application/octet-stream" { + t.Fatalf("Content type should be 'application/octet-stream' but is %q", contentType) + } + body, err := ioutil.ReadAll(bReader) + if err != nil { + t.Fatal(err) + } + if 
len(body) != len(binaryContext) { + t.Fatalf("Wrong response size %d, should be == len(binaryContext)", len(body)) + } + for i := range body { + if body[i] != binaryContext[i] { + t.Fatalf("Corrupted response body at byte index %d", i) + } + } +} + +func TestResponseUnsupportedContentType(t *testing.T) { + content := []byte(dockerfileContents) + ct := "application/json" + br := ioutil.NopCloser(bytes.NewReader(content)) + contentType, bReader, err := inspectResponse(ct, br, int64(len(dockerfileContents))) + + if err == nil { + t.Fatal("Should have returned an error on content-type 'application/json'") + } + if contentType != ct { + t.Fatalf("Should not have altered content-type: orig: %s, altered: %s", ct, contentType) + } + body, err := ioutil.ReadAll(bReader) + if err != nil { + t.Fatal(err) + } + if string(body) != dockerfileContents { + t.Fatalf("Corrupted response body %s", body) + } +} + +func TestInspectResponseTextSimple(t *testing.T) { + content := []byte(dockerfileContents) + ct := "text/plain" + br := ioutil.NopCloser(bytes.NewReader(content)) + contentType, bReader, err := inspectResponse(ct, br, int64(len(content))) + if err != nil { + t.Fatal(err) + } + if contentType != "text/plain" { + t.Fatalf("Content type should be 'text/plain' but is %q", contentType) + } + body, err := ioutil.ReadAll(bReader) + if err != nil { + t.Fatal(err) + } + if string(body) != dockerfileContents { + t.Fatalf("Corrupted response body %s", body) + } +} + +func TestInspectResponseEmptyContentType(t *testing.T) { + content := []byte(dockerfileContents) + br := ioutil.NopCloser(bytes.NewReader(content)) + contentType, bodyReader, err := inspectResponse("", br, int64(len(content))) + if err != nil { + t.Fatal(err) + } + if contentType != "text/plain" { + t.Fatalf("Content type should be 'text/plain' but is %q", contentType) + } + body, err := ioutil.ReadAll(bodyReader) + if err != nil { + t.Fatal(err) + } + if string(body) != dockerfileContents { + t.Fatalf("Corrupted response body %s", body) + } +} + +func TestUnknownContentLength(t *testing.T) { + content := []byte(dockerfileContents) + ct := "text/plain" + br := ioutil.NopCloser(bytes.NewReader(content)) + contentType, bReader, err := inspectResponse(ct, br, -1) + if err != nil { + t.Fatal(err) + } + if contentType != "text/plain" { + t.Fatalf("Content type should be 'text/plain' but is %q", contentType) + } + body, err := ioutil.ReadAll(bReader) + if err != nil { + t.Fatal(err) + } + if string(body) != dockerfileContents { + t.Fatalf("Corrupted response body %s", body) + } +} + +func TestMakeRemoteContext(t *testing.T) { + contextDir, cleanup := createTestTempDir(t, "", "builder-tarsum-test") + defer cleanup() + + createTestTempFile(t, contextDir, builder.DefaultDockerfileName, dockerfileContents, 0777) + + mux := http.NewServeMux() + server := httptest.NewServer(mux) + serverURL, _ := url.Parse(server.URL) + + serverURL.Path = "/" + builder.DefaultDockerfileName + remoteURL := serverURL.String() + + mux.Handle("/", http.FileServer(http.Dir(contextDir))) + + remoteContext, err := MakeRemoteContext(remoteURL, map[string]func(io.ReadCloser) (io.ReadCloser, error){ + httputils.MimeTypes.TextPlain: func(rc io.ReadCloser) (io.ReadCloser, error) { + dockerfile, err := ioutil.ReadAll(rc) + if err != nil { + return nil, err + } + + r, err := archive.Generate(builder.DefaultDockerfileName, string(dockerfile)) + if err != nil { + return nil, err + } + return ioutil.NopCloser(r), nil + }, + }) + + if err != nil { + t.Fatalf("Error when executing 
MakeRemoteContext: %s", err)
+	}
+
+	if remoteContext == nil {
+		t.Fatal("Remote context should not be nil")
+	}
+
+	tarSumCtx, ok := remoteContext.(*tarSumContext)
+
+	if !ok {
+		t.Fatal("Cast error, remote context should be a tarSumContext")
+	}
+
+	fileInfoSums := tarSumCtx.sums
+
+	if fileInfoSums.Len() != 1 {
+		t.Fatalf("Size of file info sums should be 1, got: %d", fileInfoSums.Len())
+	}
+
+	fileInfo := fileInfoSums.GetFile(builder.DefaultDockerfileName)
+
+	if fileInfo == nil {
+		t.Fatalf("There should be a file named %s in fileInfoSums", builder.DefaultDockerfileName)
+	}
+
+	if fileInfo.Pos() != 0 {
+		t.Fatalf("File %s should have position 0, got %d", builder.DefaultDockerfileName, fileInfo.Pos())
+	}
+}
diff --git a/components/engine/builder/remotecontext/tarsum.go b/components/engine/builder/remotecontext/tarsum.go
new file mode 100644
index 0000000000..dabf590e59
--- /dev/null
+++ b/components/engine/builder/remotecontext/tarsum.go
@@ -0,0 +1,128 @@
+package remotecontext
+
+import (
+	"io"
+	"os"
+	"path/filepath"
+
+	"github.com/docker/docker/builder"
+	"github.com/docker/docker/pkg/archive"
+	"github.com/docker/docker/pkg/chrootarchive"
+	"github.com/docker/docker/pkg/ioutils"
+	"github.com/docker/docker/pkg/symlink"
+	"github.com/docker/docker/pkg/tarsum"
+	"github.com/pkg/errors"
+)
+
+type tarSumContext struct {
+	root string
+	sums tarsum.FileInfoSums
+}
+
+func (c *tarSumContext) Close() error {
+	return os.RemoveAll(c.root)
+}
+
+func convertPathError(err error, cleanpath string) error {
+	if err, ok := err.(*os.PathError); ok {
+		err.Path = cleanpath
+		return err
+	}
+	return err
+}
+
+type modifiableContext interface {
+	builder.Source
+	// Remove deletes the entry specified by `path`.
+	// It is usual for directory entries to delete all their subentries.
+	Remove(path string) error
+}
+
+// MakeTarSumContext returns a build Context from a tar stream.
+//
+// It extracts the tar stream to a temporary folder that is deleted as soon as
+// the Context is closed.
+// As the extraction happens, a tarsum is calculated for every file, and the set of
+// all those sums then becomes the source of truth for all operations on this Context.
+//
+// Closing tarStream has to be done by the caller.
+func MakeTarSumContext(tarStream io.Reader) (builder.Source, error) {
+	root, err := ioutils.TempDir("", "docker-builder")
+	if err != nil {
+		return nil, err
+	}
+
+	tsc := &tarSumContext{root: root}
+
+	// Make sure we clean up upon error. 
In the happy case the caller
+	// is expected to manage the cleanup.
+	defer func() {
+		if err != nil {
+			tsc.Close()
+		}
+	}()
+
+	decompressedStream, err := archive.DecompressStream(tarStream)
+	if err != nil {
+		return nil, err
+	}
+
+	sum, err := tarsum.NewTarSum(decompressedStream, true, tarsum.Version1)
+	if err != nil {
+		return nil, err
+	}
+
+	err = chrootarchive.Untar(sum, root, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	tsc.sums = sum.GetSums()
+
+	return tsc, nil
+}
+
+func (c *tarSumContext) Root() string {
+	return c.root
+}
+
+func (c *tarSumContext) Remove(path string) error {
+	_, fullpath, err := normalize(path, c.root)
+	if err != nil {
+		return err
+	}
+	return os.RemoveAll(fullpath)
+}
+
+func (c *tarSumContext) Hash(path string) (string, error) {
+	cleanpath, fullpath, err := normalize(path, c.root)
+	if err != nil {
+		return "", err
+	}
+
+	rel, err := filepath.Rel(c.root, fullpath)
+	if err != nil {
+		return "", convertPathError(err, cleanpath)
+	}
+
+	// Use the checksum of the followed path (not the possible symlink) because
+	// this is the file that is actually copied.
+	if tsInfo := c.sums.GetFile(filepath.ToSlash(rel)); tsInfo != nil {
+		return tsInfo.Sum(), nil
+	}
+	// We set sum to path by default for the case where GetFile returns nil.
+	// The usual case is if relative path is empty.
+	return path, nil // backwards compat TODO: see if really needed
+}
+
+func normalize(path, root string) (cleanPath, fullPath string, err error) {
+	cleanPath = filepath.Clean(string(os.PathSeparator) + path)[1:]
+	fullPath, err = symlink.FollowSymlinkInScope(filepath.Join(root, path), root)
+	if err != nil {
+		return "", "", errors.Wrapf(err, "forbidden path outside the build context: %s (%s)", path, cleanPath)
+	}
+	if _, err := os.Lstat(fullPath); err != nil {
+		return "", "", convertPathError(err, path)
+	}
+	return
+}
diff --git a/components/engine/builder/remotecontext/tarsum_test.go b/components/engine/builder/remotecontext/tarsum_test.go
new file mode 100644
index 0000000000..35d38cef3c
--- /dev/null
+++ b/components/engine/builder/remotecontext/tarsum_test.go
@@ -0,0 +1,159 @@
+package remotecontext
+
+import (
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"testing"
+
+	"github.com/docker/docker/builder"
+	"github.com/docker/docker/pkg/archive"
+	"github.com/docker/docker/pkg/reexec"
+)
+
+const (
+	filename = "test"
+	contents = "contents test"
+)
+
+func init() {
+	reexec.Init()
+}
+
+func TestCloseRootDirectory(t *testing.T) {
+	contextDir, err := ioutil.TempDir("", "builder-tarsum-test")
+
+	if err != nil {
+		t.Fatalf("Error with creating temporary directory: %s", err)
+	}
+
+	tarsum := &tarSumContext{root: contextDir}
+
+	err = tarsum.Close()
+
+	if err != nil {
+		t.Fatalf("Error while executing Close: %s", err)
+	}
+
+	_, err = os.Stat(contextDir)
+
+	if !os.IsNotExist(err) {
+		// Register the cleanup before failing, otherwise it would never run.
+		defer os.RemoveAll(contextDir)
+		t.Fatal("Directory should not exist at this point")
+	}
+}
+
+func TestHashFile(t *testing.T) {
+	contextDir, cleanup := createTestTempDir(t, "", "builder-tarsum-test")
+	defer cleanup()
+
+	createTestTempFile(t, contextDir, filename, contents, 0755)
+
+	tarSum := makeTestTarsumContext(t, contextDir)
+
+	sum, err := tarSum.Hash(filename)
+
+	if err != nil {
+		t.Fatalf("Error when executing Stat: %s", err)
+	}
+
+	if len(sum) == 0 {
+		t.Fatalf("Hash returned empty sum")
+	}
+
+	expected := "1149ab94af7be6cc1da1335e398f24ee1cf4926b720044d229969dfc248ae7ec"
+
+	if actual := sum; expected != actual {
+		t.Fatalf("invalid checksum. 
expected %s, got %s", expected, actual) + } +} + +func TestHashSubdir(t *testing.T) { + contextDir, cleanup := createTestTempDir(t, "", "builder-tarsum-test") + defer cleanup() + + contextSubdir := filepath.Join(contextDir, "builder-tarsum-test-subdir") + err := os.Mkdir(contextSubdir, 0755) + if err != nil { + t.Fatalf("Failed to make directory: %s", contextSubdir) + } + + testFilename := createTestTempFile(t, contextSubdir, filename, contents, 0755) + + tarSum := makeTestTarsumContext(t, contextDir) + + relativePath, err := filepath.Rel(contextDir, testFilename) + + if err != nil { + t.Fatalf("Error when getting relative path: %s", err) + } + + sum, err := tarSum.Hash(relativePath) + + if err != nil { + t.Fatalf("Error when executing Stat: %s", err) + } + + if len(sum) == 0 { + t.Fatalf("Hash returned empty sum") + } + + expected := "d7f8d6353dee4816f9134f4156bf6a9d470fdadfb5d89213721f7e86744a4e69" + + if actual := sum; expected != actual { + t.Fatalf("invalid checksum. expected %s, got %s", expected, actual) + } +} + +func TestStatNotExisting(t *testing.T) { + contextDir, cleanup := createTestTempDir(t, "", "builder-tarsum-test") + defer cleanup() + + tarSum := &tarSumContext{root: contextDir} + + _, err := tarSum.Hash("not-existing") + + if !os.IsNotExist(err) { + t.Fatalf("This file should not exist: %s", err) + } +} + +func TestRemoveDirectory(t *testing.T) { + contextDir, cleanup := createTestTempDir(t, "", "builder-tarsum-test") + defer cleanup() + + contextSubdir := createTestTempSubdir(t, contextDir, "builder-tarsum-test-subdir") + + relativePath, err := filepath.Rel(contextDir, contextSubdir) + + if err != nil { + t.Fatalf("Error when getting relative path: %s", err) + } + + tarSum := &tarSumContext{root: contextDir} + + err = tarSum.Remove(relativePath) + + if err != nil { + t.Fatalf("Error when executing Remove: %s", err) + } + + _, err = os.Stat(contextSubdir) + + if !os.IsNotExist(err) { + t.Fatal("Directory should not exist at this point") + } +} + +func makeTestTarsumContext(t *testing.T, dir string) builder.Source { + tarStream, err := archive.Tar(dir, archive.Uncompressed) + if err != nil { + t.Fatalf("error: %s", err) + } + defer tarStream.Close() + tarSum, err := MakeTarSumContext(tarStream) + if err != nil { + t.Fatalf("Error when executing MakeTarSumContext: %s", err) + } + return tarSum +} diff --git a/components/engine/builder/remotecontext/utils_test.go b/components/engine/builder/remotecontext/utils_test.go new file mode 100644 index 0000000000..1e23ab4f73 --- /dev/null +++ b/components/engine/builder/remotecontext/utils_test.go @@ -0,0 +1,55 @@ +package remotecontext + +import ( + "io/ioutil" + "os" + "path/filepath" + "testing" +) + +// createTestTempDir creates a temporary directory for testing. +// It returns the created path and a cleanup function which is meant to be used as deferred call. +// When an error occurs, it terminates the test. +func createTestTempDir(t *testing.T, dir, prefix string) (string, func()) { + path, err := ioutil.TempDir(dir, prefix) + + if err != nil { + t.Fatalf("Error when creating directory %s with prefix %s: %s", dir, prefix, err) + } + + return path, func() { + err = os.RemoveAll(path) + + if err != nil { + t.Fatalf("Error when removing directory %s: %s", path, err) + } + } +} + +// createTestTempSubdir creates a temporary directory for testing. 
+// It returns the created path but doesn't provide a cleanup function, +// so createTestTempSubdir should be used only for creating temporary subdirectories +// whose parent directories are properly cleaned up. +// When an error occurs, it terminates the test. +func createTestTempSubdir(t *testing.T, dir, prefix string) string { + path, err := ioutil.TempDir(dir, prefix) + + if err != nil { + t.Fatalf("Error when creating directory %s with prefix %s: %s", dir, prefix, err) + } + + return path +} + +// createTestTempFile creates a temporary file within dir with specific contents and permissions. +// When an error occurs, it terminates the test +func createTestTempFile(t *testing.T, dir, filename, contents string, perm os.FileMode) string { + filePath := filepath.Join(dir, filename) + err := ioutil.WriteFile(filePath, []byte(contents), perm) + + if err != nil { + t.Fatalf("Error when creating %s file: %s", filename, err) + } + + return filePath +} diff --git a/components/engine/cli/cli.go b/components/engine/cli/cli.go new file mode 100644 index 0000000000..4281667192 --- /dev/null +++ b/components/engine/cli/cli.go @@ -0,0 +1,25 @@ +package cli + +import ( + "os" + "path/filepath" + + "github.com/docker/docker/pkg/homedir" +) + +var ( + configDir = os.Getenv("DOCKER_CONFIG") + configFileDir = ".docker" +) + +// ConfigurationDir returns the path to the configuration directory as specified by the DOCKER_CONFIG environment variable. +// TODO: this was copied from cli/config/configfile and should be removed once cmd/dockerd moves +func ConfigurationDir() string { + return configDir +} + +func init() { + if configDir == "" { + configDir = filepath.Join(homedir.Get(), configFileDir) + } +} diff --git a/components/engine/cli/cobra.go b/components/engine/cli/cobra.go new file mode 100644 index 0000000000..c7bb39c43d --- /dev/null +++ b/components/engine/cli/cobra.go @@ -0,0 +1,150 @@ +package cli + +import ( + "fmt" + "strings" + + "github.com/docker/docker/pkg/term" + "github.com/pkg/errors" + "github.com/spf13/cobra" +) + +// SetupRootCommand sets default usage, help, and error handling for the +// root command. 
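+//
+// A minimal sketch of the intended wiring (the command definition here is
+// illustrative only):
+//
+//	rootCmd := &cobra.Command{Use: "docker [OPTIONS] COMMAND [ARG...]"}
+//	SetupRootCommand(rootCmd)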
+func SetupRootCommand(rootCmd *cobra.Command) { + cobra.AddTemplateFunc("hasSubCommands", hasSubCommands) + cobra.AddTemplateFunc("hasManagementSubCommands", hasManagementSubCommands) + cobra.AddTemplateFunc("operationSubCommands", operationSubCommands) + cobra.AddTemplateFunc("managementSubCommands", managementSubCommands) + cobra.AddTemplateFunc("wrappedFlagUsages", wrappedFlagUsages) + + rootCmd.SetUsageTemplate(usageTemplate) + rootCmd.SetHelpTemplate(helpTemplate) + rootCmd.SetFlagErrorFunc(FlagErrorFunc) + rootCmd.SetHelpCommand(helpCommand) + + rootCmd.PersistentFlags().BoolP("help", "h", false, "Print usage") + rootCmd.PersistentFlags().MarkShorthandDeprecated("help", "please use --help") +} + +// FlagErrorFunc prints an error message which matches the format of the +// docker/docker/cli error messages +func FlagErrorFunc(cmd *cobra.Command, err error) error { + if err == nil { + return nil + } + + usage := "" + if cmd.HasSubCommands() { + usage = "\n\n" + cmd.UsageString() + } + return StatusError{ + Status: fmt.Sprintf("%s\nSee '%s --help'.%s", err, cmd.CommandPath(), usage), + StatusCode: 125, + } +} + +var helpCommand = &cobra.Command{ + Use: "help [command]", + Short: "Help about the command", + PersistentPreRun: func(cmd *cobra.Command, args []string) {}, + PersistentPostRun: func(cmd *cobra.Command, args []string) {}, + RunE: func(c *cobra.Command, args []string) error { + cmd, args, e := c.Root().Find(args) + if cmd == nil || e != nil || len(args) > 0 { + return errors.Errorf("unknown help topic: %v", strings.Join(args, " ")) + } + + helpFunc := cmd.HelpFunc() + helpFunc(cmd, args) + return nil + }, +} + +func hasSubCommands(cmd *cobra.Command) bool { + return len(operationSubCommands(cmd)) > 0 +} + +func hasManagementSubCommands(cmd *cobra.Command) bool { + return len(managementSubCommands(cmd)) > 0 +} + +func operationSubCommands(cmd *cobra.Command) []*cobra.Command { + cmds := []*cobra.Command{} + for _, sub := range cmd.Commands() { + if sub.IsAvailableCommand() && !sub.HasSubCommands() { + cmds = append(cmds, sub) + } + } + return cmds +} + +func wrappedFlagUsages(cmd *cobra.Command) string { + width := 80 + if ws, err := term.GetWinsize(0); err == nil { + width = int(ws.Width) + } + return cmd.Flags().FlagUsagesWrapped(width - 1) +} + +func managementSubCommands(cmd *cobra.Command) []*cobra.Command { + cmds := []*cobra.Command{} + for _, sub := range cmd.Commands() { + if sub.IsAvailableCommand() && sub.HasSubCommands() { + cmds = append(cmds, sub) + } + } + return cmds +} + +var usageTemplate = `Usage: + +{{- if not .HasSubCommands}} {{.UseLine}}{{end}} +{{- if .HasSubCommands}} {{ .CommandPath}} COMMAND{{end}} + +{{ .Short | trim }} + +{{- if gt .Aliases 0}} + +Aliases: + {{.NameAndAliases}} + +{{- end}} +{{- if .HasExample}} + +Examples: +{{ .Example }} + +{{- end}} +{{- if .HasFlags}} + +Options: +{{ wrappedFlagUsages . | trimRightSpace}} + +{{- end}} +{{- if hasManagementSubCommands . }} + +Management Commands: + +{{- range managementSubCommands . }} + {{rpad .Name .NamePadding }} {{.Short}} +{{- end}} + +{{- end}} +{{- if hasSubCommands .}} + +Commands: + +{{- range operationSubCommands . }} + {{rpad .Name .NamePadding }} {{.Short}} +{{- end}} +{{- end}} + +{{- if .HasSubCommands }} + +Run '{{.CommandPath}} COMMAND --help' for more information on a command. 
+{{- end}}
+`
+
+var helpTemplate = `
+{{if or .Runnable .HasSubCommands}}{{.UsageString}}{{end}}`
diff --git a/components/engine/cli/debug/debug.go b/components/engine/cli/debug/debug.go
new file mode 100644
index 0000000000..51dfab2a97
--- /dev/null
+++ b/components/engine/cli/debug/debug.go
@@ -0,0 +1,26 @@
+package debug
+
+import (
+	"os"
+
+	"github.com/Sirupsen/logrus"
+)
+
+// Enable sets the DEBUG env var to "1"
+// and makes the logger log at debug level.
+func Enable() {
+	os.Setenv("DEBUG", "1")
+	logrus.SetLevel(logrus.DebugLevel)
+}
+
+// Disable clears the DEBUG env var
+// and makes the logger log at info level.
+func Disable() {
+	os.Setenv("DEBUG", "")
+	logrus.SetLevel(logrus.InfoLevel)
+}
+
+// IsEnabled checks whether the debug flag is set or not.
+func IsEnabled() bool {
+	return os.Getenv("DEBUG") != ""
+}
diff --git a/components/engine/cli/debug/debug_test.go b/components/engine/cli/debug/debug_test.go
new file mode 100644
index 0000000000..ad8412a944
--- /dev/null
+++ b/components/engine/cli/debug/debug_test.go
@@ -0,0 +1,43 @@
+package debug
+
+import (
+	"os"
+	"testing"
+
+	"github.com/Sirupsen/logrus"
+)
+
+func TestEnable(t *testing.T) {
+	defer func() {
+		os.Setenv("DEBUG", "")
+		logrus.SetLevel(logrus.InfoLevel)
+	}()
+	Enable()
+	if os.Getenv("DEBUG") != "1" {
+		t.Fatalf("expected DEBUG=1, got %s\n", os.Getenv("DEBUG"))
+	}
+	if logrus.GetLevel() != logrus.DebugLevel {
+		t.Fatalf("expected log level %v, got %v\n", logrus.DebugLevel, logrus.GetLevel())
+	}
+}
+
+func TestDisable(t *testing.T) {
+	Disable()
+	if os.Getenv("DEBUG") != "" {
+		t.Fatalf("expected DEBUG=\"\", got %s\n", os.Getenv("DEBUG"))
+	}
+	if logrus.GetLevel() != logrus.InfoLevel {
+		t.Fatalf("expected log level %v, got %v\n", logrus.InfoLevel, logrus.GetLevel())
+	}
+}
+
+func TestEnabled(t *testing.T) {
+	Enable()
+	if !IsEnabled() {
+		t.Fatal("expected debug enabled, got false")
+	}
+	Disable()
+	if IsEnabled() {
+		t.Fatal("expected debug disabled, got true")
+	}
+}
diff --git a/components/engine/cli/error.go b/components/engine/cli/error.go
new file mode 100644
index 0000000000..62f62433b8
--- /dev/null
+++ b/components/engine/cli/error.go
@@ -0,0 +1,33 @@
+package cli
+
+import (
+	"fmt"
+	"strings"
+)
+
+// Errors is a list of errors.
+// Useful in a loop when you don't want to return an error right away, but
+// instead collect all the errors that happened and display them together
+// after the loop.
+type Errors []error
+
+func (errList Errors) Error() string {
+	if len(errList) < 1 {
+		return ""
+	}
+
+	out := make([]string, len(errList))
+	for i := range errList {
+		out[i] = errList[i].Error()
+	}
+	return strings.Join(out, ", ")
+}
+
+// StatusError reports an unsuccessful exit by a command.
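+// Status holds the message shown to the user, and StatusCode is intended to
+// be used as the process exit code.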
+type StatusError struct { + Status string + StatusCode int +} + +func (e StatusError) Error() string { + return fmt.Sprintf("Status: %s, Code: %d", e.Status, e.StatusCode) +} diff --git a/components/engine/cli/flags/client.go b/components/engine/cli/flags/client.go new file mode 100644 index 0000000000..9b6940f6bd --- /dev/null +++ b/components/engine/cli/flags/client.go @@ -0,0 +1,13 @@ +package flags + +// ClientOptions are the options used to configure the client cli +type ClientOptions struct { + Common *CommonOptions + ConfigDir string + Version bool +} + +// NewClientOptions returns a new ClientOptions +func NewClientOptions() *ClientOptions { + return &ClientOptions{Common: NewCommonOptions()} +} diff --git a/components/engine/cli/flags/common.go b/components/engine/cli/flags/common.go new file mode 100644 index 0000000000..8f20111dce --- /dev/null +++ b/components/engine/cli/flags/common.go @@ -0,0 +1,117 @@ +package flags + +import ( + "fmt" + "os" + "path/filepath" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/cli" + "github.com/docker/docker/opts" + "github.com/docker/go-connections/tlsconfig" + "github.com/spf13/pflag" +) + +const ( + // DefaultCaFile is the default filename for the CA pem file + DefaultCaFile = "ca.pem" + // DefaultKeyFile is the default filename for the key pem file + DefaultKeyFile = "key.pem" + // DefaultCertFile is the default filename for the cert pem file + DefaultCertFile = "cert.pem" + // FlagTLSVerify is the flag name for the TLS verification option + FlagTLSVerify = "tlsverify" +) + +var ( + dockerCertPath = os.Getenv("DOCKER_CERT_PATH") + dockerTLSVerify = os.Getenv("DOCKER_TLS_VERIFY") != "" +) + +// CommonOptions are options common to both the client and the daemon. +type CommonOptions struct { + Debug bool + Hosts []string + LogLevel string + TLS bool + TLSVerify bool + TLSOptions *tlsconfig.Options +} + +// NewCommonOptions returns a new CommonOptions +func NewCommonOptions() *CommonOptions { + return &CommonOptions{} +} + +// InstallFlags adds flags for the common options on the FlagSet +func (commonOpts *CommonOptions) InstallFlags(flags *pflag.FlagSet) { + if dockerCertPath == "" { + dockerCertPath = cli.ConfigurationDir() + } + + flags.BoolVarP(&commonOpts.Debug, "debug", "D", false, "Enable debug mode") + flags.StringVarP(&commonOpts.LogLevel, "log-level", "l", "info", `Set the logging level ("debug"|"info"|"warn"|"error"|"fatal")`) + flags.BoolVar(&commonOpts.TLS, "tls", false, "Use TLS; implied by --tlsverify") + flags.BoolVar(&commonOpts.TLSVerify, FlagTLSVerify, dockerTLSVerify, "Use TLS and verify the remote") + + // TODO use flag flags.String("identity"}, "i", "", "Path to libtrust key file") + + commonOpts.TLSOptions = &tlsconfig.Options{ + CAFile: filepath.Join(dockerCertPath, DefaultCaFile), + CertFile: filepath.Join(dockerCertPath, DefaultCertFile), + KeyFile: filepath.Join(dockerCertPath, DefaultKeyFile), + } + tlsOptions := commonOpts.TLSOptions + flags.Var(opts.NewQuotedString(&tlsOptions.CAFile), "tlscacert", "Trust certs signed only by this CA") + flags.Var(opts.NewQuotedString(&tlsOptions.CertFile), "tlscert", "Path to TLS certificate file") + flags.Var(opts.NewQuotedString(&tlsOptions.KeyFile), "tlskey", "Path to TLS key file") + + hostOpt := opts.NewNamedListOptsRef("hosts", &commonOpts.Hosts, opts.ValidateHost) + flags.VarP(hostOpt, "host", "H", "Daemon socket(s) to connect to") +} + +// SetDefaultOptions sets default values for options after flag parsing is +// complete +func (commonOpts *CommonOptions) 
SetDefaultOptions(flags *pflag.FlagSet) { + // Regardless of whether the user sets it to true or false, if they + // specify --tlsverify at all then we need to turn on TLS + // TLSVerify can be true even if not set due to DOCKER_TLS_VERIFY env var, so we need + // to check that here as well + if flags.Changed(FlagTLSVerify) || commonOpts.TLSVerify { + commonOpts.TLS = true + } + + if !commonOpts.TLS { + commonOpts.TLSOptions = nil + } else { + tlsOptions := commonOpts.TLSOptions + tlsOptions.InsecureSkipVerify = !commonOpts.TLSVerify + + // Reset CertFile and KeyFile to empty string if the user did not specify + // the respective flags and the respective default files were not found. + if !flags.Changed("tlscert") { + if _, err := os.Stat(tlsOptions.CertFile); os.IsNotExist(err) { + tlsOptions.CertFile = "" + } + } + if !flags.Changed("tlskey") { + if _, err := os.Stat(tlsOptions.KeyFile); os.IsNotExist(err) { + tlsOptions.KeyFile = "" + } + } + } +} + +// SetLogLevel sets the logrus logging level +func SetLogLevel(logLevel string) { + if logLevel != "" { + lvl, err := logrus.ParseLevel(logLevel) + if err != nil { + fmt.Fprintf(os.Stderr, "Unable to parse logging level: %s\n", logLevel) + os.Exit(1) + } + logrus.SetLevel(lvl) + } else { + logrus.SetLevel(logrus.InfoLevel) + } +} diff --git a/components/engine/cli/flags/common_test.go b/components/engine/cli/flags/common_test.go new file mode 100644 index 0000000000..2434b569ec --- /dev/null +++ b/components/engine/cli/flags/common_test.go @@ -0,0 +1,42 @@ +package flags + +import ( + "path/filepath" + "testing" + + "github.com/docker/docker/cli" + "github.com/spf13/pflag" + "github.com/stretchr/testify/assert" +) + +func TestCommonOptionsInstallFlags(t *testing.T) { + flags := pflag.NewFlagSet("testing", pflag.ContinueOnError) + opts := NewCommonOptions() + opts.InstallFlags(flags) + + err := flags.Parse([]string{ + "--tlscacert=\"/foo/cafile\"", + "--tlscert=\"/foo/cert\"", + "--tlskey=\"/foo/key\"", + }) + assert.NoError(t, err) + assert.Equal(t, "/foo/cafile", opts.TLSOptions.CAFile) + assert.Equal(t, "/foo/cert", opts.TLSOptions.CertFile) + assert.Equal(t, opts.TLSOptions.KeyFile, "/foo/key") +} + +func defaultPath(filename string) string { + return filepath.Join(cli.ConfigurationDir(), filename) +} + +func TestCommonOptionsInstallFlagsWithDefaults(t *testing.T) { + flags := pflag.NewFlagSet("testing", pflag.ContinueOnError) + opts := NewCommonOptions() + opts.InstallFlags(flags) + + err := flags.Parse([]string{}) + assert.NoError(t, err) + assert.Equal(t, defaultPath("ca.pem"), opts.TLSOptions.CAFile) + assert.Equal(t, defaultPath("cert.pem"), opts.TLSOptions.CertFile) + assert.Equal(t, defaultPath("key.pem"), opts.TLSOptions.KeyFile) +} diff --git a/components/engine/cli/required.go b/components/engine/cli/required.go new file mode 100644 index 0000000000..d28af86be5 --- /dev/null +++ b/components/engine/cli/required.go @@ -0,0 +1,96 @@ +package cli + +import ( + "strings" + + "github.com/pkg/errors" + "github.com/spf13/cobra" +) + +// NoArgs validates args and returns an error if there are any args +func NoArgs(cmd *cobra.Command, args []string) error { + if len(args) == 0 { + return nil + } + + if cmd.HasSubCommands() { + return errors.Errorf("\n" + strings.TrimRight(cmd.UsageString(), "\n")) + } + + return errors.Errorf( + "\"%s\" accepts no argument(s).\nSee '%s --help'.\n\nUsage: %s\n\n%s", + cmd.CommandPath(), + cmd.CommandPath(), + cmd.UseLine(), + cmd.Short, + ) +} + +// RequiresMinArgs returns an error if there is not at 
least min args +func RequiresMinArgs(min int) cobra.PositionalArgs { + return func(cmd *cobra.Command, args []string) error { + if len(args) >= min { + return nil + } + return errors.Errorf( + "\"%s\" requires at least %d argument(s).\nSee '%s --help'.\n\nUsage: %s\n\n%s", + cmd.CommandPath(), + min, + cmd.CommandPath(), + cmd.UseLine(), + cmd.Short, + ) + } +} + +// RequiresMaxArgs returns an error if there is not at most max args +func RequiresMaxArgs(max int) cobra.PositionalArgs { + return func(cmd *cobra.Command, args []string) error { + if len(args) <= max { + return nil + } + return errors.Errorf( + "\"%s\" requires at most %d argument(s).\nSee '%s --help'.\n\nUsage: %s\n\n%s", + cmd.CommandPath(), + max, + cmd.CommandPath(), + cmd.UseLine(), + cmd.Short, + ) + } +} + +// RequiresRangeArgs returns an error if there is not at least min args and at most max args +func RequiresRangeArgs(min int, max int) cobra.PositionalArgs { + return func(cmd *cobra.Command, args []string) error { + if len(args) >= min && len(args) <= max { + return nil + } + return errors.Errorf( + "\"%s\" requires at least %d and at most %d argument(s).\nSee '%s --help'.\n\nUsage: %s\n\n%s", + cmd.CommandPath(), + min, + max, + cmd.CommandPath(), + cmd.UseLine(), + cmd.Short, + ) + } +} + +// ExactArgs returns an error if there is not the exact number of args +func ExactArgs(number int) cobra.PositionalArgs { + return func(cmd *cobra.Command, args []string) error { + if len(args) == number { + return nil + } + return errors.Errorf( + "\"%s\" requires exactly %d argument(s).\nSee '%s --help'.\n\nUsage: %s\n\n%s", + cmd.CommandPath(), + number, + cmd.CommandPath(), + cmd.UseLine(), + cmd.Short, + ) + } +} diff --git a/components/engine/client/README.md b/components/engine/client/README.md new file mode 100644 index 0000000000..059dfb3ce7 --- /dev/null +++ b/components/engine/client/README.md @@ -0,0 +1,35 @@ +# Go client for the Docker Engine API + +The `docker` command uses this package to communicate with the daemon. It can also be used by your own Go applications to do anything the command-line interface does – running containers, pulling images, managing swarms, etc. 
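+
+A client is created either from the environment with `client.NewEnvClient` or
+explicitly with `client.NewClient`; the example below uses the environment
+variant.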
+ +For example, to list running containers (the equivalent of `docker ps`): + +```go +package main + +import ( + "context" + "fmt" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/client" +) + +func main() { + cli, err := client.NewEnvClient() + if err != nil { + panic(err) + } + + containers, err := cli.ContainerList(context.Background(), types.ContainerListOptions{}) + if err != nil { + panic(err) + } + + for _, container := range containers { + fmt.Printf("%s %s\n", container.ID[:10], container.Image) + } +} +``` + +[Full documentation is available on GoDoc.](https://godoc.org/github.com/docker/docker/client) diff --git a/components/engine/client/checkpoint_create.go b/components/engine/client/checkpoint_create.go new file mode 100644 index 0000000000..0effe498be --- /dev/null +++ b/components/engine/client/checkpoint_create.go @@ -0,0 +1,13 @@ +package client + +import ( + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// CheckpointCreate creates a checkpoint from the given container with the given name +func (cli *Client) CheckpointCreate(ctx context.Context, container string, options types.CheckpointCreateOptions) error { + resp, err := cli.post(ctx, "/containers/"+container+"/checkpoints", nil, options, nil) + ensureReaderClosed(resp) + return err +} diff --git a/components/engine/client/checkpoint_create_test.go b/components/engine/client/checkpoint_create_test.go new file mode 100644 index 0000000000..96e5187618 --- /dev/null +++ b/components/engine/client/checkpoint_create_test.go @@ -0,0 +1,73 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +func TestCheckpointCreateError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + err := client.CheckpointCreate(context.Background(), "nothing", types.CheckpointCreateOptions{ + CheckpointID: "noting", + Exit: true, + }) + + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestCheckpointCreate(t *testing.T) { + expectedContainerID := "container_id" + expectedCheckpointID := "checkpoint_id" + expectedURL := "/containers/container_id/checkpoints" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + + if req.Method != "POST" { + return nil, fmt.Errorf("expected POST method, got %s", req.Method) + } + + createOptions := &types.CheckpointCreateOptions{} + if err := json.NewDecoder(req.Body).Decode(createOptions); err != nil { + return nil, err + } + + if createOptions.CheckpointID != expectedCheckpointID { + return nil, fmt.Errorf("expected CheckpointID to be 'checkpoint_id', got %v", createOptions.CheckpointID) + } + + if !createOptions.Exit { + return nil, fmt.Errorf("expected Exit to be true") + } + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + + err := client.CheckpointCreate(context.Background(), expectedContainerID, types.CheckpointCreateOptions{ + CheckpointID: expectedCheckpointID, + Exit: true, + }) + + if err != nil { + t.Fatal(err) + } +} diff --git a/components/engine/client/checkpoint_delete.go 
b/components/engine/client/checkpoint_delete.go new file mode 100644 index 0000000000..e6e75588b1 --- /dev/null +++ b/components/engine/client/checkpoint_delete.go @@ -0,0 +1,20 @@ +package client + +import ( + "net/url" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// CheckpointDelete deletes the checkpoint with the given name from the given container +func (cli *Client) CheckpointDelete(ctx context.Context, containerID string, options types.CheckpointDeleteOptions) error { + query := url.Values{} + if options.CheckpointDir != "" { + query.Set("dir", options.CheckpointDir) + } + + resp, err := cli.delete(ctx, "/containers/"+containerID+"/checkpoints/"+options.CheckpointID, query, nil) + ensureReaderClosed(resp) + return err +} diff --git a/components/engine/client/checkpoint_delete_test.go b/components/engine/client/checkpoint_delete_test.go new file mode 100644 index 0000000000..a78b050487 --- /dev/null +++ b/components/engine/client/checkpoint_delete_test.go @@ -0,0 +1,54 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +func TestCheckpointDeleteError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + err := client.CheckpointDelete(context.Background(), "container_id", types.CheckpointDeleteOptions{ + CheckpointID: "checkpoint_id", + }) + + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestCheckpointDelete(t *testing.T) { + expectedURL := "/containers/container_id/checkpoints/checkpoint_id" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "DELETE" { + return nil, fmt.Errorf("expected DELETE method, got %s", req.Method) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + + err := client.CheckpointDelete(context.Background(), "container_id", types.CheckpointDeleteOptions{ + CheckpointID: "checkpoint_id", + }) + + if err != nil { + t.Fatal(err) + } +} diff --git a/components/engine/client/checkpoint_list.go b/components/engine/client/checkpoint_list.go new file mode 100644 index 0000000000..ffe44bc976 --- /dev/null +++ b/components/engine/client/checkpoint_list.go @@ -0,0 +1,32 @@ +package client + +import ( + "encoding/json" + "net/http" + "net/url" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// CheckpointList returns the checkpoints of the given container in the docker host +func (cli *Client) CheckpointList(ctx context.Context, container string, options types.CheckpointListOptions) ([]types.Checkpoint, error) { + var checkpoints []types.Checkpoint + + query := url.Values{} + if options.CheckpointDir != "" { + query.Set("dir", options.CheckpointDir) + } + + resp, err := cli.get(ctx, "/containers/"+container+"/checkpoints", query, nil) + if err != nil { + if resp.statusCode == http.StatusNotFound { + return checkpoints, containerNotFoundError{container} + } + return checkpoints, err + } + + err = json.NewDecoder(resp.body).Decode(&checkpoints) + ensureReaderClosed(resp) + return checkpoints, err +} diff --git 
a/components/engine/client/checkpoint_list_test.go b/components/engine/client/checkpoint_list_test.go new file mode 100644 index 0000000000..388465715b --- /dev/null +++ b/components/engine/client/checkpoint_list_test.go @@ -0,0 +1,68 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +func TestCheckpointListError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, err := client.CheckpointList(context.Background(), "container_id", types.CheckpointListOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestCheckpointList(t *testing.T) { + expectedURL := "/containers/container_id/checkpoints" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + content, err := json.Marshal([]types.Checkpoint{ + { + Name: "checkpoint", + }, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + checkpoints, err := client.CheckpointList(context.Background(), "container_id", types.CheckpointListOptions{}) + if err != nil { + t.Fatal(err) + } + if len(checkpoints) != 1 { + t.Fatalf("expected 1 checkpoint, got %v", checkpoints) + } +} + +func TestCheckpointListContainerNotFound(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusNotFound, "Server error")), + } + + _, err := client.CheckpointList(context.Background(), "unknown", types.CheckpointListOptions{}) + if err == nil || !IsErrContainerNotFound(err) { + t.Fatalf("expected a containerNotFound error, got %v", err) + } +} diff --git a/components/engine/client/client.go b/components/engine/client/client.go new file mode 100644 index 0000000000..f8f2fc6ad5 --- /dev/null +++ b/components/engine/client/client.go @@ -0,0 +1,284 @@ +/* +Package client is a Go client for the Docker Engine API. + +The "docker" command uses this package to communicate with the daemon. It can also +be used by your own Go applications to do anything the command-line interface does +- running containers, pulling images, managing swarms, etc. + +For more information about the Engine API, see the documentation: +https://docs.docker.com/engine/reference/api/ + +Usage + +You use the library by creating a client object and calling methods on it. The +client can be created either from environment variables with NewEnvClient, or +configured manually with NewClient. 
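+
+The API version can be pinned through the DOCKER_API_VERSION environment
+variable when the client is created with NewEnvClient.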
+
+For example, to list running containers (the equivalent of "docker ps"):
+
+	package main
+
+	import (
+		"context"
+		"fmt"
+
+		"github.com/docker/docker/api/types"
+		"github.com/docker/docker/client"
+	)
+
+	func main() {
+		cli, err := client.NewEnvClient()
+		if err != nil {
+			panic(err)
+		}
+
+		containers, err := cli.ContainerList(context.Background(), types.ContainerListOptions{})
+		if err != nil {
+			panic(err)
+		}
+
+		for _, container := range containers {
+			fmt.Printf("%s %s\n", container.ID[:10], container.Image)
+		}
+	}
+
+*/
+package client
+
+import (
+	"errors"
+	"fmt"
+	"net/http"
+	"net/url"
+	"os"
+	"path/filepath"
+	"strings"
+
+	"github.com/docker/docker/api"
+	"github.com/docker/go-connections/sockets"
+	"github.com/docker/go-connections/tlsconfig"
+)
+
+// ErrRedirect is the error returned by CheckRedirect when the request is non-GET.
+var ErrRedirect = errors.New("unexpected redirect in response")
+
+// Client is the API client that performs all operations
+// against a docker server.
+type Client struct {
+	// scheme sets the scheme for the client
+	scheme string
+	// host holds the server address to connect to
+	host string
+	// proto holds the client protocol i.e. unix.
+	proto string
+	// addr holds the client address.
+	addr string
+	// basePath holds the path to prepend to the requests.
+	basePath string
+	// client used to send and receive http requests.
+	client *http.Client
+	// version of the server to talk to.
+	version string
+	// custom http headers configured by users.
+	customHTTPHeaders map[string]string
+	// manualOverride is set to true when the version was set by users.
+	manualOverride bool
+}
+
+// CheckRedirect specifies the policy for dealing with redirect responses:
+// If the request is non-GET return `ErrRedirect`. Otherwise use the last response.
+//
+// Go 1.8 changed the behavior for HTTP redirects (specifically 301, 307, and 308) in the client.
+// The Docker client (and by extension the Docker API client) can be made to send a request
+// like POST /containers//start where what would normally be in the name section of the URL is empty.
+// This triggers an HTTP 301 from the daemon.
+// In Go 1.8 this 301 is converted to a GET request, which ends up getting a 404 from the daemon.
+// This behavior change manifests in the client in that, before, the 301 was not followed and
+// the client did not generate an error, but now it results in a message like "Error response from daemon: page not found".
+func CheckRedirect(req *http.Request, via []*http.Request) error {
+	if via[0].Method == http.MethodGet {
+		return http.ErrUseLastResponse
+	}
+	return ErrRedirect
+}
+
+// NewEnvClient initializes a new API client based on environment variables.
+// Use DOCKER_HOST to set the url to the docker server.
+// Use DOCKER_API_VERSION to set the version of the API to reach, leave empty for latest.
+// Use DOCKER_CERT_PATH to load the TLS certificates from.
+// Use DOCKER_TLS_VERIFY to enable or disable TLS verification, off by default.
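+//
+// When DOCKER_CERT_PATH is unset, no TLS-configured http.Client is built here;
+// NewClient then falls back to a default transport for the parsed host.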
+func NewEnvClient() (*Client, error) { + var client *http.Client + if dockerCertPath := os.Getenv("DOCKER_CERT_PATH"); dockerCertPath != "" { + options := tlsconfig.Options{ + CAFile: filepath.Join(dockerCertPath, "ca.pem"), + CertFile: filepath.Join(dockerCertPath, "cert.pem"), + KeyFile: filepath.Join(dockerCertPath, "key.pem"), + InsecureSkipVerify: os.Getenv("DOCKER_TLS_VERIFY") == "", + } + tlsc, err := tlsconfig.Client(options) + if err != nil { + return nil, err + } + + client = &http.Client{ + Transport: &http.Transport{ + TLSClientConfig: tlsc, + }, + CheckRedirect: CheckRedirect, + } + } + + host := os.Getenv("DOCKER_HOST") + if host == "" { + host = DefaultDockerHost + } + version := os.Getenv("DOCKER_API_VERSION") + if version == "" { + version = api.DefaultVersion + } + + cli, err := NewClient(host, version, client, nil) + if err != nil { + return cli, err + } + if os.Getenv("DOCKER_API_VERSION") != "" { + cli.manualOverride = true + } + return cli, nil +} + +// NewClient initializes a new API client for the given host and API version. +// It uses the given http client as transport. +// It also initializes the custom http headers to add to each request. +// +// It won't send any version information if the version number is empty. It is +// highly recommended that you set a version or your client may break if the +// server is upgraded. +func NewClient(host string, version string, client *http.Client, httpHeaders map[string]string) (*Client, error) { + proto, addr, basePath, err := ParseHost(host) + if err != nil { + return nil, err + } + + if client != nil { + if _, ok := client.Transport.(*http.Transport); !ok { + return nil, fmt.Errorf("unable to verify TLS configuration, invalid transport %v", client.Transport) + } + } else { + transport := new(http.Transport) + sockets.ConfigureTransport(transport, proto, addr) + client = &http.Client{ + Transport: transport, + CheckRedirect: CheckRedirect, + } + } + + scheme := "http" + tlsConfig := resolveTLSConfig(client.Transport) + if tlsConfig != nil { + // TODO(stevvooe): This isn't really the right way to write clients in Go. + // `NewClient` should probably only take an `*http.Client` and work from there. + // Unfortunately, the model of having a host-ish/url-thingy as the connection + // string has us confusing protocol and transport layers. We continue doing + // this to avoid breaking existing clients but this should be addressed. + scheme = "https" + } + + return &Client{ + scheme: scheme, + host: host, + proto: proto, + addr: addr, + basePath: basePath, + client: client, + version: version, + customHTTPHeaders: httpHeaders, + }, nil +} + +// Close ensures that transport.Client is closed +// especially needed while using NewClient with *http.Client = nil +// for example +// client.NewClient("unix:///var/run/docker.sock", nil, "v1.18", map[string]string{"User-Agent": "engine-api-cli-1.0"}) +func (cli *Client) Close() error { + + if t, ok := cli.client.Transport.(*http.Transport); ok { + t.CloseIdleConnections() + } + + return nil +} + +// getAPIPath returns the versioned request path to call the api. +// It appends the query parameters to the path if they are not empty. 
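+// For example, version "1.30" with path "/containers/json" yields
+// "/v1.30/containers/json" (assuming an empty base path).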
+func (cli *Client) getAPIPath(p string, query url.Values) string {
+	var apiPath string
+	if cli.version != "" {
+		v := strings.TrimPrefix(cli.version, "v")
+		apiPath = fmt.Sprintf("%s/v%s%s", cli.basePath, v, p)
+	} else {
+		apiPath = fmt.Sprintf("%s%s", cli.basePath, p)
+	}
+
+	u := &url.URL{
+		Path: apiPath,
+	}
+	if len(query) > 0 {
+		u.RawQuery = query.Encode()
+	}
+	return u.String()
+}
+
+// ClientVersion returns the version string associated with this
+// instance of the Client. Note that this value can be changed
+// via the DOCKER_API_VERSION env var.
+// This operation doesn't acquire a mutex.
+func (cli *Client) ClientVersion() string {
+	return cli.version
+}
+
+// UpdateClientVersion updates the version string associated with this
+// instance of the Client. This operation doesn't acquire a mutex.
+func (cli *Client) UpdateClientVersion(v string) {
+	if !cli.manualOverride {
+		cli.version = v
+	}
+}
+
+// ParseHost verifies that the given host string is valid.
+func ParseHost(host string) (string, string, string, error) {
+	protoAddrParts := strings.SplitN(host, "://", 2)
+	if len(protoAddrParts) == 1 {
+		return "", "", "", fmt.Errorf("unable to parse docker host `%s`", host)
+	}
+
+	var basePath string
+	proto, addr := protoAddrParts[0], protoAddrParts[1]
+	if proto == "tcp" {
+		parsed, err := url.Parse("tcp://" + addr)
+		if err != nil {
+			return "", "", "", err
+		}
+		addr = parsed.Host
+		basePath = parsed.Path
+	}
+	return proto, addr, basePath, nil
+}
+
+// CustomHTTPHeaders returns the custom http headers associated with this
+// instance of the Client. This operation doesn't acquire a mutex.
+func (cli *Client) CustomHTTPHeaders() map[string]string {
+	m := make(map[string]string)
+	for k, v := range cli.customHTTPHeaders {
+		m[k] = v
+	}
+	return m
+}
+
+// SetCustomHTTPHeaders updates the custom http headers associated with this
+// instance of the Client. This operation doesn't acquire a mutex.
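+//
+// For example (a sketch; the header shown is illustrative):
+//
+//	cli.SetCustomHTTPHeaders(map[string]string{"User-Agent": "engine-api-cli-1.0"})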
+func (cli *Client) SetCustomHTTPHeaders(headers map[string]string) { + cli.customHTTPHeaders = headers +} diff --git a/components/engine/client/client_mock_test.go b/components/engine/client/client_mock_test.go new file mode 100644 index 0000000000..0ab935d536 --- /dev/null +++ b/components/engine/client/client_mock_test.go @@ -0,0 +1,45 @@ +package client + +import ( + "bytes" + "encoding/json" + "io/ioutil" + "net/http" + + "github.com/docker/docker/api/types" +) + +func newMockClient(doer func(*http.Request) (*http.Response, error)) *http.Client { + return &http.Client{ + Transport: transportFunc(doer), + } +} + +func errorMock(statusCode int, message string) func(req *http.Request) (*http.Response, error) { + return func(req *http.Request) (*http.Response, error) { + header := http.Header{} + header.Set("Content-Type", "application/json") + + body, err := json.Marshal(&types.ErrorResponse{ + Message: message, + }) + if err != nil { + return nil, err + } + + return &http.Response{ + StatusCode: statusCode, + Body: ioutil.NopCloser(bytes.NewReader(body)), + Header: header, + }, nil + } +} + +func plainTextErrorMock(statusCode int, message string) func(req *http.Request) (*http.Response, error) { + return func(req *http.Request) (*http.Response, error) { + return &http.Response{ + StatusCode: statusCode, + Body: ioutil.NopCloser(bytes.NewReader([]byte(message))), + }, nil + } +} diff --git a/components/engine/client/client_test.go b/components/engine/client/client_test.go new file mode 100644 index 0000000000..77214bc53c --- /dev/null +++ b/components/engine/client/client_test.go @@ -0,0 +1,334 @@ +package client + +import ( + "bytes" + "encoding/json" + "io/ioutil" + "net/http" + "net/url" + "os" + "runtime" + "strings" + "testing" + + "github.com/docker/docker/api" + "github.com/docker/docker/api/types" + "github.com/stretchr/testify/assert" + "golang.org/x/net/context" +) + +func TestNewEnvClient(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip("skipping unix only test for windows") + } + cases := []struct { + envs map[string]string + expectedError string + expectedVersion string + }{ + { + envs: map[string]string{}, + expectedVersion: api.DefaultVersion, + }, + { + envs: map[string]string{ + "DOCKER_CERT_PATH": "invalid/path", + }, + expectedError: "Could not load X509 key pair: open invalid/path/cert.pem: no such file or directory", + }, + { + envs: map[string]string{ + "DOCKER_CERT_PATH": "testdata/", + }, + expectedVersion: api.DefaultVersion, + }, + { + envs: map[string]string{ + "DOCKER_CERT_PATH": "testdata/", + "DOCKER_TLS_VERIFY": "1", + }, + expectedVersion: api.DefaultVersion, + }, + { + envs: map[string]string{ + "DOCKER_CERT_PATH": "testdata/", + "DOCKER_HOST": "https://notaunixsocket", + }, + expectedVersion: api.DefaultVersion, + }, + { + envs: map[string]string{ + "DOCKER_HOST": "host", + }, + expectedError: "unable to parse docker host `host`", + }, + { + envs: map[string]string{ + "DOCKER_HOST": "invalid://url", + }, + expectedVersion: api.DefaultVersion, + }, + { + envs: map[string]string{ + "DOCKER_API_VERSION": "anything", + }, + expectedVersion: "anything", + }, + { + envs: map[string]string{ + "DOCKER_API_VERSION": "1.22", + }, + expectedVersion: "1.22", + }, + } + for _, c := range cases { + recoverEnvs := setupEnvs(t, c.envs) + apiclient, err := NewEnvClient() + if c.expectedError != "" { + if err == nil { + t.Errorf("expected an error for %v", c) + } else if err.Error() != c.expectedError { + t.Errorf("expected an error %s, got %s, for %v", 
c.expectedError, err.Error(), c) + } + } else { + if err != nil { + t.Error(err) + } + version := apiclient.ClientVersion() + if version != c.expectedVersion { + t.Errorf("expected %s, got %s, for %v", c.expectedVersion, version, c) + } + } + + if c.envs["DOCKER_TLS_VERIFY"] != "" { + // pedantic checking that this is handled correctly + tr := apiclient.client.Transport.(*http.Transport) + if tr.TLSClientConfig == nil { + t.Error("no TLS config found when DOCKER_TLS_VERIFY enabled") + } + + if tr.TLSClientConfig.InsecureSkipVerify { + t.Error("TLS verification should be enabled") + } + } + + recoverEnvs(t) + } +} + +func setupEnvs(t *testing.T, envs map[string]string) func(*testing.T) { + oldEnvs := map[string]string{} + for key, value := range envs { + oldEnv := os.Getenv(key) + oldEnvs[key] = oldEnv + err := os.Setenv(key, value) + if err != nil { + t.Error(err) + } + } + return func(t *testing.T) { + for key, value := range oldEnvs { + err := os.Setenv(key, value) + if err != nil { + t.Error(err) + } + } + } +} + +func TestGetAPIPath(t *testing.T) { + cases := []struct { + v string + p string + q url.Values + e string + }{ + {"", "/containers/json", nil, "/containers/json"}, + {"", "/containers/json", url.Values{}, "/containers/json"}, + {"", "/containers/json", url.Values{"s": []string{"c"}}, "/containers/json?s=c"}, + {"1.22", "/containers/json", nil, "/v1.22/containers/json"}, + {"1.22", "/containers/json", url.Values{}, "/v1.22/containers/json"}, + {"1.22", "/containers/json", url.Values{"s": []string{"c"}}, "/v1.22/containers/json?s=c"}, + {"v1.22", "/containers/json", nil, "/v1.22/containers/json"}, + {"v1.22", "/containers/json", url.Values{}, "/v1.22/containers/json"}, + {"v1.22", "/containers/json", url.Values{"s": []string{"c"}}, "/v1.22/containers/json?s=c"}, + {"v1.22", "/networks/kiwl$%^", nil, "/v1.22/networks/kiwl$%25%5E"}, + } + + for _, cs := range cases { + c, err := NewClient("unix:///var/run/docker.sock", cs.v, nil, nil) + if err != nil { + t.Fatal(err) + } + g := c.getAPIPath(cs.p, cs.q) + if g != cs.e { + t.Fatalf("Expected %s, got %s", cs.e, g) + } + + err = c.Close() + if nil != err { + t.Fatalf("close client failed, error message: %s", err) + } + } +} + +func TestParseHost(t *testing.T) { + cases := []struct { + host string + proto string + addr string + base string + err bool + }{ + {"", "", "", "", true}, + {"foobar", "", "", "", true}, + {"foo://bar", "foo", "bar", "", false}, + {"tcp://localhost:2476", "tcp", "localhost:2476", "", false}, + {"tcp://localhost:2476/path", "tcp", "localhost:2476", "/path", false}, + } + + for _, cs := range cases { + p, a, b, e := ParseHost(cs.host) + if cs.err && e == nil { + t.Fatalf("expected error, got nil") + } + if !cs.err && e != nil { + t.Fatal(e) + } + if cs.proto != p { + t.Fatalf("expected proto %s, got %s", cs.proto, p) + } + if cs.addr != a { + t.Fatalf("expected addr %s, got %s", cs.addr, a) + } + if cs.base != b { + t.Fatalf("expected base %s, got %s", cs.base, b) + } + } +} + +func TestUpdateClientVersion(t *testing.T) { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + splitQuery := strings.Split(req.URL.Path, "/") + queryVersion := splitQuery[1] + b, err := json.Marshal(types.Version{ + APIVersion: queryVersion, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(b)), + }, nil + }), + } + + cases := []struct { + v string + }{ + {"1.20"}, + {"v1.21"}, + {"1.22"}, + {"v1.22"}, + 
} + + for _, cs := range cases { + client.UpdateClientVersion(cs.v) + r, err := client.ServerVersion(context.Background()) + if err != nil { + t.Fatal(err) + } + if strings.TrimPrefix(r.APIVersion, "v") != strings.TrimPrefix(cs.v, "v") { + t.Fatalf("Expected %s, got %s", cs.v, r.APIVersion) + } + } +} + +func TestNewEnvClientSetsDefaultVersion(t *testing.T) { + // Unset environment variables + envVarKeys := []string{ + "DOCKER_HOST", + "DOCKER_API_VERSION", + "DOCKER_TLS_VERIFY", + "DOCKER_CERT_PATH", + } + envVarValues := make(map[string]string) + for _, key := range envVarKeys { + envVarValues[key] = os.Getenv(key) + os.Setenv(key, "") + } + + client, err := NewEnvClient() + if err != nil { + t.Fatal(err) + } + if client.version != api.DefaultVersion { + t.Fatalf("Expected %s, got %s", api.DefaultVersion, client.version) + } + + expected := "1.22" + os.Setenv("DOCKER_API_VERSION", expected) + client, err = NewEnvClient() + if err != nil { + t.Fatal(err) + } + if client.version != expected { + t.Fatalf("Expected %s, got %s", expected, client.version) + } + + // Restore environment variables + for _, key := range envVarKeys { + os.Setenv(key, envVarValues[key]) + } +} + +type roundTripFunc func(*http.Request) (*http.Response, error) + +func (rtf roundTripFunc) RoundTrip(req *http.Request) (*http.Response, error) { + return rtf(req) +} + +type bytesBufferClose struct { + *bytes.Buffer +} + +func (bbc bytesBufferClose) Close() error { + return nil +} + +func TestClientRedirect(t *testing.T) { + client := &http.Client{ + CheckRedirect: CheckRedirect, + Transport: roundTripFunc(func(req *http.Request) (*http.Response, error) { + if req.URL.String() == "/bla" { + return &http.Response{StatusCode: 404}, nil + } + return &http.Response{ + StatusCode: 301, + Header: map[string][]string{"Location": {"/bla"}}, + Body: bytesBufferClose{bytes.NewBuffer(nil)}, + }, nil + }), + } + + cases := []struct { + httpMethod string + expectedErr error + statusCode int + }{ + {http.MethodGet, nil, 301}, + {http.MethodPost, &url.Error{Op: "Post", URL: "/bla", Err: ErrRedirect}, 301}, + {http.MethodPut, &url.Error{Op: "Put", URL: "/bla", Err: ErrRedirect}, 301}, + {http.MethodDelete, &url.Error{Op: "Delete", URL: "/bla", Err: ErrRedirect}, 301}, + } + + for _, tc := range cases { + req, err := http.NewRequest(tc.httpMethod, "/redirectme", nil) + assert.NoError(t, err) + resp, err := client.Do(req) + assert.Equal(t, tc.expectedErr, err) + assert.Equal(t, tc.statusCode, resp.StatusCode) + } +} diff --git a/components/engine/client/client_unix.go b/components/engine/client/client_unix.go new file mode 100644 index 0000000000..89de892c85 --- /dev/null +++ b/components/engine/client/client_unix.go @@ -0,0 +1,6 @@ +// +build linux freebsd solaris openbsd darwin + +package client + +// DefaultDockerHost defines os specific default if DOCKER_HOST is unset +const DefaultDockerHost = "unix:///var/run/docker.sock" diff --git a/components/engine/client/client_windows.go b/components/engine/client/client_windows.go new file mode 100644 index 0000000000..07c0c7a774 --- /dev/null +++ b/components/engine/client/client_windows.go @@ -0,0 +1,4 @@ +package client + +// DefaultDockerHost defines os specific default if DOCKER_HOST is unset +const DefaultDockerHost = "npipe:////./pipe/docker_engine" diff --git a/components/engine/client/config_create.go b/components/engine/client/config_create.go new file mode 100644 index 0000000000..7ddeaf4b43 --- /dev/null +++ b/components/engine/client/config_create.go @@ -0,0 +1,22 @@ +package 
client + +import ( + "encoding/json" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +// ConfigCreate creates a new Config. +func (cli *Client) ConfigCreate(ctx context.Context, config swarm.ConfigSpec) (types.ConfigCreateResponse, error) { + var response types.ConfigCreateResponse + resp, err := cli.post(ctx, "/configs/create", nil, config, nil) + if err != nil { + return response, err + } + + err = json.NewDecoder(resp.body).Decode(&response) + ensureReaderClosed(resp) + return response, err +} diff --git a/components/engine/client/config_create_test.go b/components/engine/client/config_create_test.go new file mode 100644 index 0000000000..accc554133 --- /dev/null +++ b/components/engine/client/config_create_test.go @@ -0,0 +1,57 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +func TestConfigCreateError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.ConfigCreate(context.Background(), swarm.ConfigSpec{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestConfigCreate(t *testing.T) { + expectedURL := "/configs/create" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "POST" { + return nil, fmt.Errorf("expected POST method, got %s", req.Method) + } + b, err := json.Marshal(types.ConfigCreateResponse{ + ID: "test_config", + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusCreated, + Body: ioutil.NopCloser(bytes.NewReader(b)), + }, nil + }), + } + + r, err := client.ConfigCreate(context.Background(), swarm.ConfigSpec{}) + if err != nil { + t.Fatal(err) + } + if r.ID != "test_config" { + t.Fatalf("expected `test_config`, got %s", r.ID) + } +} diff --git a/components/engine/client/config_inspect.go b/components/engine/client/config_inspect.go new file mode 100644 index 0000000000..1917b181fa --- /dev/null +++ b/components/engine/client/config_inspect.go @@ -0,0 +1,34 @@ +package client + +import ( + "bytes" + "encoding/json" + "io/ioutil" + "net/http" + + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +// ConfigInspectWithRaw returns the config information with raw data +func (cli *Client) ConfigInspectWithRaw(ctx context.Context, id string) (swarm.Config, []byte, error) { + resp, err := cli.get(ctx, "/configs/"+id, nil, nil) + if err != nil { + if resp.statusCode == http.StatusNotFound { + return swarm.Config{}, nil, configNotFoundError{id} + } + return swarm.Config{}, nil, err + } + defer ensureReaderClosed(resp) + + body, err := ioutil.ReadAll(resp.body) + if err != nil { + return swarm.Config{}, nil, err + } + + var config swarm.Config + rdr := bytes.NewReader(body) + err = json.NewDecoder(rdr).Decode(&config) + + return config, body, err +} diff --git a/components/engine/client/config_inspect_test.go b/components/engine/client/config_inspect_test.go new file mode 100644 index 0000000000..b9597cbc3f --- /dev/null +++ b/components/engine/client/config_inspect_test.go @@ -0,0 
+1,65 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +func TestConfigInspectError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, _, err := client.ConfigInspectWithRaw(context.Background(), "nothing") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestConfigInspectConfigNotFound(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusNotFound, "Server error")), + } + + _, _, err := client.ConfigInspectWithRaw(context.Background(), "unknown") + if err == nil || !IsErrConfigNotFound(err) { + t.Fatalf("expected a configNotFoundError error, got %v", err) + } +} + +func TestConfigInspect(t *testing.T) { + expectedURL := "/configs/config_id" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + content, err := json.Marshal(swarm.Config{ + ID: "config_id", + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + configInspect, _, err := client.ConfigInspectWithRaw(context.Background(), "config_id") + if err != nil { + t.Fatal(err) + } + if configInspect.ID != "config_id" { + t.Fatalf("expected `config_id`, got %s", configInspect.ID) + } +} diff --git a/components/engine/client/config_list.go b/components/engine/client/config_list.go new file mode 100644 index 0000000000..cea11708a7 --- /dev/null +++ b/components/engine/client/config_list.go @@ -0,0 +1,35 @@ +package client + +import ( + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +// ConfigList returns the list of configs. 
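+//
+// For example, listing configs by label (a sketch; assumes a swarm-enabled
+// daemon and that the "env" label is in use):
+//
+//	args := filters.NewArgs()
+//	args.Add("label", "env=prod")
+//	configs, err := cli.ConfigList(ctx, types.ConfigListOptions{Filters: args})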
+func (cli *Client) ConfigList(ctx context.Context, options types.ConfigListOptions) ([]swarm.Config, error) { + query := url.Values{} + + if options.Filters.Len() > 0 { + filterJSON, err := filters.ToParam(options.Filters) + if err != nil { + return nil, err + } + + query.Set("filters", filterJSON) + } + + resp, err := cli.get(ctx, "/configs", query, nil) + if err != nil { + return nil, err + } + + var configs []swarm.Config + err = json.NewDecoder(resp.body).Decode(&configs) + ensureReaderClosed(resp) + return configs, err +} diff --git a/components/engine/client/config_list_test.go b/components/engine/client/config_list_test.go new file mode 100644 index 0000000000..101e6eafdb --- /dev/null +++ b/components/engine/client/config_list_test.go @@ -0,0 +1,94 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +func TestConfigListError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, err := client.ConfigList(context.Background(), types.ConfigListOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestConfigList(t *testing.T) { + expectedURL := "/configs" + + filters := filters.NewArgs() + filters.Add("label", "label1") + filters.Add("label", "label2") + + listCases := []struct { + options types.ConfigListOptions + expectedQueryParams map[string]string + }{ + { + options: types.ConfigListOptions{}, + expectedQueryParams: map[string]string{ + "filters": "", + }, + }, + { + options: types.ConfigListOptions{ + Filters: filters, + }, + expectedQueryParams: map[string]string{ + "filters": `{"label":{"label1":true,"label2":true}}`, + }, + }, + } + for _, listCase := range listCases { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + query := req.URL.Query() + for key, expected := range listCase.expectedQueryParams { + actual := query.Get(key) + if actual != expected { + return nil, fmt.Errorf("%s not set in URL query properly. Expected '%s', got %s", key, expected, actual) + } + } + content, err := json.Marshal([]swarm.Config{ + { + ID: "config_id1", + }, + { + ID: "config_id2", + }, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + configs, err := client.ConfigList(context.Background(), listCase.options) + if err != nil { + t.Fatal(err) + } + if len(configs) != 2 { + t.Fatalf("expected 2 configs, got %v", configs) + } + } +} diff --git a/components/engine/client/config_remove.go b/components/engine/client/config_remove.go new file mode 100644 index 0000000000..f04b8df17a --- /dev/null +++ b/components/engine/client/config_remove.go @@ -0,0 +1,10 @@ +package client + +import "golang.org/x/net/context" + +// ConfigRemove removes a Config. 
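+//
+// For example (a sketch; "config_id" is a placeholder):
+//
+//	err := cli.ConfigRemove(ctx, "config_id")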
+func (cli *Client) ConfigRemove(ctx context.Context, id string) error { + resp, err := cli.delete(ctx, "/configs/"+id, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/components/engine/client/config_remove_test.go b/components/engine/client/config_remove_test.go new file mode 100644 index 0000000000..4a9c69937f --- /dev/null +++ b/components/engine/client/config_remove_test.go @@ -0,0 +1,47 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" +) + +func TestConfigRemoveError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + err := client.ConfigRemove(context.Background(), "config_id") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestConfigRemove(t *testing.T) { + expectedURL := "/configs/config_id" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "DELETE" { + return nil, fmt.Errorf("expected DELETE method, got %s", req.Method) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte("body"))), + }, nil + }), + } + + err := client.ConfigRemove(context.Background(), "config_id") + if err != nil { + t.Fatal(err) + } +} diff --git a/components/engine/client/config_update.go b/components/engine/client/config_update.go new file mode 100644 index 0000000000..da171cb44c --- /dev/null +++ b/components/engine/client/config_update.go @@ -0,0 +1,18 @@ +package client + +import ( + "net/url" + "strconv" + + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +// ConfigUpdate attempts to update a Config +func (cli *Client) ConfigUpdate(ctx context.Context, id string, version swarm.Version, config swarm.ConfigSpec) error { + query := url.Values{} + query.Set("version", strconv.FormatUint(version.Index, 10)) + resp, err := cli.post(ctx, "/configs/"+id+"/update", query, config, nil) + ensureReaderClosed(resp) + return err +} diff --git a/components/engine/client/config_update_test.go b/components/engine/client/config_update_test.go new file mode 100644 index 0000000000..64c2f27263 --- /dev/null +++ b/components/engine/client/config_update_test.go @@ -0,0 +1,49 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types/swarm" +) + +func TestConfigUpdateError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + err := client.ConfigUpdate(context.Background(), "config_id", swarm.Version{}, swarm.ConfigSpec{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestConfigUpdate(t *testing.T) { + expectedURL := "/configs/config_id/update" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "POST" { + return nil, fmt.Errorf("expected POST method, got %s", req.Method) + } + return &http.Response{ + 
StatusCode: http.StatusOK,
+				Body:       ioutil.NopCloser(bytes.NewReader([]byte("body"))),
+			}, nil
+		}),
+	}
+
+	err := client.ConfigUpdate(context.Background(), "config_id", swarm.Version{}, swarm.ConfigSpec{})
+	if err != nil {
+		t.Fatal(err)
+	}
+}
diff --git a/components/engine/client/container_attach.go b/components/engine/client/container_attach.go
new file mode 100644
index 0000000000..eea4682158
--- /dev/null
+++ b/components/engine/client/container_attach.go
@@ -0,0 +1,37 @@
+package client
+
+import (
+	"net/url"
+
+	"github.com/docker/docker/api/types"
+	"golang.org/x/net/context"
+)
+
+// ContainerAttach attaches a connection to a container in the server.
+// It returns a types.HijackedResponse with the hijacked connection
+// and a reader to get output. It's up to the caller to close
+// the hijacked connection by calling types.HijackedResponse.Close.
+func (cli *Client) ContainerAttach(ctx context.Context, container string, options types.ContainerAttachOptions) (types.HijackedResponse, error) {
+	query := url.Values{}
+	if options.Stream {
+		query.Set("stream", "1")
+	}
+	if options.Stdin {
+		query.Set("stdin", "1")
+	}
+	if options.Stdout {
+		query.Set("stdout", "1")
+	}
+	if options.Stderr {
+		query.Set("stderr", "1")
+	}
+	if options.DetachKeys != "" {
+		query.Set("detachKeys", options.DetachKeys)
+	}
+	if options.Logs {
+		query.Set("logs", "1")
+	}
+
+	headers := map[string][]string{"Content-Type": {"text/plain"}}
+	return cli.postHijacked(ctx, "/containers/"+container+"/attach", query, nil, headers)
+}
diff --git a/components/engine/client/container_commit.go b/components/engine/client/container_commit.go
new file mode 100644
index 0000000000..531d796ee7
--- /dev/null
+++ b/components/engine/client/container_commit.go
@@ -0,0 +1,55 @@
+package client
+
+import (
+	"encoding/json"
+	"errors"
+	"net/url"
+
+	"github.com/docker/distribution/reference"
+	"github.com/docker/docker/api/types"
+	"golang.org/x/net/context"
+)
+
+// ContainerCommit applies changes to a container and creates a new tagged image.
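+//
+// For example (a sketch; the container name and reference are placeholders):
+//
+//	resp, err := cli.ContainerCommit(ctx, "my_container", types.ContainerCommitOptions{
+//		Reference: "myrepo/myimage:latest",
+//		Comment:   "checkpoint after setup",
+//	})
+//	if err == nil {
+//		fmt.Println("created image", resp.ID)
+//	}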
+func (cli *Client) ContainerCommit(ctx context.Context, container string, options types.ContainerCommitOptions) (types.IDResponse, error) {
+	var repository, tag string
+	if options.Reference != "" {
+		ref, err := reference.ParseNormalizedNamed(options.Reference)
+		if err != nil {
+			return types.IDResponse{}, err
+		}
+
+		if _, isCanonical := ref.(reference.Canonical); isCanonical {
+			return types.IDResponse{}, errors.New("refusing to create a tag with a digest reference")
+		}
+		ref = reference.TagNameOnly(ref)
+
+		if tagged, ok := ref.(reference.Tagged); ok {
+			tag = tagged.Tag()
+		}
+		repository = reference.FamiliarName(ref)
+	}
+
+	query := url.Values{}
+	query.Set("container", container)
+	query.Set("repo", repository)
+	query.Set("tag", tag)
+	query.Set("comment", options.Comment)
+	query.Set("author", options.Author)
+	for _, change := range options.Changes {
+		query.Add("changes", change)
+	}
+	if !options.Pause {
+		query.Set("pause", "0")
+	}
+
+	var response types.IDResponse
+	resp, err := cli.post(ctx, "/commit", query, options.Config, nil)
+	if err != nil {
+		return response, err
+	}
+
+	err = json.NewDecoder(resp.body).Decode(&response)
+	ensureReaderClosed(resp)
+	return response, err
+}
diff --git a/components/engine/client/container_commit_test.go b/components/engine/client/container_commit_test.go
new file mode 100644
index 0000000000..6947ed3861
--- /dev/null
+++ b/components/engine/client/container_commit_test.go
@@ -0,0 +1,96 @@
+package client
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"strings"
+	"testing"
+
+	"github.com/docker/docker/api/types"
+	"golang.org/x/net/context"
+)
+
+func TestContainerCommitError(t *testing.T) {
+	client := &Client{
+		client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")),
+	}
+	_, err := client.ContainerCommit(context.Background(), "nothing", types.ContainerCommitOptions{})
+	if err == nil || err.Error() != "Error response from daemon: Server error" {
+		t.Fatalf("expected a Server Error, got %v", err)
+	}
+}
+
+func TestContainerCommit(t *testing.T) {
+	expectedURL := "/commit"
+	expectedContainerID := "container_id"
+	specifiedReference := "repository_name:tag"
+	expectedRepositoryName := "repository_name"
+	expectedTag := "tag"
+	expectedComment := "comment"
+	expectedAuthor := "author"
+	expectedChanges := []string{"change1", "change2"}
+
+	client := &Client{
+		client: newMockClient(func(req *http.Request) (*http.Response, error) {
+			if !strings.HasPrefix(req.URL.Path, expectedURL) {
+				return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL)
+			}
+			query := req.URL.Query()
+			containerID := query.Get("container")
+			if containerID != expectedContainerID {
+				return nil, fmt.Errorf("container id not set in URL query properly. Expected '%s', got '%s'", expectedContainerID, containerID)
+			}
+			repo := query.Get("repo")
+			if repo != expectedRepositoryName {
+				return nil, fmt.Errorf("container repo not set in URL query properly. Expected '%s', got '%s'", expectedRepositoryName, repo)
+			}
+			tag := query.Get("tag")
+			if tag != expectedTag {
+				return nil, fmt.Errorf("container tag not set in URL query properly. Expected '%s', got '%s'", expectedTag, tag)
+			}
+			comment := query.Get("comment")
+			if comment != expectedComment {
+				return nil, fmt.Errorf("container comment not set in URL query properly. Expected '%s', got '%s'", expectedComment, comment)
+			}
+			author := query.Get("author")
+			if author != expectedAuthor {
+				return nil, fmt.Errorf("container author not set in URL query properly. Expected '%s', got '%s'", expectedAuthor, author)
+			}
+			pause := query.Get("pause")
+			if pause != "0" {
+				return nil, fmt.Errorf("container pause not set in URL query properly. Expected '0', got '%v'", pause)
+			}
+			changes := query["changes"]
+			if len(changes) != len(expectedChanges) {
+				return nil, fmt.Errorf("expected container changes size to be '%d', got %d", len(expectedChanges), len(changes))
+			}
+			b, err := json.Marshal(types.IDResponse{
+				ID: "new_container_id",
+			})
+			if err != nil {
+				return nil, err
+			}
+			return &http.Response{
+				StatusCode: http.StatusOK,
+				Body:       ioutil.NopCloser(bytes.NewReader(b)),
+			}, nil
+		}),
+	}
+
+	r, err := client.ContainerCommit(context.Background(), expectedContainerID, types.ContainerCommitOptions{
+		Reference: specifiedReference,
+		Comment:   expectedComment,
+		Author:    expectedAuthor,
+		Changes:   expectedChanges,
+		Pause:     false,
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+	if r.ID != "new_container_id" {
+		t.Fatalf("expected `new_container_id`, got %s", r.ID)
+	}
+}
diff --git a/components/engine/client/container_copy.go b/components/engine/client/container_copy.go
new file mode 100644
index 0000000000..545aa54383
--- /dev/null
+++ b/components/engine/client/container_copy.go
@@ -0,0 +1,101 @@
+package client
+
+import (
+	"encoding/base64"
+	"encoding/json"
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+	"path/filepath"
+	"strings"
+
+	"golang.org/x/net/context"
+
+	"github.com/docker/docker/api/types"
+)
+
+// ContainerStatPath returns Stat information about a path inside the container filesystem.
+func (cli *Client) ContainerStatPath(ctx context.Context, containerID, path string) (types.ContainerPathStat, error) {
+	query := url.Values{}
+	query.Set("path", filepath.ToSlash(path)) // Normalize the paths used in the API.
+
+	urlStr := fmt.Sprintf("/containers/%s/archive", containerID)
+	response, err := cli.head(ctx, urlStr, query, nil)
+	if err != nil {
+		return types.ContainerPathStat{}, err
+	}
+	defer ensureReaderClosed(response)
+	return getContainerPathStatFromHeader(response.header)
+}
+
+// CopyToContainer copies content into the container filesystem.
+func (cli *Client) CopyToContainer(ctx context.Context, container, path string, content io.Reader, options types.CopyToContainerOptions) error {
+	query := url.Values{}
+	query.Set("path", filepath.ToSlash(path)) // Normalize the paths used in the API.
+	// Do not allow for an existing directory to be overwritten by a non-directory and vice versa.
+	if !options.AllowOverwriteDirWithFile {
+		query.Set("noOverwriteDirNonDir", "true")
+	}
+
+	if options.CopyUIDGID {
+		query.Set("copyUIDGID", "true")
+	}
+
+	apiPath := fmt.Sprintf("/containers/%s/archive", container)
+
+	response, err := cli.putRaw(ctx, apiPath, query, content, nil)
+	if err != nil {
+		return err
+	}
+	defer ensureReaderClosed(response)
+
+	if response.statusCode != http.StatusOK {
+		return fmt.Errorf("unexpected status code from daemon: %d", response.statusCode)
+	}
+
+	return nil
+}
+
+// CopyFromContainer gets the content from the container and returns it as a Reader
+// to manipulate it in the host. It's up to the caller to close the reader.
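+//
+// For example (a sketch; the container name and path are placeholders):
+//
+//	reader, stat, err := cli.CopyFromContainer(ctx, "my_container", "/etc/hostname")
+//	if err == nil {
+//		defer reader.Close()
+//		fmt.Println("copying", stat.Name)
+//	}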
+func (cli *Client) CopyFromContainer(ctx context.Context, container, srcPath string) (io.ReadCloser, types.ContainerPathStat, error) { + query := make(url.Values, 1) + query.Set("path", filepath.ToSlash(srcPath)) // Normalize the paths used in the API. + + apiPath := fmt.Sprintf("/containers/%s/archive", container) + response, err := cli.get(ctx, apiPath, query, nil) + if err != nil { + return nil, types.ContainerPathStat{}, err + } + + if response.statusCode != http.StatusOK { + return nil, types.ContainerPathStat{}, fmt.Errorf("unexpected status code from daemon: %d", response.statusCode) + } + + // In order to get the copy behavior right, we need to know information + // about both the source and the destination. The response headers include + // stat info about the source that we can use in deciding exactly how to + // copy it locally. Along with the stat info about the local destination, + // we have everything we need to handle the multiple possibilities there + // can be when copying a file/dir from one location to another file/dir. + stat, err := getContainerPathStatFromHeader(response.header) + if err != nil { + return nil, stat, fmt.Errorf("unable to get resource stat from response: %s", err) + } + return response.body, stat, err +} + +func getContainerPathStatFromHeader(header http.Header) (types.ContainerPathStat, error) { + var stat types.ContainerPathStat + + encodedStat := header.Get("X-Docker-Container-Path-Stat") + statDecoder := base64.NewDecoder(base64.StdEncoding, strings.NewReader(encodedStat)) + + err := json.NewDecoder(statDecoder).Decode(&stat) + if err != nil { + err = fmt.Errorf("unable to decode container path stat header: %s", err) + } + + return stat, err +} diff --git a/components/engine/client/container_copy_test.go b/components/engine/client/container_copy_test.go new file mode 100644 index 0000000000..c84f82e9fb --- /dev/null +++ b/components/engine/client/container_copy_test.go @@ -0,0 +1,244 @@ +package client + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" +) + +func TestContainerStatPathError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.ContainerStatPath(context.Background(), "container_id", "path") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server error, got %v", err) + } +} + +func TestContainerStatPathNoHeaderError(t *testing.T) { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + _, err := client.ContainerStatPath(context.Background(), "container_id", "path/to/file") + if err == nil { + t.Fatalf("expected an error, got nothing") + } +} + +func TestContainerStatPath(t *testing.T) { + expectedURL := "/containers/container_id/archive" + expectedPath := "path/to/file" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "HEAD" { + return nil, fmt.Errorf("expected HEAD method, got %s", req.Method) + } + query := req.URL.Query() + path := query.Get("path") + if path != expectedPath { + return 
nil, fmt.Errorf("path not set in URL query properly") + } + content, err := json.Marshal(types.ContainerPathStat{ + Name: "name", + Mode: 0700, + }) + if err != nil { + return nil, err + } + base64PathStat := base64.StdEncoding.EncodeToString(content) + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + Header: http.Header{ + "X-Docker-Container-Path-Stat": []string{base64PathStat}, + }, + }, nil + }), + } + stat, err := client.ContainerStatPath(context.Background(), "container_id", expectedPath) + if err != nil { + t.Fatal(err) + } + if stat.Name != "name" { + t.Fatalf("expected container path stat name to be 'name', got '%s'", stat.Name) + } + if stat.Mode != 0700 { + t.Fatalf("expected container path stat mode to be 0700, got '%v'", stat.Mode) + } +} + +func TestCopyToContainerError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + err := client.CopyToContainer(context.Background(), "container_id", "path/to/file", bytes.NewReader([]byte("")), types.CopyToContainerOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server error, got %v", err) + } +} + +func TestCopyToContainerNotStatusOKError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusNoContent, "No content")), + } + err := client.CopyToContainer(context.Background(), "container_id", "path/to/file", bytes.NewReader([]byte("")), types.CopyToContainerOptions{}) + if err == nil || err.Error() != "unexpected status code from daemon: 204" { + t.Fatalf("expected an unexpected status code error, got %v", err) + } +} + +func TestCopyToContainer(t *testing.T) { + expectedURL := "/containers/container_id/archive" + expectedPath := "path/to/file" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "PUT" { + return nil, fmt.Errorf("expected PUT method, got %s", req.Method) + } + query := req.URL.Query() + path := query.Get("path") + if path != expectedPath { + return nil, fmt.Errorf("path not set in URL query properly, expected '%s', got %s", expectedPath, path) + } + noOverwriteDirNonDir := query.Get("noOverwriteDirNonDir") + if noOverwriteDirNonDir != "true" { + return nil, fmt.Errorf("noOverwriteDirNonDir not set in URL query properly, expected true, got %s", noOverwriteDirNonDir) + } + + content, err := ioutil.ReadAll(req.Body) + if err != nil { + return nil, err + } + if err := req.Body.Close(); err != nil { + return nil, err + } + if string(content) != "content" { + return nil, fmt.Errorf("expected content to be 'content', got %s", string(content)) + } + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + err := client.CopyToContainer(context.Background(), "container_id", expectedPath, bytes.NewReader([]byte("content")), types.CopyToContainerOptions{ + AllowOverwriteDirWithFile: false, + }) + if err != nil { + t.Fatal(err) + } +} + +func TestCopyFromContainerError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, _, err := client.CopyFromContainer(context.Background(), "container_id", "path/to/file") + if err == nil || err.Error() != "Error response from daemon: 
Server error" {
+		t.Fatalf("expected a Server error, got %v", err)
+	}
+}
+
+func TestCopyFromContainerNotStatusOKError(t *testing.T) {
+	client := &Client{
+		client: newMockClient(errorMock(http.StatusNoContent, "No content")),
+	}
+	_, _, err := client.CopyFromContainer(context.Background(), "container_id", "path/to/file")
+	if err == nil || err.Error() != "unexpected status code from daemon: 204" {
+		t.Fatalf("expected an unexpected status code error, got %v", err)
+	}
+}
+
+func TestCopyFromContainerNoHeaderError(t *testing.T) {
+	client := &Client{
+		client: newMockClient(func(req *http.Request) (*http.Response, error) {
+			return &http.Response{
+				StatusCode: http.StatusOK,
+				Body:       ioutil.NopCloser(bytes.NewReader([]byte(""))),
+			}, nil
+		}),
+	}
+	_, _, err := client.CopyFromContainer(context.Background(), "container_id", "path/to/file")
+	if err == nil {
+		t.Fatalf("expected an error, got nothing")
+	}
+}
+
+func TestCopyFromContainer(t *testing.T) {
+	expectedURL := "/containers/container_id/archive"
+	expectedPath := "path/to/file"
+	client := &Client{
+		client: newMockClient(func(req *http.Request) (*http.Response, error) {
+			if !strings.HasPrefix(req.URL.Path, expectedURL) {
+				return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL)
+			}
+			if req.Method != "GET" {
+				return nil, fmt.Errorf("expected GET method, got %s", req.Method)
+			}
+			query := req.URL.Query()
+			path := query.Get("path")
+			if path != expectedPath {
+				return nil, fmt.Errorf("path not set in URL query properly, expected '%s', got %s", expectedPath, path)
+			}
+
+			headercontent, err := json.Marshal(types.ContainerPathStat{
+				Name: "name",
+				Mode: 0700,
+			})
+			if err != nil {
+				return nil, err
+			}
+			base64PathStat := base64.StdEncoding.EncodeToString(headercontent)
+
+			return &http.Response{
+				StatusCode: http.StatusOK,
+				Body:       ioutil.NopCloser(bytes.NewReader([]byte("content"))),
+				Header: http.Header{
+					"X-Docker-Container-Path-Stat": []string{base64PathStat},
+				},
+			}, nil
+		}),
+	}
+	r, stat, err := client.CopyFromContainer(context.Background(), "container_id", expectedPath)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if stat.Name != "name" {
+		t.Fatalf("expected container path stat name to be 'name', got '%s'", stat.Name)
+	}
+	if stat.Mode != 0700 {
+		t.Fatalf("expected container path stat mode to be 0700, got '%v'", stat.Mode)
+	}
+	content, err := ioutil.ReadAll(r)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if err := r.Close(); err != nil {
+		t.Fatal(err)
+	}
+	if string(content) != "content" {
+		t.Fatalf("expected content to be 'content', got %s", string(content))
+	}
+}
diff --git a/components/engine/client/container_create.go b/components/engine/client/container_create.go
new file mode 100644
index 0000000000..6841b0b282
--- /dev/null
+++ b/components/engine/client/container_create.go
@@ -0,0 +1,56 @@
+package client
+
+import (
+	"encoding/json"
+	"net/url"
+	"strings"
+
+	"github.com/docker/docker/api/types/container"
+	"github.com/docker/docker/api/types/network"
+	"github.com/docker/docker/api/types/versions"
+	"golang.org/x/net/context"
+)
+
+type configWrapper struct {
+	*container.Config
+	HostConfig       *container.HostConfig
+	NetworkingConfig *network.NetworkingConfig
+}
+
+// ContainerCreate creates a new container based on the given configuration.
+// It can be associated with a name, but it's not mandatory.
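+//
+// For example, a minimal sketch (assumes the "alpine" image is already
+// present on the daemon; error handling elided for brevity):
+//
+//	resp, err := cli.ContainerCreate(ctx, &container.Config{
+//		Image: "alpine",
+//	}, nil, nil, "my_container")
+//	if err == nil {
+//		fmt.Println("created container", resp.ID)
+//	}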
+func (cli *Client) ContainerCreate(ctx context.Context, config *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig, containerName string) (container.ContainerCreateCreatedBody, error) { + var response container.ContainerCreateCreatedBody + + if err := cli.NewVersionError("1.25", "stop timeout"); config != nil && config.StopTimeout != nil && err != nil { + return response, err + } + + // When using API 1.24 and under, the client is responsible for removing the container + if hostConfig != nil && versions.LessThan(cli.ClientVersion(), "1.25") { + hostConfig.AutoRemove = false + } + + query := url.Values{} + if containerName != "" { + query.Set("name", containerName) + } + + body := configWrapper{ + Config: config, + HostConfig: hostConfig, + NetworkingConfig: networkingConfig, + } + + serverResp, err := cli.post(ctx, "/containers/create", query, body, nil) + if err != nil { + if serverResp.statusCode == 404 && strings.Contains(err.Error(), "No such image") { + return response, imageNotFoundError{config.Image} + } + return response, err + } + + err = json.NewDecoder(serverResp.body).Decode(&response) + ensureReaderClosed(serverResp) + return response, err +} diff --git a/components/engine/client/container_create_test.go b/components/engine/client/container_create_test.go new file mode 100644 index 0000000000..3ab608c21e --- /dev/null +++ b/components/engine/client/container_create_test.go @@ -0,0 +1,118 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types/container" + "golang.org/x/net/context" +) + +func TestContainerCreateError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.ContainerCreate(context.Background(), nil, nil, nil, "nothing") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error while testing StatusInternalServerError, got %v", err) + } + + // 404 doesn't automatically means an unknown image + client = &Client{ + client: newMockClient(errorMock(http.StatusNotFound, "Server error")), + } + _, err = client.ContainerCreate(context.Background(), nil, nil, nil, "nothing") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error while testing StatusNotFound, got %v", err) + } +} + +func TestContainerCreateImageNotFound(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusNotFound, "No such image")), + } + _, err := client.ContainerCreate(context.Background(), &container.Config{Image: "unknown_image"}, nil, nil, "unknown") + if err == nil || !IsErrImageNotFound(err) { + t.Fatalf("expected an imageNotFound error, got %v", err) + } +} + +func TestContainerCreateWithName(t *testing.T) { + expectedURL := "/containers/create" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + name := req.URL.Query().Get("name") + if name != "container_name" { + return nil, fmt.Errorf("container name not set in URL query properly. 
Expected `container_name`, got %s", name) + } + b, err := json.Marshal(container.ContainerCreateCreatedBody{ + ID: "container_id", + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(b)), + }, nil + }), + } + + r, err := client.ContainerCreate(context.Background(), nil, nil, nil, "container_name") + if err != nil { + t.Fatal(err) + } + if r.ID != "container_id" { + t.Fatalf("expected `container_id`, got %s", r.ID) + } +} + +// TestContainerCreateAutoRemove validates that a client using API 1.24 always disables AutoRemove. When using API 1.25 +// or up, AutoRemove should not be disabled. +func TestContainerCreateAutoRemove(t *testing.T) { + autoRemoveValidator := func(expectedValue bool) func(req *http.Request) (*http.Response, error) { + return func(req *http.Request) (*http.Response, error) { + var config configWrapper + + if err := json.NewDecoder(req.Body).Decode(&config); err != nil { + return nil, err + } + if config.HostConfig.AutoRemove != expectedValue { + return nil, fmt.Errorf("expected AutoRemove to be %v, got %v", expectedValue, config.HostConfig.AutoRemove) + } + b, err := json.Marshal(container.ContainerCreateCreatedBody{ + ID: "container_id", + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(b)), + }, nil + } + } + + client := &Client{ + client: newMockClient(autoRemoveValidator(false)), + version: "1.24", + } + if _, err := client.ContainerCreate(context.Background(), nil, &container.HostConfig{AutoRemove: true}, nil, ""); err != nil { + t.Fatal(err) + } + client = &Client{ + client: newMockClient(autoRemoveValidator(true)), + version: "1.25", + } + if _, err := client.ContainerCreate(context.Background(), nil, &container.HostConfig{AutoRemove: true}, nil, ""); err != nil { + t.Fatal(err) + } +} diff --git a/components/engine/client/container_diff.go b/components/engine/client/container_diff.go new file mode 100644 index 0000000000..884dc9feef --- /dev/null +++ b/components/engine/client/container_diff.go @@ -0,0 +1,23 @@ +package client + +import ( + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types/container" + "golang.org/x/net/context" +) + +// ContainerDiff shows differences in a container filesystem since it was started. 
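+//
+// For example (a sketch; "container_id" is a placeholder):
+//
+//	changes, err := cli.ContainerDiff(ctx, "container_id")
+//	if err == nil {
+//		for _, change := range changes {
+//			fmt.Println(change.Kind, change.Path)
+//		}
+//	}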
+func (cli *Client) ContainerDiff(ctx context.Context, containerID string) ([]container.ContainerChangeResponseItem, error) {
+	var changes []container.ContainerChangeResponseItem
+
+	serverResp, err := cli.get(ctx, "/containers/"+containerID+"/changes", url.Values{}, nil)
+	if err != nil {
+		return changes, err
+	}
+
+	err = json.NewDecoder(serverResp.body).Decode(&changes)
+	ensureReaderClosed(serverResp)
+	return changes, err
+}
diff --git a/components/engine/client/container_diff_test.go b/components/engine/client/container_diff_test.go
new file mode 100644
index 0000000000..57dd73e66d
--- /dev/null
+++ b/components/engine/client/container_diff_test.go
@@ -0,0 +1,61 @@
+package client
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"strings"
+	"testing"
+
+	"github.com/docker/docker/api/types/container"
+	"golang.org/x/net/context"
+)
+
+func TestContainerDiffError(t *testing.T) {
+	client := &Client{
+		client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")),
+	}
+	_, err := client.ContainerDiff(context.Background(), "nothing")
+	if err == nil || err.Error() != "Error response from daemon: Server error" {
+		t.Fatalf("expected a Server Error, got %v", err)
+	}
+}
+
+func TestContainerDiff(t *testing.T) {
+	expectedURL := "/containers/container_id/changes"
+	client := &Client{
+		client: newMockClient(func(req *http.Request) (*http.Response, error) {
+			if !strings.HasPrefix(req.URL.Path, expectedURL) {
+				return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL)
+			}
+			b, err := json.Marshal([]container.ContainerChangeResponseItem{
+				{
+					Kind: 0,
+					Path: "/path/1",
+				},
+				{
+					Kind: 1,
+					Path: "/path/2",
+				},
+			})
+			if err != nil {
+				return nil, err
+			}
+			return &http.Response{
+				StatusCode: http.StatusOK,
+				Body:       ioutil.NopCloser(bytes.NewReader(b)),
+			}, nil
+		}),
+	}
+
+	changes, err := client.ContainerDiff(context.Background(), "container_id")
+	if err != nil {
+		t.Fatal(err)
+	}
+	if len(changes) != 2 {
+		t.Fatalf("expected an array of 2 changes, got %v", changes)
+	}
+}
diff --git a/components/engine/client/container_exec.go b/components/engine/client/container_exec.go
new file mode 100644
index 0000000000..0665c54fbd
--- /dev/null
+++ b/components/engine/client/container_exec.go
@@ -0,0 +1,54 @@
+package client
+
+import (
+	"encoding/json"
+
+	"github.com/docker/docker/api/types"
+	"golang.org/x/net/context"
+)
+
+// ContainerExecCreate creates a new exec configuration to run an exec process.
+func (cli *Client) ContainerExecCreate(ctx context.Context, container string, config types.ExecConfig) (types.IDResponse, error) {
+	var response types.IDResponse
+
+	if err := cli.NewVersionError("1.25", "env"); len(config.Env) != 0 && err != nil {
+		return response, err
+	}
+
+	resp, err := cli.post(ctx, "/containers/"+container+"/exec", nil, config, nil)
+	if err != nil {
+		return response, err
+	}
+	err = json.NewDecoder(resp.body).Decode(&response)
+	ensureReaderClosed(resp)
+	return response, err
+}
+
+// ContainerExecStart starts an exec process already created in the docker host.
+func (cli *Client) ContainerExecStart(ctx context.Context, execID string, config types.ExecStartCheck) error {
+	resp, err := cli.post(ctx, "/exec/"+execID+"/start", nil, config, nil)
+	ensureReaderClosed(resp)
+	return err
+}
+
+// ContainerExecAttach attaches a connection to an exec process in the server.
+// It returns a types.HijackedResponse with the hijacked connection
+// and a reader to get output. It's up to the caller to close
+// the hijacked connection by calling types.HijackedResponse.Close.
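+//
+// For example, a sketch of the create/attach flow (the container ID and
+// command are placeholders; error handling elided):
+//
+//	execResp, _ := cli.ContainerExecCreate(ctx, "container_id", types.ExecConfig{
+//		Cmd:          []string{"ls", "/"},
+//		AttachStdout: true,
+//	})
+//	hijacked, _ := cli.ContainerExecAttach(ctx, execResp.ID, types.ExecConfig{})
+//	defer hijacked.Close()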
+func (cli *Client) ContainerExecAttach(ctx context.Context, execID string, config types.ExecConfig) (types.HijackedResponse, error) {
+	headers := map[string][]string{"Content-Type": {"application/json"}}
+	return cli.postHijacked(ctx, "/exec/"+execID+"/start", nil, config, headers)
+}
+
+// ContainerExecInspect returns information about a specific exec process on the docker host.
+func (cli *Client) ContainerExecInspect(ctx context.Context, execID string) (types.ContainerExecInspect, error) {
+	var response types.ContainerExecInspect
+	resp, err := cli.get(ctx, "/exec/"+execID+"/json", nil, nil)
+	if err != nil {
+		return response, err
+	}
+
+	err = json.NewDecoder(resp.body).Decode(&response)
+	ensureReaderClosed(resp)
+	return response, err
+}
diff --git a/components/engine/client/container_exec_test.go b/components/engine/client/container_exec_test.go
new file mode 100644
index 0000000000..0e296a50ad
--- /dev/null
+++ b/components/engine/client/container_exec_test.go
@@ -0,0 +1,157 @@
+package client
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"strings"
+	"testing"
+
+	"golang.org/x/net/context"
+
+	"github.com/docker/docker/api/types"
+)
+
+func TestContainerExecCreateError(t *testing.T) {
+	client := &Client{
+		client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")),
+	}
+	_, err := client.ContainerExecCreate(context.Background(), "container_id", types.ExecConfig{})
+	if err == nil || err.Error() != "Error response from daemon: Server error" {
+		t.Fatalf("expected a Server Error, got %v", err)
+	}
+}
+
+func TestContainerExecCreate(t *testing.T) {
+	expectedURL := "/containers/container_id/exec"
+	client := &Client{
+		client: newMockClient(func(req *http.Request) (*http.Response, error) {
+			if !strings.HasPrefix(req.URL.Path, expectedURL) {
+				return nil, fmt.Errorf("expected URL '%s', got '%s'", expectedURL, req.URL)
+			}
+			if req.Method != "POST" {
+				return nil, fmt.Errorf("expected POST method, got %s", req.Method)
+			}
+			// FIXME validate the content is the given ExecConfig ?
+ if err := req.ParseForm(); err != nil { + return nil, err + } + execConfig := &types.ExecConfig{} + if err := json.NewDecoder(req.Body).Decode(execConfig); err != nil { + return nil, err + } + if execConfig.User != "user" { + return nil, fmt.Errorf("expected an execConfig with User == 'user', got %v", execConfig) + } + b, err := json.Marshal(types.IDResponse{ + ID: "exec_id", + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(b)), + }, nil + }), + } + + r, err := client.ContainerExecCreate(context.Background(), "container_id", types.ExecConfig{ + User: "user", + }) + if err != nil { + t.Fatal(err) + } + if r.ID != "exec_id" { + t.Fatalf("expected `exec_id`, got %s", r.ID) + } +} + +func TestContainerExecStartError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + err := client.ContainerExecStart(context.Background(), "nothing", types.ExecStartCheck{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestContainerExecStart(t *testing.T) { + expectedURL := "/exec/exec_id/start" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if err := req.ParseForm(); err != nil { + return nil, err + } + execStartCheck := &types.ExecStartCheck{} + if err := json.NewDecoder(req.Body).Decode(execStartCheck); err != nil { + return nil, err + } + if execStartCheck.Tty || !execStartCheck.Detach { + return nil, fmt.Errorf("expected execStartCheck{Detach:true,Tty:false}, got %v", execStartCheck) + } + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + + err := client.ContainerExecStart(context.Background(), "exec_id", types.ExecStartCheck{ + Detach: true, + Tty: false, + }) + if err != nil { + t.Fatal(err) + } +} + +func TestContainerExecInspectError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.ContainerExecInspect(context.Background(), "nothing") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestContainerExecInspect(t *testing.T) { + expectedURL := "/exec/exec_id/json" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + b, err := json.Marshal(types.ContainerExecInspect{ + ExecID: "exec_id", + ContainerID: "container_id", + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(b)), + }, nil + }), + } + + inspect, err := client.ContainerExecInspect(context.Background(), "exec_id") + if err != nil { + t.Fatal(err) + } + if inspect.ExecID != "exec_id" { + t.Fatalf("expected ExecID to be `exec_id`, got %s", inspect.ExecID) + } + if inspect.ContainerID != "container_id" { + t.Fatalf("expected ContainerID `container_id`, got %s", inspect.ContainerID) + } +} diff --git a/components/engine/client/container_export.go 
b/components/engine/client/container_export.go new file mode 100644 index 0000000000..52194f3d34 --- /dev/null +++ b/components/engine/client/container_export.go @@ -0,0 +1,20 @@ +package client + +import ( + "io" + "net/url" + + "golang.org/x/net/context" +) + +// ContainerExport retrieves the raw contents of a container +// and returns them as an io.ReadCloser. It's up to the caller +// to close the stream. +func (cli *Client) ContainerExport(ctx context.Context, containerID string) (io.ReadCloser, error) { + serverResp, err := cli.get(ctx, "/containers/"+containerID+"/export", url.Values{}, nil) + if err != nil { + return nil, err + } + + return serverResp.body, nil +} diff --git a/components/engine/client/container_export_test.go b/components/engine/client/container_export_test.go new file mode 100644 index 0000000000..5849fe9252 --- /dev/null +++ b/components/engine/client/container_export_test.go @@ -0,0 +1,50 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" +) + +func TestContainerExportError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.ContainerExport(context.Background(), "nothing") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestContainerExport(t *testing.T) { + expectedURL := "/containers/container_id/export" + client := &Client{ + client: newMockClient(func(r *http.Request) (*http.Response, error) { + if !strings.HasPrefix(r.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, r.URL) + } + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte("response"))), + }, nil + }), + } + body, err := client.ContainerExport(context.Background(), "container_id") + if err != nil { + t.Fatal(err) + } + defer body.Close() + content, err := ioutil.ReadAll(body) + if err != nil { + t.Fatal(err) + } + if string(content) != "response" { + t.Fatalf("expected response to contain 'response', got %s", string(content)) + } +} diff --git a/components/engine/client/container_inspect.go b/components/engine/client/container_inspect.go new file mode 100644 index 0000000000..17f1809747 --- /dev/null +++ b/components/engine/client/container_inspect.go @@ -0,0 +1,54 @@ +package client + +import ( + "bytes" + "encoding/json" + "io/ioutil" + "net/http" + "net/url" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// ContainerInspect returns the container information. +func (cli *Client) ContainerInspect(ctx context.Context, containerID string) (types.ContainerJSON, error) { + serverResp, err := cli.get(ctx, "/containers/"+containerID+"/json", nil, nil) + if err != nil { + if serverResp.statusCode == http.StatusNotFound { + return types.ContainerJSON{}, containerNotFoundError{containerID} + } + return types.ContainerJSON{}, err + } + + var response types.ContainerJSON + err = json.NewDecoder(serverResp.body).Decode(&response) + ensureReaderClosed(serverResp) + return response, err +} + +// ContainerInspectWithRaw returns the container information and its raw representation. 
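+// A minimal usage sketch (illustrative only; it assumes an initialized *Client named cli and a context ctx). The raw JSON body is returned alongside the decoded struct for callers that need fields the typed API omits:
+//
+//    containerJSON, raw, err := cli.ContainerInspectWithRaw(ctx, "container_id", true)
+//    if err != nil {
+//        // handle the error
+//    }
+//    fmt.Println(containerJSON.ID, len(raw))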
+func (cli *Client) ContainerInspectWithRaw(ctx context.Context, containerID string, getSize bool) (types.ContainerJSON, []byte, error) { + query := url.Values{} + if getSize { + query.Set("size", "1") + } + serverResp, err := cli.get(ctx, "/containers/"+containerID+"/json", query, nil) + if err != nil { + if serverResp.statusCode == http.StatusNotFound { + return types.ContainerJSON{}, nil, containerNotFoundError{containerID} + } + return types.ContainerJSON{}, nil, err + } + defer ensureReaderClosed(serverResp) + + body, err := ioutil.ReadAll(serverResp.body) + if err != nil { + return types.ContainerJSON{}, nil, err + } + + var response types.ContainerJSON + rdr := bytes.NewReader(body) + err = json.NewDecoder(rdr).Decode(&response) + return response, body, err +} diff --git a/components/engine/client/container_inspect_test.go b/components/engine/client/container_inspect_test.go new file mode 100644 index 0000000000..98f83bd8db --- /dev/null +++ b/components/engine/client/container_inspect_test.go @@ -0,0 +1,125 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +func TestContainerInspectError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, err := client.ContainerInspect(context.Background(), "nothing") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestContainerInspectContainerNotFound(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusNotFound, "Server error")), + } + + _, err := client.ContainerInspect(context.Background(), "unknown") + if err == nil || !IsErrContainerNotFound(err) { + t.Fatalf("expected a containerNotFound error, got %v", err) + } +} + +func TestContainerInspect(t *testing.T) { + expectedURL := "/containers/container_id/json" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + content, err := json.Marshal(types.ContainerJSON{ + ContainerJSONBase: &types.ContainerJSONBase{ + ID: "container_id", + Image: "image", + Name: "name", + }, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + r, err := client.ContainerInspect(context.Background(), "container_id") + if err != nil { + t.Fatal(err) + } + if r.ID != "container_id" { + t.Fatalf("expected `container_id`, got %s", r.ID) + } + if r.Image != "image" { + t.Fatalf("expected `image`, got %s", r.Image) + } + if r.Name != "name" { + t.Fatalf("expected `name`, got %s", r.Name) + } +} + +func TestContainerInspectNode(t *testing.T) { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + content, err := json.Marshal(types.ContainerJSON{ + ContainerJSONBase: &types.ContainerJSONBase{ + ID: "container_id", + Image: "image", + Name: "name", + Node: &types.ContainerNode{ + ID: "container_node_id", + Addr: "container_node", + Labels: map[string]string{"foo": "bar"}, + }, + }, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + 
+ r, err := client.ContainerInspect(context.Background(), "container_id") + if err != nil { + t.Fatal(err) + } + if r.ID != "container_id" { + t.Fatalf("expected `container_id`, got %s", r.ID) + } + if r.Image != "image" { + t.Fatalf("expected `image`, got %s", r.Image) + } + if r.Name != "name" { + t.Fatalf("expected `name`, got %s", r.Name) + } + if r.Node.ID != "container_node_id" { + t.Fatalf("expected `container_node_id`, got %s", r.Node.ID) + } + if r.Node.Addr != "container_node" { + t.Fatalf("expected `container_node`, got %s", r.Node.Addr) + } + foo, ok := r.Node.Labels["foo"] + if foo != "bar" || !ok { + t.Fatalf("expected `bar` for label `foo`") + } +} diff --git a/components/engine/client/container_kill.go b/components/engine/client/container_kill.go new file mode 100644 index 0000000000..29f80c73ad --- /dev/null +++ b/components/engine/client/container_kill.go @@ -0,0 +1,17 @@ +package client + +import ( + "net/url" + + "golang.org/x/net/context" +) + +// ContainerKill terminates the container process but does not remove the container from the docker host. +func (cli *Client) ContainerKill(ctx context.Context, containerID, signal string) error { + query := url.Values{} + query.Set("signal", signal) + + resp, err := cli.post(ctx, "/containers/"+containerID+"/kill", query, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/components/engine/client/container_kill_test.go b/components/engine/client/container_kill_test.go new file mode 100644 index 0000000000..9477b0abd2 --- /dev/null +++ b/components/engine/client/container_kill_test.go @@ -0,0 +1,46 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" +) + +func TestContainerKillError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + err := client.ContainerKill(context.Background(), "nothing", "SIGKILL") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestContainerKill(t *testing.T) { + expectedURL := "/containers/container_id/kill" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + signal := req.URL.Query().Get("signal") + if signal != "SIGKILL" { + return nil, fmt.Errorf("signal not set in URL query properly. Expected 'SIGKILL', got %s", signal) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + + err := client.ContainerKill(context.Background(), "container_id", "SIGKILL") + if err != nil { + t.Fatal(err) + } +} diff --git a/components/engine/client/container_list.go b/components/engine/client/container_list.go new file mode 100644 index 0000000000..4398912197 --- /dev/null +++ b/components/engine/client/container_list.go @@ -0,0 +1,56 @@ +package client + +import ( + "encoding/json" + "net/url" + "strconv" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "golang.org/x/net/context" +) + +// ContainerList returns the list of containers in the docker host. 
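+// A minimal usage sketch (illustrative only; it assumes an initialized *Client named cli, a context ctx, and a placeholder "env=prod" label):
+//
+//    listFilters := filters.NewArgs()
+//    listFilters.Add("label", "env=prod")
+//    containers, err := cli.ContainerList(ctx, types.ContainerListOptions{
+//        All:     true,
+//        Filters: listFilters,
+//    })
+//    if err != nil {
+//        // handle the error
+//    }
+//    for _, c := range containers {
+//        fmt.Println(c.ID, c.Image)
+//    }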
+func (cli *Client) ContainerList(ctx context.Context, options types.ContainerListOptions) ([]types.Container, error) { + query := url.Values{} + + if options.All { + query.Set("all", "1") + } + + if options.Limit != -1 { + query.Set("limit", strconv.Itoa(options.Limit)) + } + + if options.Since != "" { + query.Set("since", options.Since) + } + + if options.Before != "" { + query.Set("before", options.Before) + } + + if options.Size { + query.Set("size", "1") + } + + if options.Filters.Len() > 0 { + filterJSON, err := filters.ToParamWithVersion(cli.version, options.Filters) + + if err != nil { + return nil, err + } + + query.Set("filters", filterJSON) + } + + resp, err := cli.get(ctx, "/containers/json", query, nil) + if err != nil { + return nil, err + } + + var containers []types.Container + err = json.NewDecoder(resp.body).Decode(&containers) + ensureReaderClosed(resp) + return containers, err +} diff --git a/components/engine/client/container_list_test.go b/components/engine/client/container_list_test.go new file mode 100644 index 0000000000..e41c6874b5 --- /dev/null +++ b/components/engine/client/container_list_test.go @@ -0,0 +1,96 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "golang.org/x/net/context" +) + +func TestContainerListError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.ContainerList(context.Background(), types.ContainerListOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestContainerList(t *testing.T) { + expectedURL := "/containers/json" + expectedFilters := `{"before":{"container":true},"label":{"label1":true,"label2":true}}` + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + query := req.URL.Query() + all := query.Get("all") + if all != "1" { + return nil, fmt.Errorf("all not set in URL query properly. Expected '1', got %s", all) + } + limit := query.Get("limit") + if limit != "0" { + return nil, fmt.Errorf("limit should not be present in query. Expected '0', got %s", limit) + } + since := query.Get("since") + if since != "container" { + return nil, fmt.Errorf("since not set in URL query properly. Expected 'container', got %s", since) + } + before := query.Get("before") + if before != "" { + return nil, fmt.Errorf("before should not be present in query, got %s", before) + } + size := query.Get("size") + if size != "1" { + return nil, fmt.Errorf("size not set in URL query properly.
Expected '1', got %s", size) + } + filters := query.Get("filters") + if filters != expectedFilters { + return nil, fmt.Errorf("expected filters incoherent '%v' with actual filters %v", expectedFilters, filters) + } + + b, err := json.Marshal([]types.Container{ + { + ID: "container_id1", + }, + { + ID: "container_id2", + }, + }) + if err != nil { + return nil, err + } + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(b)), + }, nil + }), + } + + filters := filters.NewArgs() + filters.Add("label", "label1") + filters.Add("label", "label2") + filters.Add("before", "container") + containers, err := client.ContainerList(context.Background(), types.ContainerListOptions{ + Size: true, + All: true, + Since: "container", + Filters: filters, + }) + if err != nil { + t.Fatal(err) + } + if len(containers) != 2 { + t.Fatalf("expected 2 containers, got %v", containers) + } +} diff --git a/components/engine/client/container_logs.go b/components/engine/client/container_logs.go new file mode 100644 index 0000000000..69056b6321 --- /dev/null +++ b/components/engine/client/container_logs.go @@ -0,0 +1,52 @@ +package client + +import ( + "io" + "net/url" + "time" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + timetypes "github.com/docker/docker/api/types/time" +) + +// ContainerLogs returns the logs generated by a container in an io.ReadCloser. +// It's up to the caller to close the stream. +func (cli *Client) ContainerLogs(ctx context.Context, container string, options types.ContainerLogsOptions) (io.ReadCloser, error) { + query := url.Values{} + if options.ShowStdout { + query.Set("stdout", "1") + } + + if options.ShowStderr { + query.Set("stderr", "1") + } + + if options.Since != "" { + ts, err := timetypes.GetTimestamp(options.Since, time.Now()) + if err != nil { + return nil, err + } + query.Set("since", ts) + } + + if options.Timestamps { + query.Set("timestamps", "1") + } + + if options.Details { + query.Set("details", "1") + } + + if options.Follow { + query.Set("follow", "1") + } + query.Set("tail", options.Tail) + + resp, err := cli.get(ctx, "/containers/"+container+"/logs", query, nil) + if err != nil { + return nil, err + } + return resp.body, nil +} diff --git a/components/engine/client/container_logs_test.go b/components/engine/client/container_logs_test.go new file mode 100644 index 0000000000..99e31842c9 --- /dev/null +++ b/components/engine/client/container_logs_test.go @@ -0,0 +1,133 @@ +package client + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "log" + "net/http" + "os" + "strings" + "testing" + "time" + + "github.com/docker/docker/api/types" + + "golang.org/x/net/context" +) + +func TestContainerLogsError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.ContainerLogs(context.Background(), "container_id", types.ContainerLogsOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } + _, err = client.ContainerLogs(context.Background(), "container_id", types.ContainerLogsOptions{ + Since: "2006-01-02TZ", + }) + if err == nil || !strings.Contains(err.Error(), `parsing time "2006-01-02TZ"`) { + t.Fatalf("expected a 'parsing time' error, got %v", err) + } +} + +func TestContainerLogs(t *testing.T) { + expectedURL := "/containers/container_id/logs" + cases := []struct { + options types.ContainerLogsOptions + expectedQueryParams 
map[string]string + }{ + { + expectedQueryParams: map[string]string{ + "tail": "", + }, + }, + { + options: types.ContainerLogsOptions{ + Tail: "any", + }, + expectedQueryParams: map[string]string{ + "tail": "any", + }, + }, + { + options: types.ContainerLogsOptions{ + ShowStdout: true, + ShowStderr: true, + Timestamps: true, + Details: true, + Follow: true, + }, + expectedQueryParams: map[string]string{ + "tail": "", + "stdout": "1", + "stderr": "1", + "timestamps": "1", + "details": "1", + "follow": "1", + }, + }, + { + options: types.ContainerLogsOptions{ + // A completely invalid date, timestamp or Go duration will be + // passed as is + Since: "invalid but valid", + }, + expectedQueryParams: map[string]string{ + "tail": "", + "since": "invalid but valid", + }, + }, + } + for _, logCase := range cases { + client := &Client{ + client: newMockClient(func(r *http.Request) (*http.Response, error) { + if !strings.HasPrefix(r.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, r.URL) + } + // Check query parameters + query := r.URL.Query() + for key, expected := range logCase.expectedQueryParams { + actual := query.Get(key) + if actual != expected { + return nil, fmt.Errorf("%s not set in URL query properly. Expected '%s', got %s", key, expected, actual) + } + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte("response"))), + }, nil + }), + } + body, err := client.ContainerLogs(context.Background(), "container_id", logCase.options) + if err != nil { + t.Fatal(err) + } + defer body.Close() + content, err := ioutil.ReadAll(body) + if err != nil { + t.Fatal(err) + } + if string(content) != "response" { + t.Fatalf("expected response to contain 'response', got %s", string(content)) + } + } +} + +func ExampleClient_ContainerLogs_withTimeout() { + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + client, _ := NewEnvClient() + reader, err := client.ContainerLogs(ctx, "container_id", types.ContainerLogsOptions{}) + if err != nil { + log.Fatal(err) + } + + _, err = io.Copy(os.Stdout, reader) + if err != nil && err != io.EOF { + log.Fatal(err) + } +} diff --git a/components/engine/client/container_pause.go b/components/engine/client/container_pause.go new file mode 100644 index 0000000000..412067a782 --- /dev/null +++ b/components/engine/client/container_pause.go @@ -0,0 +1,10 @@ +package client + +import "golang.org/x/net/context" + +// ContainerPause pauses the main process of a given container without terminating it.
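+// A minimal usage sketch (illustrative only; it assumes an initialized *Client named cli and a context ctx). A paused container is typically resumed later with ContainerUnpause:
+//
+//    if err := cli.ContainerPause(ctx, "container_id"); err != nil {
+//        // handle the error
+//    }
+//    // ... work against the frozen container ...
+//    if err := cli.ContainerUnpause(ctx, "container_id"); err != nil {
+//        // handle the error
+//    }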
+func (cli *Client) ContainerPause(ctx context.Context, containerID string) error { + resp, err := cli.post(ctx, "/containers/"+containerID+"/pause", nil, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/components/engine/client/container_pause_test.go b/components/engine/client/container_pause_test.go new file mode 100644 index 0000000000..0ee2f05d7e --- /dev/null +++ b/components/engine/client/container_pause_test.go @@ -0,0 +1,41 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" +) + +func TestContainerPauseError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + err := client.ContainerPause(context.Background(), "nothing") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestContainerPause(t *testing.T) { + expectedURL := "/containers/container_id/pause" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + err := client.ContainerPause(context.Background(), "container_id") + if err != nil { + t.Fatal(err) + } +} diff --git a/components/engine/client/container_prune.go b/components/engine/client/container_prune.go new file mode 100644 index 0000000000..b582170867 --- /dev/null +++ b/components/engine/client/container_prune.go @@ -0,0 +1,36 @@ +package client + +import ( + "encoding/json" + "fmt" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "golang.org/x/net/context" +) + +// ContainersPrune requests the daemon to delete unused data +func (cli *Client) ContainersPrune(ctx context.Context, pruneFilters filters.Args) (types.ContainersPruneReport, error) { + var report types.ContainersPruneReport + + if err := cli.NewVersionError("1.25", "container prune"); err != nil { + return report, err + } + + query, err := getFiltersQuery(pruneFilters) + if err != nil { + return report, err + } + + serverResp, err := cli.post(ctx, "/containers/prune", query, nil, nil) + if err != nil { + return report, err + } + defer ensureReaderClosed(serverResp) + + if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil { + return report, fmt.Errorf("Error retrieving containers prune report: %v", err) + } + + return report, nil +} diff --git a/components/engine/client/container_prune_test.go b/components/engine/client/container_prune_test.go new file mode 100644 index 0000000000..8a1c63897b --- /dev/null +++ b/components/engine/client/container_prune_test.go @@ -0,0 +1,124 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/stretchr/testify/assert" + "golang.org/x/net/context" +) + +func TestContainersPruneError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + version: "1.25", + } + + filters := filters.NewArgs() + + _, err := client.ContainersPrune(context.Background(), filters) + assert.EqualError(t, err, "Error response from daemon: Server error") +} + +func
TestContainersPrune(t *testing.T) { + expectedURL := "/v1.25/containers/prune" + + danglingFilters := filters.NewArgs() + danglingFilters.Add("dangling", "true") + + noDanglingFilters := filters.NewArgs() + noDanglingFilters.Add("dangling", "false") + + danglingUntilFilters := filters.NewArgs() + danglingUntilFilters.Add("dangling", "true") + danglingUntilFilters.Add("until", "2016-12-15T14:00") + + labelFilters := filters.NewArgs() + labelFilters.Add("dangling", "true") + labelFilters.Add("label", "label1=foo") + labelFilters.Add("label", "label2!=bar") + + listCases := []struct { + filters filters.Args + expectedQueryParams map[string]string + }{ + { + filters: filters.Args{}, + expectedQueryParams: map[string]string{ + "until": "", + "filter": "", + "filters": "", + }, + }, + { + filters: danglingFilters, + expectedQueryParams: map[string]string{ + "until": "", + "filter": "", + "filters": `{"dangling":{"true":true}}`, + }, + }, + { + filters: danglingUntilFilters, + expectedQueryParams: map[string]string{ + "until": "", + "filter": "", + "filters": `{"dangling":{"true":true},"until":{"2016-12-15T14:00":true}}`, + }, + }, + { + filters: noDanglingFilters, + expectedQueryParams: map[string]string{ + "until": "", + "filter": "", + "filters": `{"dangling":{"false":true}}`, + }, + }, + { + filters: labelFilters, + expectedQueryParams: map[string]string{ + "until": "", + "filter": "", + "filters": `{"dangling":{"true":true},"label":{"label1=foo":true,"label2!=bar":true}}`, + }, + }, + } + for _, listCase := range listCases { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + query := req.URL.Query() + for key, expected := range listCase.expectedQueryParams { + actual := query.Get(key) + assert.Equal(t, expected, actual) + } + content, err := json.Marshal(types.ContainersPruneReport{ + ContainersDeleted: []string{"container_id1", "container_id2"}, + SpaceReclaimed: 9999, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + version: "1.25", + } + + report, err := client.ContainersPrune(context.Background(), listCase.filters) + assert.NoError(t, err) + assert.Len(t, report.ContainersDeleted, 2) + assert.Equal(t, uint64(9999), report.SpaceReclaimed) + } +} diff --git a/components/engine/client/container_remove.go b/components/engine/client/container_remove.go new file mode 100644 index 0000000000..3a79590ced --- /dev/null +++ b/components/engine/client/container_remove.go @@ -0,0 +1,27 @@ +package client + +import ( + "net/url" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// ContainerRemove kills and removes a container from the docker host. 
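+// A minimal usage sketch (illustrative only; it assumes an initialized *Client named cli and a context ctx). Force removes even a running container, and RemoveVolumes also deletes its anonymous volumes:
+//
+//    err := cli.ContainerRemove(ctx, "container_id", types.ContainerRemoveOptions{
+//        RemoveVolumes: true,
+//        Force:         true,
+//    })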
+func (cli *Client) ContainerRemove(ctx context.Context, containerID string, options types.ContainerRemoveOptions) error { + query := url.Values{} + if options.RemoveVolumes { + query.Set("v", "1") + } + if options.RemoveLinks { + query.Set("link", "1") + } + + if options.Force { + query.Set("force", "1") + } + + resp, err := cli.delete(ctx, "/containers/"+containerID, query, nil) + ensureReaderClosed(resp) + return err +} diff --git a/components/engine/client/container_remove_test.go b/components/engine/client/container_remove_test.go new file mode 100644 index 0000000000..798c08b333 --- /dev/null +++ b/components/engine/client/container_remove_test.go @@ -0,0 +1,59 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +func TestContainerRemoveError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + err := client.ContainerRemove(context.Background(), "container_id", types.ContainerRemoveOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestContainerRemove(t *testing.T) { + expectedURL := "/containers/container_id" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + query := req.URL.Query() + volume := query.Get("v") + if volume != "1" { + return nil, fmt.Errorf("v (volume) not set in URL query properly. Expected '1', got %s", volume) + } + force := query.Get("force") + if force != "1" { + return nil, fmt.Errorf("force not set in URL query properly. Expected '1', got %s", force) + } + link := query.Get("link") + if link != "" { + return nil, fmt.Errorf("link should not be present in query, got %s", link) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + + err := client.ContainerRemove(context.Background(), "container_id", types.ContainerRemoveOptions{ + RemoveVolumes: true, + Force: true, + }) + if err != nil { + t.Fatal(err) + } +} diff --git a/components/engine/client/container_rename.go b/components/engine/client/container_rename.go new file mode 100644 index 0000000000..0e718da7c6 --- /dev/null +++ b/components/engine/client/container_rename.go @@ -0,0 +1,16 @@ +package client + +import ( + "net/url" + + "golang.org/x/net/context" +) + +// ContainerRename changes the name of a given container.
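+// A minimal usage sketch (illustrative only; it assumes an initialized *Client named cli and a context ctx):
+//
+//    if err := cli.ContainerRename(ctx, "container_id", "new_name"); err != nil {
+//        // handle the error
+//    }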
+func (cli *Client) ContainerRename(ctx context.Context, containerID, newContainerName string) error { + query := url.Values{} + query.Set("name", newContainerName) + resp, err := cli.post(ctx, "/containers/"+containerID+"/rename", query, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/components/engine/client/container_rename_test.go b/components/engine/client/container_rename_test.go new file mode 100644 index 0000000000..732ebff5f7 --- /dev/null +++ b/components/engine/client/container_rename_test.go @@ -0,0 +1,46 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" +) + +func TestContainerRenameError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + err := client.ContainerRename(context.Background(), "nothing", "newNothing") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestContainerRename(t *testing.T) { + expectedURL := "/containers/container_id/rename" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + name := req.URL.Query().Get("name") + if name != "newName" { + return nil, fmt.Errorf("name not set in URL query properly. Expected 'newName', got %s", name) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + + err := client.ContainerRename(context.Background(), "container_id", "newName") + if err != nil { + t.Fatal(err) + } +} diff --git a/components/engine/client/container_resize.go b/components/engine/client/container_resize.go new file mode 100644 index 0000000000..66c3cc1940 --- /dev/null +++ b/components/engine/client/container_resize.go @@ -0,0 +1,29 @@ +package client + +import ( + "net/url" + "strconv" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// ContainerResize changes the size of the tty for a container. +func (cli *Client) ContainerResize(ctx context.Context, containerID string, options types.ResizeOptions) error { + return cli.resize(ctx, "/containers/"+containerID, options.Height, options.Width) +} + +// ContainerExecResize changes the size of the tty for an exec process running inside a container. 
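+// A minimal usage sketch (illustrative only; it assumes an initialized *Client named cli, a context ctx, and an exec ID previously returned by ContainerExecCreate). This is typically called when the local terminal window changes size:
+//
+//    err := cli.ContainerExecResize(ctx, "exec_id", types.ResizeOptions{
+//        Height: 40,
+//        Width:  120,
+//    })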
+func (cli *Client) ContainerExecResize(ctx context.Context, execID string, options types.ResizeOptions) error { + return cli.resize(ctx, "/exec/"+execID, options.Height, options.Width) +} + +func (cli *Client) resize(ctx context.Context, basePath string, height, width uint) error { + query := url.Values{} + query.Set("h", strconv.Itoa(int(height))) + query.Set("w", strconv.Itoa(int(width))) + + resp, err := cli.post(ctx, basePath+"/resize", query, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/components/engine/client/container_resize_test.go b/components/engine/client/container_resize_test.go new file mode 100644 index 0000000000..5b2efecdce --- /dev/null +++ b/components/engine/client/container_resize_test.go @@ -0,0 +1,82 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +func TestContainerResizeError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + err := client.ContainerResize(context.Background(), "container_id", types.ResizeOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestContainerExecResizeError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + err := client.ContainerExecResize(context.Background(), "exec_id", types.ResizeOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestContainerResize(t *testing.T) { + client := &Client{ + client: newMockClient(resizeTransport("/containers/container_id/resize")), + } + + err := client.ContainerResize(context.Background(), "container_id", types.ResizeOptions{ + Height: 500, + Width: 600, + }) + if err != nil { + t.Fatal(err) + } +} + +func TestContainerExecResize(t *testing.T) { + client := &Client{ + client: newMockClient(resizeTransport("/exec/exec_id/resize")), + } + + err := client.ContainerExecResize(context.Background(), "exec_id", types.ResizeOptions{ + Height: 500, + Width: 600, + }) + if err != nil { + t.Fatal(err) + } +} + +func resizeTransport(expectedURL string) func(req *http.Request) (*http.Response, error) { + return func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + query := req.URL.Query() + h := query.Get("h") + if h != "500" { + return nil, fmt.Errorf("h not set in URL query properly. Expected '500', got %s", h) + } + w := query.Get("w") + if w != "600" { + return nil, fmt.Errorf("w not set in URL query properly. Expected '600', got %s", w) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + } +} diff --git a/components/engine/client/container_restart.go b/components/engine/client/container_restart.go new file mode 100644 index 0000000000..74d7455f02 --- /dev/null +++ b/components/engine/client/container_restart.go @@ -0,0 +1,22 @@ +package client + +import ( + "net/url" + "time" + + timetypes "github.com/docker/docker/api/types/time" + "golang.org/x/net/context" +) + +// ContainerRestart stops and starts a container again. 
+// It makes the daemon wait for the container to be up again for +// a specific amount of time, given the timeout. +func (cli *Client) ContainerRestart(ctx context.Context, containerID string, timeout *time.Duration) error { + query := url.Values{} + if timeout != nil { + query.Set("t", timetypes.DurationToSecondsString(*timeout)) + } + resp, err := cli.post(ctx, "/containers/"+containerID+"/restart", query, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/components/engine/client/container_restart_test.go b/components/engine/client/container_restart_test.go new file mode 100644 index 0000000000..8c3cfd6a6f --- /dev/null +++ b/components/engine/client/container_restart_test.go @@ -0,0 +1,48 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + "time" + + "golang.org/x/net/context" +) + +func TestContainerRestartError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + timeout := 0 * time.Second + err := client.ContainerRestart(context.Background(), "nothing", &timeout) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestContainerRestart(t *testing.T) { + expectedURL := "/containers/container_id/restart" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + t := req.URL.Query().Get("t") + if t != "100" { + return nil, fmt.Errorf("t (timeout) not set in URL query properly. Expected '100', got %s", t) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + timeout := 100 * time.Second + err := client.ContainerRestart(context.Background(), "container_id", &timeout) + if err != nil { + t.Fatal(err) + } +} diff --git a/components/engine/client/container_start.go b/components/engine/client/container_start.go new file mode 100644 index 0000000000..b1f08de416 --- /dev/null +++ b/components/engine/client/container_start.go @@ -0,0 +1,24 @@ +package client + +import ( + "net/url" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" +) + +// ContainerStart sends a request to the docker daemon to start a container.
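+// A minimal usage sketch (illustrative only; it assumes an initialized *Client named cli, a context ctx, and a container already created on the daemon):
+//
+//    if err := cli.ContainerStart(ctx, "container_id", types.ContainerStartOptions{}); err != nil {
+//        // handle the error
+//    }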
+func (cli *Client) ContainerStart(ctx context.Context, containerID string, options types.ContainerStartOptions) error { + query := url.Values{} + if len(options.CheckpointID) != 0 { + query.Set("checkpoint", options.CheckpointID) + } + if len(options.CheckpointDir) != 0 { + query.Set("checkpoint-dir", options.CheckpointDir) + } + + resp, err := cli.post(ctx, "/containers/"+containerID+"/start", query, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/components/engine/client/container_start_test.go b/components/engine/client/container_start_test.go new file mode 100644 index 0000000000..5826fa8bc7 --- /dev/null +++ b/components/engine/client/container_start_test.go @@ -0,0 +1,58 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" +) + +func TestContainerStartError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + err := client.ContainerStart(context.Background(), "nothing", types.ContainerStartOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestContainerStart(t *testing.T) { + expectedURL := "/containers/container_id/start" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + // we're not expecting any payload, but if one is supplied, check it is valid. + if req.Header.Get("Content-Type") == "application/json" { + var startConfig interface{} + if err := json.NewDecoder(req.Body).Decode(&startConfig); err != nil { + return nil, fmt.Errorf("Unable to parse json: %s", err) + } + } + + checkpoint := req.URL.Query().Get("checkpoint") + if checkpoint != "checkpoint_id" { + return nil, fmt.Errorf("checkpoint not set in URL query properly. Expected 'checkpoint_id', got %s", checkpoint) + } + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + + err := client.ContainerStart(context.Background(), "container_id", types.ContainerStartOptions{CheckpointID: "checkpoint_id"}) + if err != nil { + t.Fatal(err) + } +} diff --git a/components/engine/client/container_stats.go b/components/engine/client/container_stats.go new file mode 100644 index 0000000000..4758c66e32 --- /dev/null +++ b/components/engine/client/container_stats.go @@ -0,0 +1,26 @@ +package client + +import ( + "net/url" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// ContainerStats returns near realtime stats for a given container. +// It's up to the caller to close the io.ReadCloser returned. 
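+// A minimal usage sketch (illustrative only; it assumes an initialized *Client named cli and a context ctx). With stream set to false the daemon returns a single sample; the body must still be closed:
+//
+//    stats, err := cli.ContainerStats(ctx, "container_id", false)
+//    if err != nil {
+//        // handle the error
+//    }
+//    defer stats.Body.Close()
+//    _, err = io.Copy(os.Stdout, stats.Body)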
+func (cli *Client) ContainerStats(ctx context.Context, containerID string, stream bool) (types.ContainerStats, error) { + query := url.Values{} + query.Set("stream", "0") + if stream { + query.Set("stream", "1") + } + + resp, err := cli.get(ctx, "/containers/"+containerID+"/stats", query, nil) + if err != nil { + return types.ContainerStats{}, err + } + + osType := getDockerOS(resp.header.Get("Server")) + return types.ContainerStats{Body: resp.body, OSType: osType}, err +} diff --git a/components/engine/client/container_stats_test.go b/components/engine/client/container_stats_test.go new file mode 100644 index 0000000000..7414f135c3 --- /dev/null +++ b/components/engine/client/container_stats_test.go @@ -0,0 +1,70 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" +) + +func TestContainerStatsError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.ContainerStats(context.Background(), "nothing", false) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestContainerStats(t *testing.T) { + expectedURL := "/containers/container_id/stats" + cases := []struct { + stream bool + expectedStream string + }{ + { + expectedStream: "0", + }, + { + stream: true, + expectedStream: "1", + }, + } + for _, c := range cases { + client := &Client{ + client: newMockClient(func(r *http.Request) (*http.Response, error) { + if !strings.HasPrefix(r.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, r.URL) + } + + query := r.URL.Query() + stream := query.Get("stream") + if stream != c.expectedStream { + return nil, fmt.Errorf("stream not set in URL query properly. Expected '%s', got %s", c.expectedStream, stream) + } + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte("response"))), + }, nil + }), + } + resp, err := client.ContainerStats(context.Background(), "container_id", c.stream) + if err != nil { + t.Fatal(err) + } + defer resp.Body.Close() + content, err := ioutil.ReadAll(resp.Body) + if err != nil { + t.Fatal(err) + } + if string(content) != "response" { + t.Fatalf("expected response to contain 'response', got %s", string(content)) + } + } +} diff --git a/components/engine/client/container_stop.go b/components/engine/client/container_stop.go new file mode 100644 index 0000000000..b5418ae8c8 --- /dev/null +++ b/components/engine/client/container_stop.go @@ -0,0 +1,21 @@ +package client + +import ( + "net/url" + "time" + + timetypes "github.com/docker/docker/api/types/time" + "golang.org/x/net/context" +) + +// ContainerStop stops a container, giving its main process a grace period to exit. +// The call blocks until the container stops or the timeout expires.
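+// A minimal usage sketch (illustrative only; it assumes an initialized *Client named cli and a context ctx). Passing a nil timeout lets the daemon apply its default grace period:
+//
+//    timeout := 10 * time.Second
+//    if err := cli.ContainerStop(ctx, "container_id", &timeout); err != nil {
+//        // handle the error
+//    }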
+func (cli *Client) ContainerStop(ctx context.Context, containerID string, timeout *time.Duration) error { + query := url.Values{} + if timeout != nil { + query.Set("t", timetypes.DurationToSecondsString(*timeout)) + } + resp, err := cli.post(ctx, "/containers/"+containerID+"/stop", query, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/components/engine/client/container_stop_test.go b/components/engine/client/container_stop_test.go new file mode 100644 index 0000000000..c32cd691c4 --- /dev/null +++ b/components/engine/client/container_stop_test.go @@ -0,0 +1,48 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + "time" + + "golang.org/x/net/context" +) + +func TestContainerStopError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + timeout := 0 * time.Second + err := client.ContainerStop(context.Background(), "nothing", &timeout) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestContainerStop(t *testing.T) { + expectedURL := "/containers/container_id/stop" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + t := req.URL.Query().Get("t") + if t != "100" { + return nil, fmt.Errorf("t (timeout) not set in URL query properly. Expected '100', got %s", t) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + timeout := 100 * time.Second + err := client.ContainerStop(context.Background(), "container_id", &timeout) + if err != nil { + t.Fatal(err) + } +} diff --git a/components/engine/client/container_top.go b/components/engine/client/container_top.go new file mode 100644 index 0000000000..9689123a40 --- /dev/null +++ b/components/engine/client/container_top.go @@ -0,0 +1,28 @@ +package client + +import ( + "encoding/json" + "net/url" + "strings" + + "github.com/docker/docker/api/types/container" + "golang.org/x/net/context" +) + +// ContainerTop shows process information from within a container. 
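+// A minimal usage sketch (illustrative only; it assumes an initialized *Client named cli and a context ctx). The arguments are joined with spaces and passed to ps as ps_args:
+//
+//    top, err := cli.ContainerTop(ctx, "container_id", []string{"-o", "pid,comm"})
+//    if err != nil {
+//        // handle the error
+//    }
+//    fmt.Println(top.Titles)
+//    for _, proc := range top.Processes {
+//        fmt.Println(proc)
+//    }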
+func (cli *Client) ContainerTop(ctx context.Context, containerID string, arguments []string) (container.ContainerTopOKBody, error) { + var response container.ContainerTopOKBody + query := url.Values{} + if len(arguments) > 0 { + query.Set("ps_args", strings.Join(arguments, " ")) + } + + resp, err := cli.get(ctx, "/containers/"+containerID+"/top", query, nil) + if err != nil { + return response, err + } + + err = json.NewDecoder(resp.body).Decode(&response) + ensureReaderClosed(resp) + return response, err +} diff --git a/components/engine/client/container_top_test.go b/components/engine/client/container_top_test.go new file mode 100644 index 0000000000..68ccef505d --- /dev/null +++ b/components/engine/client/container_top_test.go @@ -0,0 +1,74 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "reflect" + "strings" + "testing" + + "github.com/docker/docker/api/types/container" + "golang.org/x/net/context" +) + +func TestContainerTopError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.ContainerTop(context.Background(), "nothing", []string{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestContainerTop(t *testing.T) { + expectedURL := "/containers/container_id/top" + expectedProcesses := [][]string{ + {"p1", "p2"}, + {"p3"}, + } + expectedTitles := []string{"title1", "title2"} + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + query := req.URL.Query() + args := query.Get("ps_args") + if args != "arg1 arg2" { + return nil, fmt.Errorf("args not set in URL query properly. 
Expected 'arg1 arg2', got %v", args) + } + + b, err := json.Marshal(container.ContainerTopOKBody{ + Processes: [][]string{ + {"p1", "p2"}, + {"p3"}, + }, + Titles: []string{"title1", "title2"}, + }) + if err != nil { + return nil, err + } + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(b)), + }, nil + }), + } + + processList, err := client.ContainerTop(context.Background(), "container_id", []string{"arg1", "arg2"}) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(expectedProcesses, processList.Processes) { + t.Fatalf("Processes: expected %v, got %v", expectedProcesses, processList.Processes) + } + if !reflect.DeepEqual(expectedTitles, processList.Titles) { + t.Fatalf("Titles: expected %v, got %v", expectedTitles, processList.Titles) + } +} diff --git a/components/engine/client/container_unpause.go b/components/engine/client/container_unpause.go new file mode 100644 index 0000000000..5c76211256 --- /dev/null +++ b/components/engine/client/container_unpause.go @@ -0,0 +1,10 @@ +package client + +import "golang.org/x/net/context" + +// ContainerUnpause resumes the process execution within a container +func (cli *Client) ContainerUnpause(ctx context.Context, containerID string) error { + resp, err := cli.post(ctx, "/containers/"+containerID+"/unpause", nil, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/components/engine/client/container_unpause_test.go b/components/engine/client/container_unpause_test.go new file mode 100644 index 0000000000..2c42727191 --- /dev/null +++ b/components/engine/client/container_unpause_test.go @@ -0,0 +1,41 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" +) + +func TestContainerUnpauseError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + err := client.ContainerUnpause(context.Background(), "nothing") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestContainerUnpause(t *testing.T) { + expectedURL := "/containers/container_id/unpause" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + err := client.ContainerUnpause(context.Background(), "container_id") + if err != nil { + t.Fatal(err) + } +} diff --git a/components/engine/client/container_update.go b/components/engine/client/container_update.go new file mode 100644 index 0000000000..5082f22dfa --- /dev/null +++ b/components/engine/client/container_update.go @@ -0,0 +1,22 @@ +package client + +import ( + "encoding/json" + + "github.com/docker/docker/api/types/container" + "golang.org/x/net/context" +) + +// ContainerUpdate updates resources of a container +func (cli *Client) ContainerUpdate(ctx context.Context, containerID string, updateConfig container.UpdateConfig) (container.ContainerUpdateOKBody, error) { + var response container.ContainerUpdateOKBody + serverResp, err := cli.post(ctx, "/containers/"+containerID+"/update", nil, updateConfig, nil) + if err != nil { + return response, err + } + + err = json.NewDecoder(serverResp.body).Decode(&response) + + 
ensureReaderClosed(serverResp) + return response, err +} diff --git a/components/engine/client/container_update_test.go b/components/engine/client/container_update_test.go new file mode 100644 index 0000000000..715bb7ca23 --- /dev/null +++ b/components/engine/client/container_update_test.go @@ -0,0 +1,58 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types/container" + "golang.org/x/net/context" +) + +func TestContainerUpdateError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.ContainerUpdate(context.Background(), "nothing", container.UpdateConfig{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestContainerUpdate(t *testing.T) { + expectedURL := "/containers/container_id/update" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + + b, err := json.Marshal(container.ContainerUpdateOKBody{}) + if err != nil { + return nil, err + } + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(b)), + }, nil + }), + } + + _, err := client.ContainerUpdate(context.Background(), "container_id", container.UpdateConfig{ + Resources: container.Resources{ + CPUPeriod: 1, + }, + RestartPolicy: container.RestartPolicy{ + Name: "always", + }, + }) + if err != nil { + t.Fatal(err) + } +} diff --git a/components/engine/client/container_wait.go b/components/engine/client/container_wait.go new file mode 100644 index 0000000000..af4559c4b9 --- /dev/null +++ b/components/engine/client/container_wait.go @@ -0,0 +1,84 @@ +package client + +import ( + "encoding/json" + "net/url" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/versions" +) + +// ContainerWait waits until the specified container is in a certain state +// indicated by the given condition, either "not-running" (default), +// "next-exit", or "removed". +// +// If this client's API version is before 1.30, condition is ignored and +// ContainerWait will return immediately with the two channels, as the server +// will wait as if the condition were "not-running". +// +// If this client's API version is at least 1.30, ContainerWait blocks until +// the request has been acknowledged by the server (with a response header), +// then returns two channels on which the caller can wait for the exit status +// of the container or an error if there was a problem either beginning the +// wait request or in getting the response. This allows the caller to +// synchronize ContainerWait with other calls, such as specifying a +// "next-exit" condition before issuing a ContainerStart request.
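+// A minimal sketch of that synchronization (illustrative only; it assumes an initialized *Client named cli and a context ctx):
+//
+//    resultC, errC := cli.ContainerWait(ctx, "container_id", container.WaitCondition("next-exit"))
+//    if err := cli.ContainerStart(ctx, "container_id", types.ContainerStartOptions{}); err != nil {
+//        // handle the error
+//    }
+//    select {
+//    case res := <-resultC:
+//        fmt.Println("exit status:", res.StatusCode)
+//    case err := <-errC:
+//        // handle the error
+//    }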
+func (cli *Client) ContainerWait(ctx context.Context, containerID string, condition container.WaitCondition) (<-chan container.ContainerWaitOKBody, <-chan error) { + if versions.LessThan(cli.ClientVersion(), "1.30") { + return cli.legacyContainerWait(ctx, containerID) + } + + resultC := make(chan container.ContainerWaitOKBody) + errC := make(chan error, 1) + + query := url.Values{} + query.Set("condition", string(condition)) + + resp, err := cli.post(ctx, "/containers/"+containerID+"/wait", query, nil, nil) + if err != nil { + defer ensureReaderClosed(resp) + errC <- err + return resultC, errC + } + + go func() { + defer ensureReaderClosed(resp) + var res container.ContainerWaitOKBody + if err := json.NewDecoder(resp.body).Decode(&res); err != nil { + errC <- err + return + } + + resultC <- res + }() + + return resultC, errC +} + +// legacyContainerWait returns immediately and doesn't have an option to wait +// until the container is removed. +func (cli *Client) legacyContainerWait(ctx context.Context, containerID string) (<-chan container.ContainerWaitOKBody, <-chan error) { + resultC := make(chan container.ContainerWaitOKBody) + errC := make(chan error) + + go func() { + resp, err := cli.post(ctx, "/containers/"+containerID+"/wait", nil, nil, nil) + if err != nil { + errC <- err + return + } + defer ensureReaderClosed(resp) + + var res container.ContainerWaitOKBody + if err := json.NewDecoder(resp.body).Decode(&res); err != nil { + errC <- err + return + } + + resultC <- res + }() + + return resultC, errC +} diff --git a/components/engine/client/container_wait_test.go b/components/engine/client/container_wait_test.go new file mode 100644 index 0000000000..7b8c9f0964 --- /dev/null +++ b/components/engine/client/container_wait_test.go @@ -0,0 +1,74 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "log" + "net/http" + "strings" + "testing" + "time" + + "github.com/docker/docker/api/types/container" + + "golang.org/x/net/context" +) + +func TestContainerWaitError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + resultC, errC := client.ContainerWait(context.Background(), "nothing", "") + select { + case result := <-resultC: + t.Fatalf("expected to not get a wait result, got %d", result.StatusCode) + case err := <-errC: + if err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } + } +} + +func TestContainerWait(t *testing.T) { + expectedURL := "/containers/container_id/wait" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + b, err := json.Marshal(container.ContainerWaitOKBody{ + StatusCode: 15, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(b)), + }, nil + }), + } + + resultC, errC := client.ContainerWait(context.Background(), "container_id", "") + select { + case err := <-errC: + t.Fatal(err) + case result := <-resultC: + if result.StatusCode != 15 { + t.Fatalf("expected a status code equal to '15', got %d", result.StatusCode) + } + } +} + +func ExampleClient_ContainerWait_withTimeout() { + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + client, _ := NewEnvClient() + _, errC := client.ContainerWait(ctx, 
"container_id", "") + if err := <-errC; err != nil { + log.Fatal(err) + } +} diff --git a/components/engine/client/disk_usage.go b/components/engine/client/disk_usage.go new file mode 100644 index 0000000000..03c80b39af --- /dev/null +++ b/components/engine/client/disk_usage.go @@ -0,0 +1,26 @@ +package client + +import ( + "encoding/json" + "fmt" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// DiskUsage requests the current data usage from the daemon +func (cli *Client) DiskUsage(ctx context.Context) (types.DiskUsage, error) { + var du types.DiskUsage + + serverResp, err := cli.get(ctx, "/system/df", nil, nil) + if err != nil { + return du, err + } + defer ensureReaderClosed(serverResp) + + if err := json.NewDecoder(serverResp.body).Decode(&du); err != nil { + return du, fmt.Errorf("Error retrieving disk usage: %v", err) + } + + return du, nil +} diff --git a/components/engine/client/disk_usage_test.go b/components/engine/client/disk_usage_test.go new file mode 100644 index 0000000000..51e622233e --- /dev/null +++ b/components/engine/client/disk_usage_test.go @@ -0,0 +1,55 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +func TestDiskUsageError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.DiskUsage(context.Background()) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestDiskUsage(t *testing.T) { + expectedURL := "/system/df" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + + du := types.DiskUsage{ + LayersSize: int64(100), + Images: nil, + Containers: nil, + Volumes: nil, + } + + b, err := json.Marshal(du) + if err != nil { + return nil, err + } + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(b)), + }, nil + }), + } + if _, err := client.DiskUsage(context.Background()); err != nil { + t.Fatal(err) + } +} diff --git a/components/engine/client/distribution_inspect.go b/components/engine/client/distribution_inspect.go new file mode 100644 index 0000000000..d17ab73213 --- /dev/null +++ b/components/engine/client/distribution_inspect.go @@ -0,0 +1,31 @@ +package client + +import ( + "encoding/json" + "net/url" + + registrytypes "github.com/docker/docker/api/types/registry" + "golang.org/x/net/context" +) + +// DistributionInspect returns the image digest with full Manifest +func (cli *Client) DistributionInspect(ctx context.Context, image, encodedRegistryAuth string) (registrytypes.DistributionInspect, error) { + var headers map[string][]string + + if encodedRegistryAuth != "" { + headers = map[string][]string{ + "X-Registry-Auth": {encodedRegistryAuth}, + } + } + + // Contact the registry to retrieve digest and platform information + var distributionInspect registrytypes.DistributionInspect + resp, err := cli.get(ctx, "/distribution/"+image+"/json", url.Values{}, headers) + if err != nil { + return distributionInspect, err + } + + err = json.NewDecoder(resp.body).Decode(&distributionInspect) + ensureReaderClosed(resp) + return distributionInspect, err +} diff --git 
a/components/engine/client/errors.go b/components/engine/client/errors.go
new file mode 100644
index 0000000000..e0effafc0a
--- /dev/null
+++ b/components/engine/client/errors.go
@@ -0,0 +1,300 @@
+package client
+
+import (
+	"fmt"
+
+	"github.com/docker/docker/api/types/versions"
+	"github.com/pkg/errors"
+)
+
+// errConnectionFailed implements an error returned when a connection fails.
+type errConnectionFailed struct {
+	host string
+}
+
+// Error returns a string representation of an errConnectionFailed
+func (err errConnectionFailed) Error() string {
+	if err.host == "" {
+		return "Cannot connect to the Docker daemon. Is the docker daemon running on this host?"
+	}
+	return fmt.Sprintf("Cannot connect to the Docker daemon at %s. Is the docker daemon running?", err.host)
+}
+
+// IsErrConnectionFailed returns true if the error is caused by a failed connection.
+func IsErrConnectionFailed(err error) bool {
+	_, ok := errors.Cause(err).(errConnectionFailed)
+	return ok
+}
+
+// ErrorConnectionFailed returns an error with the host in the error message when connection to the docker daemon failed.
+func ErrorConnectionFailed(host string) error {
+	return errConnectionFailed{host: host}
+}
+
+type notFound interface {
+	error
+	NotFound() bool // Is the error a NotFound error
+}
+
+// IsErrNotFound returns true if the error is caused when an
+// object (image, container, network, volume, …) is not found in the docker host.
+func IsErrNotFound(err error) bool {
+	te, ok := err.(notFound)
+	return ok && te.NotFound()
+}
+
+// imageNotFoundError implements an error returned when an image is not in the docker host.
+type imageNotFoundError struct {
+	imageID string
+}
+
+// NotFound indicates that this error type is of NotFound
+func (e imageNotFoundError) NotFound() bool {
+	return true
+}
+
+// Error returns a string representation of an imageNotFoundError
+func (e imageNotFoundError) Error() string {
+	return fmt.Sprintf("Error: No such image: %s", e.imageID)
+}
+
+// IsErrImageNotFound returns true if the error is caused
+// when an image is not found in the docker host.
+func IsErrImageNotFound(err error) bool {
+	return IsErrNotFound(err)
+}
+
+// containerNotFoundError implements an error returned when a container is not in the docker host.
+type containerNotFoundError struct {
+	containerID string
+}
+
+// NotFound indicates that this error type is of NotFound
+func (e containerNotFoundError) NotFound() bool {
+	return true
+}
+
+// Error returns a string representation of a containerNotFoundError
+func (e containerNotFoundError) Error() string {
+	return fmt.Sprintf("Error: No such container: %s", e.containerID)
+}
+
+// IsErrContainerNotFound returns true if the error is caused
+// when a container is not found in the docker host.
+func IsErrContainerNotFound(err error) bool {
+	return IsErrNotFound(err)
+}
+
+// networkNotFoundError implements an error returned when a network is not in the docker host.
+type networkNotFoundError struct {
+	networkID string
+}
+
+// NotFound indicates that this error type is of NotFound
+func (e networkNotFoundError) NotFound() bool {
+	return true
+}
+
+// Error returns a string representation of a networkNotFoundError
+func (e networkNotFoundError) Error() string {
+	return fmt.Sprintf("Error: No such network: %s", e.networkID)
+}
+
+// IsErrNetworkNotFound returns true if the error is caused
+// when a network is not found in the docker host.
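+//
+// Like the image, container and volume variants, this delegates to
+// IsErrNotFound; a short sketch of the shared pattern, assuming an
+// initialized client cli:
+//
+//	if _, _, err := cli.ImageInspectWithRaw(ctx, "no-such-image"); IsErrNotFound(err) {
+//		// the object is absent from the daemon; create or pull it first
+//	}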
+func IsErrNetworkNotFound(err error) bool { + return IsErrNotFound(err) +} + +// volumeNotFoundError implements an error returned when a volume is not in the docker host. +type volumeNotFoundError struct { + volumeID string +} + +// NotFound indicates that this error type is of NotFound +func (e volumeNotFoundError) NotFound() bool { + return true +} + +// Error returns a string representation of a volumeNotFoundError +func (e volumeNotFoundError) Error() string { + return fmt.Sprintf("Error: No such volume: %s", e.volumeID) +} + +// IsErrVolumeNotFound returns true if the error is caused +// when a volume is not found in the docker host. +func IsErrVolumeNotFound(err error) bool { + return IsErrNotFound(err) +} + +// unauthorizedError represents an authorization error in a remote registry. +type unauthorizedError struct { + cause error +} + +// Error returns a string representation of an unauthorizedError +func (u unauthorizedError) Error() string { + return u.cause.Error() +} + +// IsErrUnauthorized returns true if the error is caused +// when a remote registry authentication fails +func IsErrUnauthorized(err error) bool { + _, ok := err.(unauthorizedError) + return ok +} + +// nodeNotFoundError implements an error returned when a node is not found. +type nodeNotFoundError struct { + nodeID string +} + +// Error returns a string representation of a nodeNotFoundError +func (e nodeNotFoundError) Error() string { + return fmt.Sprintf("Error: No such node: %s", e.nodeID) +} + +// NotFound indicates that this error type is of NotFound +func (e nodeNotFoundError) NotFound() bool { + return true +} + +// IsErrNodeNotFound returns true if the error is caused +// when a node is not found. +func IsErrNodeNotFound(err error) bool { + _, ok := err.(nodeNotFoundError) + return ok +} + +// serviceNotFoundError implements an error returned when a service is not found. +type serviceNotFoundError struct { + serviceID string +} + +// Error returns a string representation of a serviceNotFoundError +func (e serviceNotFoundError) Error() string { + return fmt.Sprintf("Error: No such service: %s", e.serviceID) +} + +// NotFound indicates that this error type is of NotFound +func (e serviceNotFoundError) NotFound() bool { + return true +} + +// IsErrServiceNotFound returns true if the error is caused +// when a service is not found. +func IsErrServiceNotFound(err error) bool { + _, ok := err.(serviceNotFoundError) + return ok +} + +// taskNotFoundError implements an error returned when a task is not found. +type taskNotFoundError struct { + taskID string +} + +// Error returns a string representation of a taskNotFoundError +func (e taskNotFoundError) Error() string { + return fmt.Sprintf("Error: No such task: %s", e.taskID) +} + +// NotFound indicates that this error type is of NotFound +func (e taskNotFoundError) NotFound() bool { + return true +} + +// IsErrTaskNotFound returns true if the error is caused +// when a task is not found. 
+func IsErrTaskNotFound(err error) bool {
+	_, ok := err.(taskNotFoundError)
+	return ok
+}
+
+type pluginPermissionDenied struct {
+	name string
+}
+
+func (e pluginPermissionDenied) Error() string {
+	return "Permission denied while installing plugin " + e.name
+}
+
+// IsErrPluginPermissionDenied returns true if the error is caused
+// when a user denies a plugin's permissions
+func IsErrPluginPermissionDenied(err error) bool {
+	_, ok := err.(pluginPermissionDenied)
+	return ok
+}
+
+// NewVersionError returns an error if the API version required
+// is less than the current supported version
+func (cli *Client) NewVersionError(APIrequired, feature string) error {
+	if versions.LessThan(cli.version, APIrequired) {
+		return fmt.Errorf("%q requires API version %s, but the Docker daemon API version is %s", feature, APIrequired, cli.version)
+	}
+	return nil
+}
+
+// secretNotFoundError implements an error returned when a secret is not found.
+type secretNotFoundError struct {
+	name string
+}
+
+// Error returns a string representation of a secretNotFoundError
+func (e secretNotFoundError) Error() string {
+	return fmt.Sprintf("Error: no such secret: %s", e.name)
+}
+
+// NotFound indicates that this error type is of NotFound
+func (e secretNotFoundError) NotFound() bool {
+	return true
+}
+
+// IsErrSecretNotFound returns true if the error is caused
+// when a secret is not found.
+func IsErrSecretNotFound(err error) bool {
+	_, ok := err.(secretNotFoundError)
+	return ok
+}
+
+// configNotFoundError implements an error returned when a config is not found.
+type configNotFoundError struct {
+	name string
+}
+
+// Error returns a string representation of a configNotFoundError
+func (e configNotFoundError) Error() string {
+	return fmt.Sprintf("Error: no such config: %s", e.name)
+}
+
+// NotFound indicates that this error type is of NotFound
+func (e configNotFoundError) NotFound() bool {
+	return true
+}
+
+// IsErrConfigNotFound returns true if the error is caused
+// when a config is not found.
+func IsErrConfigNotFound(err error) bool {
+	_, ok := err.(configNotFoundError)
+	return ok
+}
+
+// pluginNotFoundError implements an error returned when a plugin is not in the docker host.
+type pluginNotFoundError struct {
+	name string
+}
+
+// NotFound indicates that this error type is of NotFound
+func (e pluginNotFoundError) NotFound() bool {
+	return true
+}
+
+// Error returns a string representation of a pluginNotFoundError
+func (e pluginNotFoundError) Error() string {
+	return fmt.Sprintf("Error: No such plugin: %s", e.name)
+}
+
+// IsErrPluginNotFound returns true if the error is caused
+// when a plugin is not found in the docker host.
+func IsErrPluginNotFound(err error) bool {
+	return IsErrNotFound(err)
+}
diff --git a/components/engine/client/events.go b/components/engine/client/events.go
new file mode 100644
index 0000000000..af47aefa74
--- /dev/null
+++ b/components/engine/client/events.go
@@ -0,0 +1,102 @@
+package client
+
+import (
+	"encoding/json"
+	"net/url"
+	"time"
+
+	"golang.org/x/net/context"
+
+	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/api/types/events"
+	"github.com/docker/docker/api/types/filters"
+	timetypes "github.com/docker/docker/api/types/time"
+)
+
+// Events returns a stream of events in the daemon. It's up to the caller to close the stream
+// by cancelling the context. Once the stream has been completely read an io.EOF error will
+// be sent over the error channel. If an error is sent all processing will be stopped.
It's up +// to the caller to reopen the stream in the event of an error by reinvoking this method. +func (cli *Client) Events(ctx context.Context, options types.EventsOptions) (<-chan events.Message, <-chan error) { + + messages := make(chan events.Message) + errs := make(chan error, 1) + + started := make(chan struct{}) + go func() { + defer close(errs) + + query, err := buildEventsQueryParams(cli.version, options) + if err != nil { + close(started) + errs <- err + return + } + + resp, err := cli.get(ctx, "/events", query, nil) + if err != nil { + close(started) + errs <- err + return + } + defer resp.body.Close() + + decoder := json.NewDecoder(resp.body) + + close(started) + for { + select { + case <-ctx.Done(): + errs <- ctx.Err() + return + default: + var event events.Message + if err := decoder.Decode(&event); err != nil { + errs <- err + return + } + + select { + case messages <- event: + case <-ctx.Done(): + errs <- ctx.Err() + return + } + } + } + }() + <-started + + return messages, errs +} + +func buildEventsQueryParams(cliVersion string, options types.EventsOptions) (url.Values, error) { + query := url.Values{} + ref := time.Now() + + if options.Since != "" { + ts, err := timetypes.GetTimestamp(options.Since, ref) + if err != nil { + return nil, err + } + query.Set("since", ts) + } + + if options.Until != "" { + ts, err := timetypes.GetTimestamp(options.Until, ref) + if err != nil { + return nil, err + } + query.Set("until", ts) + } + + if options.Filters.Len() > 0 { + filterJSON, err := filters.ToParamWithVersion(cliVersion, options.Filters) + if err != nil { + return nil, err + } + query.Set("filters", filterJSON) + } + + return query, nil +} diff --git a/components/engine/client/events_test.go b/components/engine/client/events_test.go new file mode 100644 index 0000000000..ba82d2f542 --- /dev/null +++ b/components/engine/client/events_test.go @@ -0,0 +1,165 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/events" + "github.com/docker/docker/api/types/filters" +) + +func TestEventsErrorInOptions(t *testing.T) { + errorCases := []struct { + options types.EventsOptions + expectedError string + }{ + { + options: types.EventsOptions{ + Since: "2006-01-02TZ", + }, + expectedError: `parsing time "2006-01-02TZ"`, + }, + { + options: types.EventsOptions{ + Until: "2006-01-02TZ", + }, + expectedError: `parsing time "2006-01-02TZ"`, + }, + } + for _, e := range errorCases { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, errs := client.Events(context.Background(), e.options) + err := <-errs + if err == nil || !strings.Contains(err.Error(), e.expectedError) { + t.Fatalf("expected an error %q, got %v", e.expectedError, err) + } + } +} + +func TestEventsErrorFromServer(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, errs := client.Events(context.Background(), types.EventsOptions{}) + err := <-errs + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestEvents(t *testing.T) { + + expectedURL := "/events" + + filters := filters.NewArgs() + filters.Add("type", events.ContainerEventType) + expectedFiltersJSON := fmt.Sprintf(`{"type":{"%s":true}}`, events.ContainerEventType) 
+ + eventsCases := []struct { + options types.EventsOptions + events []events.Message + expectedEvents map[string]bool + expectedQueryParams map[string]string + }{ + { + options: types.EventsOptions{ + Filters: filters, + }, + expectedQueryParams: map[string]string{ + "filters": expectedFiltersJSON, + }, + events: []events.Message{}, + expectedEvents: make(map[string]bool), + }, + { + options: types.EventsOptions{ + Filters: filters, + }, + expectedQueryParams: map[string]string{ + "filters": expectedFiltersJSON, + }, + events: []events.Message{ + { + Type: "container", + ID: "1", + Action: "create", + }, + { + Type: "container", + ID: "2", + Action: "die", + }, + { + Type: "container", + ID: "3", + Action: "create", + }, + }, + expectedEvents: map[string]bool{ + "1": true, + "2": true, + "3": true, + }, + }, + } + + for _, eventsCase := range eventsCases { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + query := req.URL.Query() + + for key, expected := range eventsCase.expectedQueryParams { + actual := query.Get(key) + if actual != expected { + return nil, fmt.Errorf("%s not set in URL query properly. Expected '%s', got %s", key, expected, actual) + } + } + + buffer := new(bytes.Buffer) + + for _, e := range eventsCase.events { + b, _ := json.Marshal(e) + buffer.Write(b) + } + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(buffer), + }, nil + }), + } + + messages, errs := client.Events(context.Background(), eventsCase.options) + + loop: + for { + select { + case err := <-errs: + if err != nil && err != io.EOF { + t.Fatal(err) + } + + break loop + case e := <-messages: + _, ok := eventsCase.expectedEvents[e.ID] + if !ok { + t.Fatalf("event received not expected with action %s & id %s", e.Action, e.ID) + } + } + } + } +} diff --git a/components/engine/client/hijack.go b/components/engine/client/hijack.go new file mode 100644 index 0000000000..74c53f52b3 --- /dev/null +++ b/components/engine/client/hijack.go @@ -0,0 +1,177 @@ +package client + +import ( + "crypto/tls" + "errors" + "fmt" + "net" + "net/http" + "net/http/httputil" + "net/url" + "strings" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/tlsconfig" + "github.com/docker/go-connections/sockets" + "golang.org/x/net/context" +) + +// tlsClientCon holds tls information and a dialed connection. +type tlsClientCon struct { + *tls.Conn + rawConn net.Conn +} + +func (c *tlsClientCon) CloseWrite() error { + // Go standard tls.Conn doesn't provide the CloseWrite() method so we do it + // on its underlying connection. + if conn, ok := c.rawConn.(types.CloseWriter); ok { + return conn.CloseWrite() + } + return nil +} + +// postHijacked sends a POST request and hijacks the connection. 
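+//
+// The types.HijackedResponse built here is what exported callers such as
+// ContainerAttach hand back; a rough consumption sketch (hijacked being that
+// response, error handling elided):
+//
+//	go io.Copy(os.Stdout, hijacked.Reader) // daemon -> caller
+//	io.Copy(hijacked.Conn, os.Stdin)       // caller -> container
+//	hijacked.Close()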
+func (cli *Client) postHijacked(ctx context.Context, path string, query url.Values, body interface{}, headers map[string][]string) (types.HijackedResponse, error) {
+	bodyEncoded, err := encodeData(body)
+	if err != nil {
+		return types.HijackedResponse{}, err
+	}
+
+	apiPath := cli.getAPIPath(path, query)
+	req, err := http.NewRequest("POST", apiPath, bodyEncoded)
+	if err != nil {
+		return types.HijackedResponse{}, err
+	}
+	req = cli.addHeaders(req, headers)
+
+	req.Host = cli.addr
+	req.Header.Set("Connection", "Upgrade")
+	req.Header.Set("Upgrade", "tcp")
+
+	conn, err := dial(cli.proto, cli.addr, resolveTLSConfig(cli.client.Transport))
+	if err != nil {
+		if strings.Contains(err.Error(), "connection refused") {
+			return types.HijackedResponse{}, fmt.Errorf("Cannot connect to the Docker daemon. Is 'docker daemon' running on this host?")
+		}
+		return types.HijackedResponse{}, err
+	}
+
+	// When we set up a TCP connection for hijack, there could be long periods
+	// of inactivity (a long running command with no output) that in certain
+	// network setups may cause ECONNTIMEOUT, leaving the client in an unknown
+	// state. Setting TCP KeepAlive on the socket connection will prohibit
+	// ECONNTIMEOUT unless the socket connection truly is broken
+	if tcpConn, ok := conn.(*net.TCPConn); ok {
+		tcpConn.SetKeepAlive(true)
+		tcpConn.SetKeepAlivePeriod(30 * time.Second)
+	}
+
+	clientconn := httputil.NewClientConn(conn, nil)
+	defer clientconn.Close()
+
+	// Server hijacks the connection, error 'connection closed' expected
+	_, err = clientconn.Do(req)
+
+	rwc, br := clientconn.Hijack()
+
+	return types.HijackedResponse{Conn: rwc, Reader: br}, err
+}
+
+func tlsDial(network, addr string, config *tls.Config) (net.Conn, error) {
+	return tlsDialWithDialer(new(net.Dialer), network, addr, config)
+}
+
+// We need to copy Go's implementation of tls.Dial (crypto/tls/tls.go) in
+// order to return our custom tlsClientCon struct which holds both the tls.Conn
+// object _and_ its underlying raw connection. The rationale for this is that
+// we need to be able to close the write end of the connection when attaching,
+// which tls.Conn does not provide.
+func tlsDialWithDialer(dialer *net.Dialer, network, addr string, config *tls.Config) (net.Conn, error) {
+	// We want the Timeout and Deadline values from dialer to cover the
+	// whole process: TCP connection and TLS handshake. This means that we
+	// also need to start our own timers now.
+	timeout := dialer.Timeout
+
+	if !dialer.Deadline.IsZero() {
+		deadlineTimeout := dialer.Deadline.Sub(time.Now())
+		if timeout == 0 || deadlineTimeout < timeout {
+			timeout = deadlineTimeout
+		}
+	}
+
+	var errChannel chan error
+
+	if timeout != 0 {
+		errChannel = make(chan error, 2)
+		time.AfterFunc(timeout, func() {
+			errChannel <- errors.New("")
+		})
+	}
+
+	proxyDialer, err := sockets.DialerFromEnvironment(dialer)
+	if err != nil {
+		return nil, err
+	}
+
+	rawConn, err := proxyDialer.Dial(network, addr)
+	if err != nil {
+		return nil, err
+	}
+	// When we set up a TCP connection for hijack, there could be long periods
+	// of inactivity (a long running command with no output) that in certain
+	// network setups may cause ECONNTIMEOUT, leaving the client in an unknown
+	// state.
Setting TCP KeepAlive on the socket connection will prohibit
+	// ECONNTIMEOUT unless the socket connection truly is broken
+	if tcpConn, ok := rawConn.(*net.TCPConn); ok {
+		tcpConn.SetKeepAlive(true)
+		tcpConn.SetKeepAlivePeriod(30 * time.Second)
+	}
+
+	colonPos := strings.LastIndex(addr, ":")
+	if colonPos == -1 {
+		colonPos = len(addr)
+	}
+	hostname := addr[:colonPos]
+
+	// If no ServerName is set, infer the ServerName
+	// from the hostname we're connecting to.
+	if config.ServerName == "" {
+		// Make a copy to avoid polluting argument or default.
+		config = tlsconfig.Clone(config)
+		config.ServerName = hostname
+	}
+
+	conn := tls.Client(rawConn, config)
+
+	if timeout == 0 {
+		err = conn.Handshake()
+	} else {
+		go func() {
+			errChannel <- conn.Handshake()
+		}()
+
+		err = <-errChannel
+	}
+
+	if err != nil {
+		rawConn.Close()
+		return nil, err
+	}
+
+	// This is where Docker differs from the standard crypto/tls package: we
+	// return a wrapper which holds both the TLS and raw connections.
+	return &tlsClientCon{conn, rawConn}, nil
+}
+
+func dial(proto, addr string, tlsConfig *tls.Config) (net.Conn, error) {
+	if tlsConfig != nil && proto != "unix" && proto != "npipe" {
+		// Note that this isn't Go's standard tls.Dial function
+		return tlsDial(proto, addr, tlsConfig)
+	}
+	if proto == "npipe" {
+		return sockets.DialPipe(addr, 32*time.Second)
+	}
+	return net.Dial(proto, addr)
+}
diff --git a/components/engine/client/image_build.go b/components/engine/client/image_build.go
new file mode 100644
index 0000000000..bb69143e99
--- /dev/null
+++ b/components/engine/client/image_build.go
@@ -0,0 +1,125 @@
+package client
+
+import (
+	"encoding/base64"
+	"encoding/json"
+	"io"
+	"net/http"
+	"net/url"
+	"strconv"
+
+	"golang.org/x/net/context"
+
+	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/api/types/container"
+)
+
+// ImageBuild sends a request to the daemon to build images.
+// The Body in the response implements io.ReadCloser and it's up to the caller to
+// close it.
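+//
+// A minimal sketch, assuming ctxTar is an io.Reader over a tarred build
+// context and cli is an initialized client:
+//
+//	resp, err := cli.ImageBuild(ctx, ctxTar, types.ImageBuildOptions{
+//		Tags: []string{"example:latest"},
+//	})
+//	if err == nil {
+//		io.Copy(os.Stdout, resp.Body) // progress arrives as a stream of JSON messages
+//		resp.Body.Close()
+//	}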
+func (cli *Client) ImageBuild(ctx context.Context, buildContext io.Reader, options types.ImageBuildOptions) (types.ImageBuildResponse, error) { + query, err := cli.imageBuildOptionsToQuery(options) + if err != nil { + return types.ImageBuildResponse{}, err + } + + headers := http.Header(make(map[string][]string)) + buf, err := json.Marshal(options.AuthConfigs) + if err != nil { + return types.ImageBuildResponse{}, err + } + headers.Add("X-Registry-Config", base64.URLEncoding.EncodeToString(buf)) + headers.Set("Content-Type", "application/x-tar") + + serverResp, err := cli.postRaw(ctx, "/build", query, buildContext, headers) + if err != nil { + return types.ImageBuildResponse{}, err + } + + osType := getDockerOS(serverResp.header.Get("Server")) + + return types.ImageBuildResponse{ + Body: serverResp.body, + OSType: osType, + }, nil +} + +func (cli *Client) imageBuildOptionsToQuery(options types.ImageBuildOptions) (url.Values, error) { + query := url.Values{ + "t": options.Tags, + "securityopt": options.SecurityOpt, + "extrahosts": options.ExtraHosts, + } + if options.SuppressOutput { + query.Set("q", "1") + } + if options.RemoteContext != "" { + query.Set("remote", options.RemoteContext) + } + if options.NoCache { + query.Set("nocache", "1") + } + if options.Remove { + query.Set("rm", "1") + } else { + query.Set("rm", "0") + } + + if options.ForceRemove { + query.Set("forcerm", "1") + } + + if options.PullParent { + query.Set("pull", "1") + } + + if options.Squash { + if err := cli.NewVersionError("1.25", "squash"); err != nil { + return query, err + } + query.Set("squash", "1") + } + + if !container.Isolation.IsDefault(options.Isolation) { + query.Set("isolation", string(options.Isolation)) + } + + query.Set("cpusetcpus", options.CPUSetCPUs) + query.Set("networkmode", options.NetworkMode) + query.Set("cpusetmems", options.CPUSetMems) + query.Set("cpushares", strconv.FormatInt(options.CPUShares, 10)) + query.Set("cpuquota", strconv.FormatInt(options.CPUQuota, 10)) + query.Set("cpuperiod", strconv.FormatInt(options.CPUPeriod, 10)) + query.Set("memory", strconv.FormatInt(options.Memory, 10)) + query.Set("memswap", strconv.FormatInt(options.MemorySwap, 10)) + query.Set("cgroupparent", options.CgroupParent) + query.Set("shmsize", strconv.FormatInt(options.ShmSize, 10)) + query.Set("dockerfile", options.Dockerfile) + query.Set("target", options.Target) + + ulimitsJSON, err := json.Marshal(options.Ulimits) + if err != nil { + return query, err + } + query.Set("ulimits", string(ulimitsJSON)) + + buildArgsJSON, err := json.Marshal(options.BuildArgs) + if err != nil { + return query, err + } + query.Set("buildargs", string(buildArgsJSON)) + + labelsJSON, err := json.Marshal(options.Labels) + if err != nil { + return query, err + } + query.Set("labels", string(labelsJSON)) + + cacheFromJSON, err := json.Marshal(options.CacheFrom) + if err != nil { + return query, err + } + query.Set("cachefrom", string(cacheFromJSON)) + + return query, nil +} diff --git a/components/engine/client/image_build_test.go b/components/engine/client/image_build_test.go new file mode 100644 index 0000000000..1e18b7bda8 --- /dev/null +++ b/components/engine/client/image_build_test.go @@ -0,0 +1,233 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "reflect" + "strings" + "testing" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + "github.com/docker/go-units" +) + +func TestImageBuildError(t *testing.T) { + client := &Client{ 
+ client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.ImageBuild(context.Background(), nil, types.ImageBuildOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestImageBuild(t *testing.T) { + v1 := "value1" + v2 := "value2" + emptyRegistryConfig := "bnVsbA==" + buildCases := []struct { + buildOptions types.ImageBuildOptions + expectedQueryParams map[string]string + expectedTags []string + expectedRegistryConfig string + }{ + { + buildOptions: types.ImageBuildOptions{ + SuppressOutput: true, + NoCache: true, + Remove: true, + ForceRemove: true, + PullParent: true, + }, + expectedQueryParams: map[string]string{ + "q": "1", + "nocache": "1", + "rm": "1", + "forcerm": "1", + "pull": "1", + }, + expectedTags: []string{}, + expectedRegistryConfig: emptyRegistryConfig, + }, + { + buildOptions: types.ImageBuildOptions{ + SuppressOutput: false, + NoCache: false, + Remove: false, + ForceRemove: false, + PullParent: false, + }, + expectedQueryParams: map[string]string{ + "q": "", + "nocache": "", + "rm": "0", + "forcerm": "", + "pull": "", + }, + expectedTags: []string{}, + expectedRegistryConfig: emptyRegistryConfig, + }, + { + buildOptions: types.ImageBuildOptions{ + RemoteContext: "remoteContext", + Isolation: container.Isolation("isolation"), + CPUSetCPUs: "2", + CPUSetMems: "12", + CPUShares: 20, + CPUQuota: 10, + CPUPeriod: 30, + Memory: 256, + MemorySwap: 512, + ShmSize: 10, + CgroupParent: "cgroup_parent", + Dockerfile: "Dockerfile", + }, + expectedQueryParams: map[string]string{ + "remote": "remoteContext", + "isolation": "isolation", + "cpusetcpus": "2", + "cpusetmems": "12", + "cpushares": "20", + "cpuquota": "10", + "cpuperiod": "30", + "memory": "256", + "memswap": "512", + "shmsize": "10", + "cgroupparent": "cgroup_parent", + "dockerfile": "Dockerfile", + "rm": "0", + }, + expectedTags: []string{}, + expectedRegistryConfig: emptyRegistryConfig, + }, + { + buildOptions: types.ImageBuildOptions{ + BuildArgs: map[string]*string{ + "ARG1": &v1, + "ARG2": &v2, + "ARG3": nil, + }, + }, + expectedQueryParams: map[string]string{ + "buildargs": `{"ARG1":"value1","ARG2":"value2","ARG3":null}`, + "rm": "0", + }, + expectedTags: []string{}, + expectedRegistryConfig: emptyRegistryConfig, + }, + { + buildOptions: types.ImageBuildOptions{ + Ulimits: []*units.Ulimit{ + { + Name: "nproc", + Hard: 65557, + Soft: 65557, + }, + { + Name: "nofile", + Hard: 20000, + Soft: 40000, + }, + }, + }, + expectedQueryParams: map[string]string{ + "ulimits": `[{"Name":"nproc","Hard":65557,"Soft":65557},{"Name":"nofile","Hard":20000,"Soft":40000}]`, + "rm": "0", + }, + expectedTags: []string{}, + expectedRegistryConfig: emptyRegistryConfig, + }, + { + buildOptions: types.ImageBuildOptions{ + AuthConfigs: map[string]types.AuthConfig{ + "https://index.docker.io/v1/": { + Auth: "dG90bwo=", + }, + }, + }, + expectedQueryParams: map[string]string{ + "rm": "0", + }, + expectedTags: []string{}, + expectedRegistryConfig: "eyJodHRwczovL2luZGV4LmRvY2tlci5pby92MS8iOnsiYXV0aCI6ImRHOTBid289In19", + }, + } + for _, buildCase := range buildCases { + expectedURL := "/build" + client := &Client{ + client: newMockClient(func(r *http.Request) (*http.Response, error) { + if !strings.HasPrefix(r.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, r.URL) + } + // Check request headers + registryConfig := r.Header.Get("X-Registry-Config") + if 
registryConfig != buildCase.expectedRegistryConfig {
+					return nil, fmt.Errorf("X-Registry-Config header not properly set in the request. Expected '%s', got %s", buildCase.expectedRegistryConfig, registryConfig)
+				}
+				contentType := r.Header.Get("Content-Type")
+				if contentType != "application/x-tar" {
+					return nil, fmt.Errorf("Content-type header not properly set in the request. Expected 'application/x-tar', got %s", contentType)
+				}
+
+				// Check query parameters
+				query := r.URL.Query()
+				for key, expected := range buildCase.expectedQueryParams {
+					actual := query.Get(key)
+					if actual != expected {
+						return nil, fmt.Errorf("%s not set in URL query properly. Expected '%s', got %s", key, expected, actual)
+					}
+				}
+
+				// Check tags
+				if len(buildCase.expectedTags) > 0 {
+					tags := query["t"]
+					if !reflect.DeepEqual(tags, buildCase.expectedTags) {
+						return nil, fmt.Errorf("t (tags) not set in URL query properly. Expected '%s', got %s", buildCase.expectedTags, tags)
+					}
+				}
+
+				headers := http.Header{}
+				headers.Add("Server", "Docker/v1.23 (MyOS)")
+				return &http.Response{
+					StatusCode: http.StatusOK,
+					Body:       ioutil.NopCloser(bytes.NewReader([]byte("body"))),
+					Header:     headers,
+				}, nil
+			}),
+		}
+		buildResponse, err := client.ImageBuild(context.Background(), nil, buildCase.buildOptions)
+		if err != nil {
+			t.Fatal(err)
+		}
+		if buildResponse.OSType != "MyOS" {
+			t.Fatalf("expected OSType to be 'MyOS', got %s", buildResponse.OSType)
+		}
+		response, err := ioutil.ReadAll(buildResponse.Body)
+		if err != nil {
+			t.Fatal(err)
+		}
+		buildResponse.Body.Close()
+		if string(response) != "body" {
+			t.Fatalf("expected Body to contain 'body' string, got %s", response)
+		}
+	}
+}
+
+func TestGetDockerOS(t *testing.T) {
+	cases := map[string]string{
+		"Docker/v1.22 (linux)":   "linux",
+		"Docker/v1.22 (windows)": "windows",
+		"Foo/v1.22 (bar)":        "",
+	}
+	for header, os := range cases {
+		g := getDockerOS(header)
+		if g != os {
+			t.Fatalf("Expected %s, got %s", os, g)
+		}
+	}
+}
diff --git a/components/engine/client/image_create.go b/components/engine/client/image_create.go
new file mode 100644
index 0000000000..4436abb0dd
--- /dev/null
+++ b/components/engine/client/image_create.go
@@ -0,0 +1,34 @@
+package client
+
+import (
+	"io"
+	"net/url"
+
+	"golang.org/x/net/context"
+
+	"github.com/docker/distribution/reference"
+	"github.com/docker/docker/api/types"
+)
+
+// ImageCreate creates a new image based on the parent options.
+// It returns the JSON content in the response body.
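+//
+// A minimal sketch, assuming an initialized client cli; reading the returned
+// body to EOF is how a caller waits for the pull to finish:
+//
+//	rc, err := cli.ImageCreate(ctx, "alpine:3.6", types.ImageCreateOptions{})
+//	if err == nil {
+//		io.Copy(ioutil.Discard, rc)
+//		rc.Close()
+//	}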
+func (cli *Client) ImageCreate(ctx context.Context, parentReference string, options types.ImageCreateOptions) (io.ReadCloser, error) { + ref, err := reference.ParseNormalizedNamed(parentReference) + if err != nil { + return nil, err + } + + query := url.Values{} + query.Set("fromImage", reference.FamiliarName(ref)) + query.Set("tag", getAPITagFromNamedRef(ref)) + resp, err := cli.tryImageCreate(ctx, query, options.RegistryAuth) + if err != nil { + return nil, err + } + return resp.body, nil +} + +func (cli *Client) tryImageCreate(ctx context.Context, query url.Values, registryAuth string) (serverResponse, error) { + headers := map[string][]string{"X-Registry-Auth": {registryAuth}} + return cli.post(ctx, "/images/create", query, nil, headers) +} diff --git a/components/engine/client/image_create_test.go b/components/engine/client/image_create_test.go new file mode 100644 index 0000000000..5c2edd2ad5 --- /dev/null +++ b/components/engine/client/image_create_test.go @@ -0,0 +1,76 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" +) + +func TestImageCreateError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.ImageCreate(context.Background(), "reference", types.ImageCreateOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server error, got %v", err) + } +} + +func TestImageCreate(t *testing.T) { + expectedURL := "/images/create" + expectedImage := "test:5000/my_image" + expectedTag := "sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + expectedReference := fmt.Sprintf("%s@%s", expectedImage, expectedTag) + expectedRegistryAuth := "eyJodHRwczovL2luZGV4LmRvY2tlci5pby92MS8iOnsiYXV0aCI6ImRHOTBid289IiwiZW1haWwiOiJqb2huQGRvZS5jb20ifX0=" + client := &Client{ + client: newMockClient(func(r *http.Request) (*http.Response, error) { + if !strings.HasPrefix(r.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, r.URL) + } + registryAuth := r.Header.Get("X-Registry-Auth") + if registryAuth != expectedRegistryAuth { + return nil, fmt.Errorf("X-Registry-Auth header not properly set in the request. Expected '%s', got %s", expectedRegistryAuth, registryAuth) + } + + query := r.URL.Query() + fromImage := query.Get("fromImage") + if fromImage != expectedImage { + return nil, fmt.Errorf("fromImage not set in URL query properly. Expected '%s', got %s", expectedImage, fromImage) + } + + tag := query.Get("tag") + if tag != expectedTag { + return nil, fmt.Errorf("tag not set in URL query properly. 
Expected '%s', got %s", expectedTag, tag) + } + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte("body"))), + }, nil + }), + } + + createResponse, err := client.ImageCreate(context.Background(), expectedReference, types.ImageCreateOptions{ + RegistryAuth: expectedRegistryAuth, + }) + if err != nil { + t.Fatal(err) + } + response, err := ioutil.ReadAll(createResponse) + if err != nil { + t.Fatal(err) + } + if err = createResponse.Close(); err != nil { + t.Fatal(err) + } + if string(response) != "body" { + t.Fatalf("expected Body to contain 'body' string, got %s", response) + } +} diff --git a/components/engine/client/image_history.go b/components/engine/client/image_history.go new file mode 100644 index 0000000000..7b4babcba3 --- /dev/null +++ b/components/engine/client/image_history.go @@ -0,0 +1,22 @@ +package client + +import ( + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types/image" + "golang.org/x/net/context" +) + +// ImageHistory returns the changes in an image in history format. +func (cli *Client) ImageHistory(ctx context.Context, imageID string) ([]image.HistoryResponseItem, error) { + var history []image.HistoryResponseItem + serverResp, err := cli.get(ctx, "/images/"+imageID+"/history", url.Values{}, nil) + if err != nil { + return history, err + } + + err = json.NewDecoder(serverResp.body).Decode(&history) + ensureReaderClosed(serverResp) + return history, err +} diff --git a/components/engine/client/image_history_test.go b/components/engine/client/image_history_test.go new file mode 100644 index 0000000000..101bffd0c3 --- /dev/null +++ b/components/engine/client/image_history_test.go @@ -0,0 +1,60 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types/image" + "golang.org/x/net/context" +) + +func TestImageHistoryError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.ImageHistory(context.Background(), "nothing") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server error, got %v", err) + } +} + +func TestImageHistory(t *testing.T) { + expectedURL := "/images/image_id/history" + client := &Client{ + client: newMockClient(func(r *http.Request) (*http.Response, error) { + if !strings.HasPrefix(r.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, r.URL) + } + b, err := json.Marshal([]image.HistoryResponseItem{ + { + ID: "image_id1", + Tags: []string{"tag1", "tag2"}, + }, + { + ID: "image_id2", + Tags: []string{"tag1", "tag2"}, + }, + }) + if err != nil { + return nil, err + } + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(b)), + }, nil + }), + } + imageHistories, err := client.ImageHistory(context.Background(), "image_id") + if err != nil { + t.Fatal(err) + } + if len(imageHistories) != 2 { + t.Fatalf("expected 2 containers, got %v", imageHistories) + } +} diff --git a/components/engine/client/image_import.go b/components/engine/client/image_import.go new file mode 100644 index 0000000000..d7dedd8232 --- /dev/null +++ b/components/engine/client/image_import.go @@ -0,0 +1,37 @@ +package client + +import ( + "io" + "net/url" + + "golang.org/x/net/context" + + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" +) + +// ImageImport creates a 
+// It returns the JSON content in the response body.
+func (cli *Client) ImageImport(ctx context.Context, source types.ImageImportSource, ref string, options types.ImageImportOptions) (io.ReadCloser, error) {
+	if ref != "" {
+		//Check if the given image name can be resolved
+		if _, err := reference.ParseNormalizedNamed(ref); err != nil {
+			return nil, err
+		}
+	}
+
+	query := url.Values{}
+	query.Set("fromSrc", source.SourceName)
+	query.Set("repo", ref)
+	query.Set("tag", options.Tag)
+	query.Set("message", options.Message)
+	for _, change := range options.Changes {
+		query.Add("changes", change)
+	}
+
+	resp, err := cli.postRaw(ctx, "/images/create", query, source.Source, nil)
+	if err != nil {
+		return nil, err
+	}
+	return resp.body, nil
+}
diff --git a/components/engine/client/image_import_test.go b/components/engine/client/image_import_test.go
new file mode 100644
index 0000000000..370ad5fbed
--- /dev/null
+++ b/components/engine/client/image_import_test.go
@@ -0,0 +1,81 @@
+package client
+
+import (
+	"bytes"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"reflect"
+	"strings"
+	"testing"
+
+	"github.com/docker/docker/api/types"
+	"golang.org/x/net/context"
+)
+
+func TestImageImportError(t *testing.T) {
+	client := &Client{
+		client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")),
+	}
+	_, err := client.ImageImport(context.Background(), types.ImageImportSource{}, "image:tag", types.ImageImportOptions{})
+	if err == nil || err.Error() != "Error response from daemon: Server error" {
+		t.Fatalf("expected a Server error, got %v", err)
+	}
+}
+
+func TestImageImport(t *testing.T) {
+	expectedURL := "/images/create"
+	client := &Client{
+		client: newMockClient(func(r *http.Request) (*http.Response, error) {
+			if !strings.HasPrefix(r.URL.Path, expectedURL) {
+				return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, r.URL)
+			}
+			query := r.URL.Query()
+			fromSrc := query.Get("fromSrc")
+			if fromSrc != "image_source" {
+				return nil, fmt.Errorf("fromSrc not set in URL query properly. Expected 'image_source', got %s", fromSrc)
+			}
+			repo := query.Get("repo")
+			if repo != "repository_name:imported" {
+				return nil, fmt.Errorf("repo not set in URL query properly. Expected 'repository_name:imported', got %s", repo)
+			}
+			tag := query.Get("tag")
+			if tag != "imported" {
+				return nil, fmt.Errorf("tag not set in URL query properly. Expected 'imported', got %s", tag)
+			}
+			message := query.Get("message")
+			if message != "A message" {
+				return nil, fmt.Errorf("message not set in URL query properly. Expected 'A message', got %s", message)
+			}
+			changes := query["changes"]
+			expectedChanges := []string{"change1", "change2"}
+			if !reflect.DeepEqual(expectedChanges, changes) {
+				return nil, fmt.Errorf("changes not set in URL query properly. 
Expected %v, got %v", expectedChanges, changes) + } + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte("response"))), + }, nil + }), + } + importResponse, err := client.ImageImport(context.Background(), types.ImageImportSource{ + Source: strings.NewReader("source"), + SourceName: "image_source", + }, "repository_name:imported", types.ImageImportOptions{ + Tag: "imported", + Message: "A message", + Changes: []string{"change1", "change2"}, + }) + if err != nil { + t.Fatal(err) + } + response, err := ioutil.ReadAll(importResponse) + if err != nil { + t.Fatal(err) + } + importResponse.Close() + if string(response) != "response" { + t.Fatalf("expected response to contain 'response', got %s", string(response)) + } +} diff --git a/components/engine/client/image_inspect.go b/components/engine/client/image_inspect.go new file mode 100644 index 0000000000..b3a64ce2f8 --- /dev/null +++ b/components/engine/client/image_inspect.go @@ -0,0 +1,33 @@ +package client + +import ( + "bytes" + "encoding/json" + "io/ioutil" + "net/http" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// ImageInspectWithRaw returns the image information and its raw representation. +func (cli *Client) ImageInspectWithRaw(ctx context.Context, imageID string) (types.ImageInspect, []byte, error) { + serverResp, err := cli.get(ctx, "/images/"+imageID+"/json", nil, nil) + if err != nil { + if serverResp.statusCode == http.StatusNotFound { + return types.ImageInspect{}, nil, imageNotFoundError{imageID} + } + return types.ImageInspect{}, nil, err + } + defer ensureReaderClosed(serverResp) + + body, err := ioutil.ReadAll(serverResp.body) + if err != nil { + return types.ImageInspect{}, nil, err + } + + var response types.ImageInspect + rdr := bytes.NewReader(body) + err = json.NewDecoder(rdr).Decode(&response) + return response, body, err +} diff --git a/components/engine/client/image_inspect_test.go b/components/engine/client/image_inspect_test.go new file mode 100644 index 0000000000..74a4e49805 --- /dev/null +++ b/components/engine/client/image_inspect_test.go @@ -0,0 +1,71 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "reflect" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +func TestImageInspectError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, _, err := client.ImageInspectWithRaw(context.Background(), "nothing") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestImageInspectImageNotFound(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusNotFound, "Server error")), + } + + _, _, err := client.ImageInspectWithRaw(context.Background(), "unknown") + if err == nil || !IsErrImageNotFound(err) { + t.Fatalf("expected an imageNotFound error, got %v", err) + } +} + +func TestImageInspect(t *testing.T) { + expectedURL := "/images/image_id/json" + expectedTags := []string{"tag1", "tag2"} + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + content, err := json.Marshal(types.ImageInspect{ + ID: "image_id", + RepoTags: expectedTags, + }) + if err != nil { + return nil, err + } 
+ return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + imageInspect, _, err := client.ImageInspectWithRaw(context.Background(), "image_id") + if err != nil { + t.Fatal(err) + } + if imageInspect.ID != "image_id" { + t.Fatalf("expected `image_id`, got %s", imageInspect.ID) + } + if !reflect.DeepEqual(imageInspect.RepoTags, expectedTags) { + t.Fatalf("expected `%v`, got %v", expectedTags, imageInspect.RepoTags) + } +} diff --git a/components/engine/client/image_list.go b/components/engine/client/image_list.go new file mode 100644 index 0000000000..f26464f67c --- /dev/null +++ b/components/engine/client/image_list.go @@ -0,0 +1,45 @@ +package client + +import ( + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/versions" + "golang.org/x/net/context" +) + +// ImageList returns a list of images in the docker host. +func (cli *Client) ImageList(ctx context.Context, options types.ImageListOptions) ([]types.ImageSummary, error) { + var images []types.ImageSummary + query := url.Values{} + + optionFilters := options.Filters + referenceFilters := optionFilters.Get("reference") + if versions.LessThan(cli.version, "1.25") && len(referenceFilters) > 0 { + query.Set("filter", referenceFilters[0]) + for _, filterValue := range referenceFilters { + optionFilters.Del("reference", filterValue) + } + } + if optionFilters.Len() > 0 { + filterJSON, err := filters.ToParamWithVersion(cli.version, optionFilters) + if err != nil { + return images, err + } + query.Set("filters", filterJSON) + } + if options.All { + query.Set("all", "1") + } + + serverResp, err := cli.get(ctx, "/images/json", query, nil) + if err != nil { + return images, err + } + + err = json.NewDecoder(serverResp.body).Decode(&images) + ensureReaderClosed(serverResp) + return images, err +} diff --git a/components/engine/client/image_list_test.go b/components/engine/client/image_list_test.go new file mode 100644 index 0000000000..7c4a46414d --- /dev/null +++ b/components/engine/client/image_list_test.go @@ -0,0 +1,159 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "golang.org/x/net/context" +) + +func TestImageListError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, err := client.ImageList(context.Background(), types.ImageListOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestImageList(t *testing.T) { + expectedURL := "/images/json" + + noDanglingfilters := filters.NewArgs() + noDanglingfilters.Add("dangling", "false") + + filters := filters.NewArgs() + filters.Add("label", "label1") + filters.Add("label", "label2") + filters.Add("dangling", "true") + + listCases := []struct { + options types.ImageListOptions + expectedQueryParams map[string]string + }{ + { + options: types.ImageListOptions{}, + expectedQueryParams: map[string]string{ + "all": "", + "filter": "", + "filters": "", + }, + }, + { + options: types.ImageListOptions{ + Filters: filters, + }, + expectedQueryParams: map[string]string{ + "all": "", + "filter": "", + "filters": `{"dangling":{"true":true},"label":{"label1":true,"label2":true}}`, + }, + }, + { + 
options: types.ImageListOptions{ + Filters: noDanglingfilters, + }, + expectedQueryParams: map[string]string{ + "all": "", + "filter": "", + "filters": `{"dangling":{"false":true}}`, + }, + }, + } + for _, listCase := range listCases { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + query := req.URL.Query() + for key, expected := range listCase.expectedQueryParams { + actual := query.Get(key) + if actual != expected { + return nil, fmt.Errorf("%s not set in URL query properly. Expected '%s', got %s", key, expected, actual) + } + } + content, err := json.Marshal([]types.ImageSummary{ + { + ID: "image_id2", + }, + { + ID: "image_id2", + }, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + images, err := client.ImageList(context.Background(), listCase.options) + if err != nil { + t.Fatal(err) + } + if len(images) != 2 { + t.Fatalf("expected 2 images, got %v", images) + } + } +} + +func TestImageListApiBefore125(t *testing.T) { + expectedFilter := "image:tag" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + query := req.URL.Query() + actualFilter := query.Get("filter") + if actualFilter != expectedFilter { + return nil, fmt.Errorf("filter not set in URL query properly. Expected '%s', got %s", expectedFilter, actualFilter) + } + actualFilters := query.Get("filters") + if actualFilters != "" { + return nil, fmt.Errorf("filters should have not been present, were with value: %s", actualFilters) + } + content, err := json.Marshal([]types.ImageSummary{ + { + ID: "image_id2", + }, + { + ID: "image_id2", + }, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + version: "1.24", + } + + filters := filters.NewArgs() + filters.Add("reference", "image:tag") + + options := types.ImageListOptions{ + Filters: filters, + } + + images, err := client.ImageList(context.Background(), options) + if err != nil { + t.Fatal(err) + } + if len(images) != 2 { + t.Fatalf("expected 2 images, got %v", images) + } +} diff --git a/components/engine/client/image_load.go b/components/engine/client/image_load.go new file mode 100644 index 0000000000..77aaf1af36 --- /dev/null +++ b/components/engine/client/image_load.go @@ -0,0 +1,30 @@ +package client + +import ( + "io" + "net/url" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" +) + +// ImageLoad loads an image in the docker host from the client host. +// It's up to the caller to close the io.ReadCloser in the +// ImageLoadResponse returned by this function. 
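+//
+// A minimal sketch, assuming tarball is an io.Reader over a "docker save"
+// style archive and cli is an initialized client:
+//
+//	resp, err := cli.ImageLoad(ctx, tarball, true)
+//	if err == nil {
+//		io.Copy(os.Stdout, resp.Body)
+//		resp.Body.Close()
+//	}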
+func (cli *Client) ImageLoad(ctx context.Context, input io.Reader, quiet bool) (types.ImageLoadResponse, error) { + v := url.Values{} + v.Set("quiet", "0") + if quiet { + v.Set("quiet", "1") + } + headers := map[string][]string{"Content-Type": {"application/x-tar"}} + resp, err := cli.postRaw(ctx, "/images/load", v, input, headers) + if err != nil { + return types.ImageLoadResponse{}, err + } + return types.ImageLoadResponse{ + Body: resp.body, + JSON: resp.header.Get("Content-Type") == "application/json", + }, nil +} diff --git a/components/engine/client/image_load_test.go b/components/engine/client/image_load_test.go new file mode 100644 index 0000000000..68dc14ff22 --- /dev/null +++ b/components/engine/client/image_load_test.go @@ -0,0 +1,95 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" +) + +func TestImageLoadError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, err := client.ImageLoad(context.Background(), nil, true) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestImageLoad(t *testing.T) { + expectedURL := "/images/load" + expectedInput := "inputBody" + expectedOutput := "outputBody" + loadCases := []struct { + quiet bool + responseContentType string + expectedResponseJSON bool + expectedQueryParams map[string]string + }{ + { + quiet: false, + responseContentType: "text/plain", + expectedResponseJSON: false, + expectedQueryParams: map[string]string{ + "quiet": "0", + }, + }, + { + quiet: true, + responseContentType: "application/json", + expectedResponseJSON: true, + expectedQueryParams: map[string]string{ + "quiet": "1", + }, + }, + } + for _, loadCase := range loadCases { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + contentType := req.Header.Get("Content-Type") + if contentType != "application/x-tar" { + return nil, fmt.Errorf("content-type not set in URL headers properly. Expected 'application/x-tar', got %s", contentType) + } + query := req.URL.Query() + for key, expected := range loadCase.expectedQueryParams { + actual := query.Get(key) + if actual != expected { + return nil, fmt.Errorf("%s not set in URL query properly. 
Expected '%s', got %s", key, expected, actual) + } + } + headers := http.Header{} + headers.Add("Content-Type", loadCase.responseContentType) + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(expectedOutput))), + Header: headers, + }, nil + }), + } + + input := bytes.NewReader([]byte(expectedInput)) + imageLoadResponse, err := client.ImageLoad(context.Background(), input, loadCase.quiet) + if err != nil { + t.Fatal(err) + } + if imageLoadResponse.JSON != loadCase.expectedResponseJSON { + t.Fatalf("expected a JSON response, was not.") + } + body, err := ioutil.ReadAll(imageLoadResponse.Body) + if err != nil { + t.Fatal(err) + } + if string(body) != expectedOutput { + t.Fatalf("expected %s, got %s", expectedOutput, string(body)) + } + } +} diff --git a/components/engine/client/image_prune.go b/components/engine/client/image_prune.go new file mode 100644 index 0000000000..5ef98b7f02 --- /dev/null +++ b/components/engine/client/image_prune.go @@ -0,0 +1,36 @@ +package client + +import ( + "encoding/json" + "fmt" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "golang.org/x/net/context" +) + +// ImagesPrune requests the daemon to delete unused data +func (cli *Client) ImagesPrune(ctx context.Context, pruneFilters filters.Args) (types.ImagesPruneReport, error) { + var report types.ImagesPruneReport + + if err := cli.NewVersionError("1.25", "image prune"); err != nil { + return report, err + } + + query, err := getFiltersQuery(pruneFilters) + if err != nil { + return report, err + } + + serverResp, err := cli.post(ctx, "/images/prune", query, nil, nil) + if err != nil { + return report, err + } + defer ensureReaderClosed(serverResp) + + if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil { + return report, fmt.Errorf("Error retrieving disk usage: %v", err) + } + + return report, nil +} diff --git a/components/engine/client/image_prune_test.go b/components/engine/client/image_prune_test.go new file mode 100644 index 0000000000..453f84adee --- /dev/null +++ b/components/engine/client/image_prune_test.go @@ -0,0 +1,119 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/stretchr/testify/assert" + "golang.org/x/net/context" +) + +func TestImagesPruneError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + version: "1.25", + } + + filters := filters.NewArgs() + + _, err := client.ImagesPrune(context.Background(), filters) + assert.EqualError(t, err, "Error response from daemon: Server error") +} + +func TestImagesPrune(t *testing.T) { + expectedURL := "/v1.25/images/prune" + + danglingFilters := filters.NewArgs() + danglingFilters.Add("dangling", "true") + + noDanglingFilters := filters.NewArgs() + noDanglingFilters.Add("dangling", "false") + + labelFilters := filters.NewArgs() + labelFilters.Add("dangling", "true") + labelFilters.Add("label", "label1=foo") + labelFilters.Add("label", "label2!=bar") + + listCases := []struct { + filters filters.Args + expectedQueryParams map[string]string + }{ + { + filters: filters.Args{}, + expectedQueryParams: map[string]string{ + "until": "", + "filter": "", + "filters": "", + }, + }, + { + filters: danglingFilters, + expectedQueryParams: map[string]string{ + "until": "", + "filter": "", + "filters": 
`{"dangling":{"true":true}}`, + }, + }, + { + filters: noDanglingFilters, + expectedQueryParams: map[string]string{ + "until": "", + "filter": "", + "filters": `{"dangling":{"false":true}}`, + }, + }, + { + filters: labelFilters, + expectedQueryParams: map[string]string{ + "until": "", + "filter": "", + "filters": `{"dangling":{"true":true},"label":{"label1=foo":true,"label2!=bar":true}}`, + }, + }, + } + for _, listCase := range listCases { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + query := req.URL.Query() + for key, expected := range listCase.expectedQueryParams { + actual := query.Get(key) + assert.Equal(t, expected, actual) + } + content, err := json.Marshal(types.ImagesPruneReport{ + ImagesDeleted: []types.ImageDeleteResponseItem{ + { + Deleted: "image_id1", + }, + { + Deleted: "image_id2", + }, + }, + SpaceReclaimed: 9999, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + version: "1.25", + } + + report, err := client.ImagesPrune(context.Background(), listCase.filters) + assert.NoError(t, err) + assert.Len(t, report.ImagesDeleted, 2) + assert.Equal(t, uint64(9999), report.SpaceReclaimed) + } +} diff --git a/components/engine/client/image_pull.go b/components/engine/client/image_pull.go new file mode 100644 index 0000000000..a72b9bf7fc --- /dev/null +++ b/components/engine/client/image_pull.go @@ -0,0 +1,61 @@ +package client + +import ( + "io" + "net/http" + "net/url" + + "golang.org/x/net/context" + + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" +) + +// ImagePull requests the docker host to pull an image from a remote registry. +// It executes the privileged function if the operation is unauthorized +// and it tries one more time. +// It's up to the caller to handle the io.ReadCloser and close it properly. +// +// FIXME(vdemeester): there is currently used in a few way in docker/docker +// - if not in trusted content, ref is used to pass the whole reference, and tag is empty +// - if in trusted content, ref is used to pass the reference name, and tag for the digest +func (cli *Client) ImagePull(ctx context.Context, refStr string, options types.ImagePullOptions) (io.ReadCloser, error) { + ref, err := reference.ParseNormalizedNamed(refStr) + if err != nil { + return nil, err + } + + query := url.Values{} + query.Set("fromImage", reference.FamiliarName(ref)) + if !options.All { + query.Set("tag", getAPITagFromNamedRef(ref)) + } + + resp, err := cli.tryImageCreate(ctx, query, options.RegistryAuth) + if resp.statusCode == http.StatusUnauthorized && options.PrivilegeFunc != nil { + newAuthHeader, privilegeErr := options.PrivilegeFunc() + if privilegeErr != nil { + return nil, privilegeErr + } + resp, err = cli.tryImageCreate(ctx, query, newAuthHeader) + } + if err != nil { + return nil, err + } + return resp.body, nil +} + +// getAPITagFromNamedRef returns a tag from the specified reference. +// This function is necessary as long as the docker "server" api expects +// digests to be sent as tags and makes a distinction between the name +// and tag/digest part of a reference. 
+func getAPITagFromNamedRef(ref reference.Named) string { + if digested, ok := ref.(reference.Digested); ok { + return digested.Digest().String() + } + ref = reference.TagNameOnly(ref) + if tagged, ok := ref.(reference.Tagged); ok { + return tagged.Tag() + } + return "" +} diff --git a/components/engine/client/image_pull_test.go b/components/engine/client/image_pull_test.go new file mode 100644 index 0000000000..ab49d2d349 --- /dev/null +++ b/components/engine/client/image_pull_test.go @@ -0,0 +1,199 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" +) + +func TestImagePullReferenceParseError(t *testing.T) { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + return nil, nil + }), + } + // An empty reference is an invalid reference + _, err := client.ImagePull(context.Background(), "", types.ImagePullOptions{}) + if err == nil || !strings.Contains(err.Error(), "invalid reference format") { + t.Fatalf("expected an error, got %v", err) + } +} + +func TestImagePullAnyError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.ImagePull(context.Background(), "myimage", types.ImagePullOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestImagePullStatusUnauthorizedError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusUnauthorized, "Unauthorized error")), + } + _, err := client.ImagePull(context.Background(), "myimage", types.ImagePullOptions{}) + if err == nil || err.Error() != "Error response from daemon: Unauthorized error" { + t.Fatalf("expected an Unauthorized Error, got %v", err) + } +} + +func TestImagePullWithUnauthorizedErrorAndPrivilegeFuncError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusUnauthorized, "Unauthorized error")), + } + privilegeFunc := func() (string, error) { + return "", fmt.Errorf("Error requesting privilege") + } + _, err := client.ImagePull(context.Background(), "myimage", types.ImagePullOptions{ + PrivilegeFunc: privilegeFunc, + }) + if err == nil || err.Error() != "Error requesting privilege" { + t.Fatalf("expected an error requesting privilege, got %v", err) + } +} + +func TestImagePullWithUnauthorizedErrorAndAnotherUnauthorizedError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusUnauthorized, "Unauthorized error")), + } + privilegeFunc := func() (string, error) { + return "a-auth-header", nil + } + _, err := client.ImagePull(context.Background(), "myimage", types.ImagePullOptions{ + PrivilegeFunc: privilegeFunc, + }) + if err == nil || err.Error() != "Error response from daemon: Unauthorized error" { + t.Fatalf("expected an Unauthorized Error, got %v", err) + } +} + +func TestImagePullWithPrivilegedFuncNoError(t *testing.T) { + expectedURL := "/images/create" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + auth := req.Header.Get("X-Registry-Auth") + if auth == "NotValid" { + return &http.Response{ + StatusCode: http.StatusUnauthorized, + Body: ioutil.NopCloser(bytes.NewReader([]byte("Invalid credentials"))), + }, nil + } + 
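// Any retried request must carry the fresh auth header returned by PrivilegeFunc; the check below asserts this. +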
if auth != "IAmValid" { + return nil, fmt.Errorf("Invalid auth header : expected %s, got %s", "IAmValid", auth) + } + query := req.URL.Query() + fromImage := query.Get("fromImage") + if fromImage != "myimage" { + return nil, fmt.Errorf("fromimage not set in URL query properly. Expected '%s', got %s", "myimage", fromImage) + } + tag := query.Get("tag") + if tag != "latest" { + return nil, fmt.Errorf("tag not set in URL query properly. Expected '%s', got %s", "latest", tag) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte("hello world"))), + }, nil + }), + } + privilegeFunc := func() (string, error) { + return "IAmValid", nil + } + resp, err := client.ImagePull(context.Background(), "myimage", types.ImagePullOptions{ + RegistryAuth: "NotValid", + PrivilegeFunc: privilegeFunc, + }) + if err != nil { + t.Fatal(err) + } + body, err := ioutil.ReadAll(resp) + if err != nil { + t.Fatal(err) + } + if string(body) != "hello world" { + t.Fatalf("expected 'hello world', got %s", string(body)) + } +} + +func TestImagePullWithoutErrors(t *testing.T) { + expectedURL := "/images/create" + expectedOutput := "hello world" + pullCases := []struct { + all bool + reference string + expectedImage string + expectedTag string + }{ + { + all: false, + reference: "myimage", + expectedImage: "myimage", + expectedTag: "latest", + }, + { + all: false, + reference: "myimage:tag", + expectedImage: "myimage", + expectedTag: "tag", + }, + { + all: true, + reference: "myimage", + expectedImage: "myimage", + expectedTag: "", + }, + { + all: true, + reference: "myimage:anything", + expectedImage: "myimage", + expectedTag: "", + }, + } + for _, pullCase := range pullCases { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + query := req.URL.Query() + fromImage := query.Get("fromImage") + if fromImage != pullCase.expectedImage { + return nil, fmt.Errorf("fromimage not set in URL query properly. Expected '%s', got %s", pullCase.expectedImage, fromImage) + } + tag := query.Get("tag") + if tag != pullCase.expectedTag { + return nil, fmt.Errorf("tag not set in URL query properly. Expected '%s', got %s", pullCase.expectedTag, tag) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(expectedOutput))), + }, nil + }), + } + resp, err := client.ImagePull(context.Background(), pullCase.reference, types.ImagePullOptions{ + All: pullCase.all, + }) + if err != nil { + t.Fatal(err) + } + body, err := ioutil.ReadAll(resp) + if err != nil { + t.Fatal(err) + } + if string(body) != expectedOutput { + t.Fatalf("expected '%s', got %s", expectedOutput, string(body)) + } + } +} diff --git a/components/engine/client/image_push.go b/components/engine/client/image_push.go new file mode 100644 index 0000000000..410d2fb91d --- /dev/null +++ b/components/engine/client/image_push.go @@ -0,0 +1,56 @@ +package client + +import ( + "errors" + "io" + "net/http" + "net/url" + + "golang.org/x/net/context" + + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" +) + +// ImagePush requests the docker host to push an image to a remote registry. +// It executes the privileged function if the operation is unauthorized +// and it tries one more time. +// It's up to the caller to handle the io.ReadCloser and close it properly. 
+func (cli *Client) ImagePush(ctx context.Context, image string, options types.ImagePushOptions) (io.ReadCloser, error) { + ref, err := reference.ParseNormalizedNamed(image) + if err != nil { + return nil, err + } + + if _, isCanonical := ref.(reference.Canonical); isCanonical { + return nil, errors.New("cannot push a digest reference") + } + + tag := "" + name := reference.FamiliarName(ref) + + if nameTaggedRef, isNamedTagged := ref.(reference.NamedTagged); isNamedTagged { + tag = nameTaggedRef.Tag() + } + + query := url.Values{} + query.Set("tag", tag) + + resp, err := cli.tryImagePush(ctx, name, query, options.RegistryAuth) + if resp.statusCode == http.StatusUnauthorized && options.PrivilegeFunc != nil { + newAuthHeader, privilegeErr := options.PrivilegeFunc() + if privilegeErr != nil { + return nil, privilegeErr + } + resp, err = cli.tryImagePush(ctx, name, query, newAuthHeader) + } + if err != nil { + return nil, err + } + return resp.body, nil +} + +func (cli *Client) tryImagePush(ctx context.Context, imageID string, query url.Values, registryAuth string) (serverResponse, error) { + headers := map[string][]string{"X-Registry-Auth": {registryAuth}} + return cli.post(ctx, "/images/"+imageID+"/push", query, nil, headers) +} diff --git a/components/engine/client/image_push_test.go b/components/engine/client/image_push_test.go new file mode 100644 index 0000000000..f93debf5bb --- /dev/null +++ b/components/engine/client/image_push_test.go @@ -0,0 +1,180 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" +) + +func TestImagePushReferenceError(t *testing.T) { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + return nil, nil + }), + } + // An empty reference is an invalid reference + _, err := client.ImagePush(context.Background(), "", types.ImagePushOptions{}) + if err == nil || !strings.Contains(err.Error(), "invalid reference format") { + t.Fatalf("expected an error, got %v", err) + } + // A canonical reference cannot be pushed + _, err = client.ImagePush(context.Background(), "repo@sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", types.ImagePushOptions{}) + if err == nil || err.Error() != "cannot push a digest reference" { + t.Fatalf("expected an error, got %v", err) + } +} + +func TestImagePushAnyError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.ImagePush(context.Background(), "myimage", types.ImagePushOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestImagePushStatusUnauthorizedError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusUnauthorized, "Unauthorized error")), + } + _, err := client.ImagePush(context.Background(), "myimage", types.ImagePushOptions{}) + if err == nil || err.Error() != "Error response from daemon: Unauthorized error" { + t.Fatalf("expected an Unauthorized Error, got %v", err) + } +} + +func TestImagePushWithUnauthorizedErrorAndPrivilegeFuncError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusUnauthorized, "Unauthorized error")), + } + privilegeFunc := func() (string, error) { + return "", fmt.Errorf("Error requesting privilege") + } + _, err := client.ImagePush(context.Background(), "myimage", 
types.ImagePushOptions{ + PrivilegeFunc: privilegeFunc, + }) + if err == nil || err.Error() != "Error requesting privilege" { + t.Fatalf("expected an error requesting privilege, got %v", err) + } +} + +func TestImagePushWithUnauthorizedErrorAndAnotherUnauthorizedError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusUnauthorized, "Unauthorized error")), + } + privilegeFunc := func() (string, error) { + return "a-auth-header", nil + } + _, err := client.ImagePush(context.Background(), "myimage", types.ImagePushOptions{ + PrivilegeFunc: privilegeFunc, + }) + if err == nil || err.Error() != "Error response from daemon: Unauthorized error" { + t.Fatalf("expected an Unauthorized Error, got %v", err) + } +} + +func TestImagePushWithPrivilegedFuncNoError(t *testing.T) { + expectedURL := "/images/myimage/push" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + auth := req.Header.Get("X-Registry-Auth") + if auth == "NotValid" { + return &http.Response{ + StatusCode: http.StatusUnauthorized, + Body: ioutil.NopCloser(bytes.NewReader([]byte("Invalid credentials"))), + }, nil + } + if auth != "IAmValid" { + return nil, fmt.Errorf("Invalid auth header : expected %s, got %s", "IAmValid", auth) + } + query := req.URL.Query() + tag := query.Get("tag") + if tag != "tag" { + return nil, fmt.Errorf("tag not set in URL query properly. Expected '%s', got %s", "tag", tag) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte("hello world"))), + }, nil + }), + } + privilegeFunc := func() (string, error) { + return "IAmValid", nil + } + resp, err := client.ImagePush(context.Background(), "myimage:tag", types.ImagePushOptions{ + RegistryAuth: "NotValid", + PrivilegeFunc: privilegeFunc, + }) + if err != nil { + t.Fatal(err) + } + body, err := ioutil.ReadAll(resp) + if err != nil { + t.Fatal(err) + } + if string(body) != "hello world" { + t.Fatalf("expected 'hello world', got %s", string(body)) + } +} + +func TestImagePushWithoutErrors(t *testing.T) { + expectedOutput := "hello world" + expectedURLFormat := "/images/%s/push" + pullCases := []struct { + reference string + expectedImage string + expectedTag string + }{ + { + reference: "myimage", + expectedImage: "myimage", + expectedTag: "", + }, + { + reference: "myimage:tag", + expectedImage: "myimage", + expectedTag: "tag", + }, + } + for _, pullCase := range pullCases { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + expectedURL := fmt.Sprintf(expectedURLFormat, pullCase.expectedImage) + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + query := req.URL.Query() + tag := query.Get("tag") + if tag != pullCase.expectedTag { + return nil, fmt.Errorf("tag not set in URL query properly. 
Expected '%s', got %s", pullCase.expectedTag, tag) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(expectedOutput))), + }, nil + }), + } + resp, err := client.ImagePush(context.Background(), pullCase.reference, types.ImagePushOptions{}) + if err != nil { + t.Fatal(err) + } + body, err := ioutil.ReadAll(resp) + if err != nil { + t.Fatal(err) + } + if string(body) != expectedOutput { + t.Fatalf("expected '%s', got %s", expectedOutput, string(body)) + } + } +} diff --git a/components/engine/client/image_remove.go b/components/engine/client/image_remove.go new file mode 100644 index 0000000000..6921209ee1 --- /dev/null +++ b/components/engine/client/image_remove.go @@ -0,0 +1,31 @@ +package client + +import ( + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// ImageRemove removes an image from the docker host. +func (cli *Client) ImageRemove(ctx context.Context, imageID string, options types.ImageRemoveOptions) ([]types.ImageDeleteResponseItem, error) { + query := url.Values{} + + if options.Force { + query.Set("force", "1") + } + if !options.PruneChildren { + query.Set("noprune", "1") + } + + resp, err := cli.delete(ctx, "/images/"+imageID, query, nil) + if err != nil { + return nil, err + } + + var dels []types.ImageDeleteResponseItem + err = json.NewDecoder(resp.body).Decode(&dels) + ensureReaderClosed(resp) + return dels, err +} diff --git a/components/engine/client/image_remove_test.go b/components/engine/client/image_remove_test.go new file mode 100644 index 0000000000..9856311305 --- /dev/null +++ b/components/engine/client/image_remove_test.go @@ -0,0 +1,95 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +func TestImageRemoveError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, err := client.ImageRemove(context.Background(), "image_id", types.ImageRemoveOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestImageRemove(t *testing.T) { + expectedURL := "/images/image_id" + removeCases := []struct { + force bool + pruneChildren bool + expectedQueryParams map[string]string + }{ + { + force: false, + pruneChildren: false, + expectedQueryParams: map[string]string{ + "force": "", + "noprune": "1", + }, + }, { + force: true, + pruneChildren: true, + expectedQueryParams: map[string]string{ + "force": "1", + "noprune": "", + }, + }, + } + for _, removeCase := range removeCases { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "DELETE" { + return nil, fmt.Errorf("expected DELETE method, got %s", req.Method) + } + query := req.URL.Query() + for key, expected := range removeCase.expectedQueryParams { + actual := query.Get(key) + if actual != expected { + return nil, fmt.Errorf("%s not set in URL query properly. 
Expected '%s', got %s", key, expected, actual) + } + } + b, err := json.Marshal([]types.ImageDeleteResponseItem{ + { + Untagged: "image_id1", + }, + { + Deleted: "image_id", + }, + }) + if err != nil { + return nil, err + } + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(b)), + }, nil + }), + } + imageDeletes, err := client.ImageRemove(context.Background(), "image_id", types.ImageRemoveOptions{ + Force: removeCase.force, + PruneChildren: removeCase.pruneChildren, + }) + if err != nil { + t.Fatal(err) + } + if len(imageDeletes) != 2 { + t.Fatalf("expected 2 deleted images, got %v", imageDeletes) + } + } +} diff --git a/components/engine/client/image_save.go b/components/engine/client/image_save.go new file mode 100644 index 0000000000..ecac880a32 --- /dev/null +++ b/components/engine/client/image_save.go @@ -0,0 +1,22 @@ +package client + +import ( + "io" + "net/url" + + "golang.org/x/net/context" +) + +// ImageSave retrieves one or more images from the docker host as an io.ReadCloser. +// It's up to the caller to store the images and close the stream. +func (cli *Client) ImageSave(ctx context.Context, imageIDs []string) (io.ReadCloser, error) { + query := url.Values{ + "names": imageIDs, + } + + resp, err := cli.get(ctx, "/images/get", query, nil) + if err != nil { + return nil, err + } + return resp.body, nil +} diff --git a/components/engine/client/image_save_test.go b/components/engine/client/image_save_test.go new file mode 100644 index 0000000000..8f0cf88640 --- /dev/null +++ b/components/engine/client/image_save_test.go @@ -0,0 +1,58 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "reflect" + "testing" + + "golang.org/x/net/context" + + "strings" +) + +func TestImageSaveError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.ImageSave(context.Background(), []string{"nothing"}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server error, got %v", err) + } +} + +func TestImageSave(t *testing.T) { + expectedURL := "/images/get" + client := &Client{ + client: newMockClient(func(r *http.Request) (*http.Response, error) { + if !strings.HasPrefix(r.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, r.URL) + } + query := r.URL.Query() + names := query["names"] + expectedNames := []string{"image_id1", "image_id2"} + if !reflect.DeepEqual(names, expectedNames) { + return nil, fmt.Errorf("names not set in URL query properly. 
Expected %v, got %v", expectedNames, names) + } + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte("response"))), + }, nil + }), + } + saveResponse, err := client.ImageSave(context.Background(), []string{"image_id1", "image_id2"}) + if err != nil { + t.Fatal(err) + } + response, err := ioutil.ReadAll(saveResponse) + if err != nil { + t.Fatal(err) + } + saveResponse.Close() + if string(response) != "response" { + t.Fatalf("expected response to contain 'response', got %s", string(response)) + } +} diff --git a/components/engine/client/image_search.go b/components/engine/client/image_search.go new file mode 100644 index 0000000000..b0fcd5c23d --- /dev/null +++ b/components/engine/client/image_search.go @@ -0,0 +1,51 @@ +package client + +import ( + "encoding/json" + "fmt" + "net/http" + "net/url" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/registry" + "golang.org/x/net/context" +) + +// ImageSearch makes the docker host search a remote registry for images matching the given term. +// The list of results is not sorted in any fashion. +func (cli *Client) ImageSearch(ctx context.Context, term string, options types.ImageSearchOptions) ([]registry.SearchResult, error) { + var results []registry.SearchResult + query := url.Values{} + query.Set("term", term) + query.Set("limit", fmt.Sprintf("%d", options.Limit)) + + if options.Filters.Len() > 0 { + filterJSON, err := filters.ToParam(options.Filters) + if err != nil { + return results, err + } + query.Set("filters", filterJSON) + } + + resp, err := cli.tryImageSearch(ctx, query, options.RegistryAuth) + if resp.statusCode == http.StatusUnauthorized && options.PrivilegeFunc != nil { + newAuthHeader, privilegeErr := options.PrivilegeFunc() + if privilegeErr != nil { + return results, privilegeErr + } + resp, err = cli.tryImageSearch(ctx, query, newAuthHeader) + } + if err != nil { + return results, err + } + + err = json.NewDecoder(resp.body).Decode(&results) + ensureReaderClosed(resp) + return results, err +} + +func (cli *Client) tryImageSearch(ctx context.Context, query url.Values, registryAuth string) (serverResponse, error) { + headers := map[string][]string{"X-Registry-Auth": {registryAuth}} + return cli.get(ctx, "/images/search", query, headers) +} diff --git a/components/engine/client/image_search_test.go b/components/engine/client/image_search_test.go new file mode 100644 index 0000000000..b17bbd8343 --- /dev/null +++ b/components/engine/client/image_search_test.go @@ -0,0 +1,165 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" + + "encoding/json" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/registry" +) + +func TestImageSearchAnyError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.ImageSearch(context.Background(), "some-image", types.ImageSearchOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestImageSearchStatusUnauthorizedError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusUnauthorized, "Unauthorized error")), + } + _, err := client.ImageSearch(context.Background(), "some-image", types.ImageSearchOptions{}) + if err == nil || 
err.Error() != "Error response from daemon: Unauthorized error" { + t.Fatalf("expected an Unauthorized Error, got %v", err) + } +} + +func TestImageSearchWithUnauthorizedErrorAndPrivilegeFuncError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusUnauthorized, "Unauthorized error")), + } + privilegeFunc := func() (string, error) { + return "", fmt.Errorf("Error requesting privilege") + } + _, err := client.ImageSearch(context.Background(), "some-image", types.ImageSearchOptions{ + PrivilegeFunc: privilegeFunc, + }) + if err == nil || err.Error() != "Error requesting privilege" { + t.Fatalf("expected an error requesting privilege, got %v", err) + } +} + +func TestImageSearchWithUnauthorizedErrorAndAnotherUnauthorizedError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusUnauthorized, "Unauthorized error")), + } + privilegeFunc := func() (string, error) { + return "a-auth-header", nil + } + _, err := client.ImageSearch(context.Background(), "some-image", types.ImageSearchOptions{ + PrivilegeFunc: privilegeFunc, + }) + if err == nil || err.Error() != "Error response from daemon: Unauthorized error" { + t.Fatalf("expected an Unauthorized Error, got %v", err) + } +} + +func TestImageSearchWithPrivilegedFuncNoError(t *testing.T) { + expectedURL := "/images/search" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + auth := req.Header.Get("X-Registry-Auth") + if auth == "NotValid" { + return &http.Response{ + StatusCode: http.StatusUnauthorized, + Body: ioutil.NopCloser(bytes.NewReader([]byte("Invalid credentials"))), + }, nil + } + if auth != "IAmValid" { + return nil, fmt.Errorf("Invalid auth header : expected 'IAmValid', got %s", auth) + } + query := req.URL.Query() + term := query.Get("term") + if term != "some-image" { + return nil, fmt.Errorf("term not set in URL query properly. Expected 'some-image', got %s", term) + } + content, err := json.Marshal([]registry.SearchResult{ + { + Name: "anything", + }, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + privilegeFunc := func() (string, error) { + return "IAmValid", nil + } + results, err := client.ImageSearch(context.Background(), "some-image", types.ImageSearchOptions{ + RegistryAuth: "NotValid", + PrivilegeFunc: privilegeFunc, + }) + if err != nil { + t.Fatal(err) + } + if len(results) != 1 { + t.Fatalf("expected 1 result, got %v", results) + } +} + +func TestImageSearchWithoutErrors(t *testing.T) { + expectedURL := "/images/search" + filterArgs := filters.NewArgs() + filterArgs.Add("is-automated", "true") + filterArgs.Add("stars", "3") + + expectedFilters := `{"is-automated":{"true":true},"stars":{"3":true}}` + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + query := req.URL.Query() + term := query.Get("term") + if term != "some-image" { + return nil, fmt.Errorf("term not set in URL query properly. Expected 'some-image', got %s", term) + } + filters := query.Get("filters") + if filters != expectedFilters { + return nil, fmt.Errorf("filters not set in URL query properly. 
Expected '%s', got %s", expectedFilters, filters) + } + content, err := json.Marshal([]registry.SearchResult{ + { + Name: "anything", + }, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + results, err := client.ImageSearch(context.Background(), "some-image", types.ImageSearchOptions{ + Filters: filterArgs, + }) + if err != nil { + t.Fatal(err) + } + if len(results) != 1 { + t.Fatalf("expected a result, got %v", results) + } +} diff --git a/components/engine/client/image_tag.go b/components/engine/client/image_tag.go new file mode 100644 index 0000000000..8924f71eb3 --- /dev/null +++ b/components/engine/client/image_tag.go @@ -0,0 +1,37 @@ +package client + +import ( + "net/url" + + "github.com/docker/distribution/reference" + "github.com/pkg/errors" + "golang.org/x/net/context" +) + +// ImageTag tags an image in the docker host +func (cli *Client) ImageTag(ctx context.Context, source, target string) error { + if _, err := reference.ParseAnyReference(source); err != nil { + return errors.Wrapf(err, "Error parsing reference: %q is not a valid repository/tag", source) + } + + ref, err := reference.ParseNormalizedNamed(target) + if err != nil { + return errors.Wrapf(err, "Error parsing reference: %q is not a valid repository/tag", target) + } + + if _, isCanonical := ref.(reference.Canonical); isCanonical { + return errors.New("refusing to create a tag with a digest reference") + } + + ref = reference.TagNameOnly(ref) + + query := url.Values{} + query.Set("repo", reference.FamiliarName(ref)) + if tagged, ok := ref.(reference.Tagged); ok { + query.Set("tag", tagged.Tag()) + } + + resp, err := cli.post(ctx, "/images/"+source+"/tag", query, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/components/engine/client/image_tag_test.go b/components/engine/client/image_tag_test.go new file mode 100644 index 0000000000..f7a0ee331c --- /dev/null +++ b/components/engine/client/image_tag_test.go @@ -0,0 +1,143 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" +) + +func TestImageTagError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + err := client.ImageTag(context.Background(), "image_id", "repo:tag") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +// Note: this is not testing all the InvalidReference as it's the responsibility +// of distribution/reference package. 
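+//
+// For contrast, a well-formed call looks like this (illustrative sketch):
+//
+//	err := cli.ImageTag(ctx, "image_id", "myrepo/myimage:v1")
+//
+// The target must parse as repo[:tag]; a digest reference is rejected by
+// ImageTag with "refusing to create a tag with a digest reference".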
+func TestImageTagInvalidReference(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + err := client.ImageTag(context.Background(), "image_id", "aa/asdf$$^/aa") + if err == nil || err.Error() != `Error parsing reference: "aa/asdf$$^/aa" is not a valid repository/tag: invalid reference format` { + t.Fatalf("expected ErrReferenceInvalidFormat, got %v", err) + } +} + +func TestImageTagInvalidSourceImageName(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + err := client.ImageTag(context.Background(), "invalid_source_image_name_", "repo:tag") + if err == nil || err.Error() != "Error parsing reference: \"invalid_source_image_name_\" is not a valid repository/tag: invalid reference format" { + t.Fatalf("expected Parsing Reference Error, got %v", err) + } +} + +func TestImageTagHexSource(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusOK, "OK")), + } + + err := client.ImageTag(context.Background(), "0d409d33b27e47423b049f7f863faa08655a8c901749c2b25b93ca67d01a470d", "repo:tag") + if err != nil { + t.Fatalf("got error: %v", err) + } +} + +func TestImageTag(t *testing.T) { + expectedURL := "/images/image_id/tag" + tagCases := []struct { + reference string + expectedQueryParams map[string]string + }{ + { + reference: "repository:tag1", + expectedQueryParams: map[string]string{ + "repo": "repository", + "tag": "tag1", + }, + }, { + reference: "another_repository:latest", + expectedQueryParams: map[string]string{ + "repo": "another_repository", + "tag": "latest", + }, + }, { + reference: "another_repository", + expectedQueryParams: map[string]string{ + "repo": "another_repository", + "tag": "latest", + }, + }, { + reference: "test/another_repository", + expectedQueryParams: map[string]string{ + "repo": "test/another_repository", + "tag": "latest", + }, + }, { + reference: "test/another_repository:tag1", + expectedQueryParams: map[string]string{ + "repo": "test/another_repository", + "tag": "tag1", + }, + }, { + reference: "test/test/another_repository:tag1", + expectedQueryParams: map[string]string{ + "repo": "test/test/another_repository", + "tag": "tag1", + }, + }, { + reference: "test:5000/test/another_repository:tag1", + expectedQueryParams: map[string]string{ + "repo": "test:5000/test/another_repository", + "tag": "tag1", + }, + }, { + reference: "test:5000/test/another_repository", + expectedQueryParams: map[string]string{ + "repo": "test:5000/test/another_repository", + "tag": "latest", + }, + }, + } + for _, tagCase := range tagCases { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "POST" { + return nil, fmt.Errorf("expected POST method, got %s", req.Method) + } + query := req.URL.Query() + for key, expected := range tagCase.expectedQueryParams { + actual := query.Get(key) + if actual != expected { + return nil, fmt.Errorf("%s not set in URL query properly. 
Expected '%s', got %s", key, expected, actual) + } + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + err := client.ImageTag(context.Background(), "image_id", tagCase.reference) + if err != nil { + t.Fatal(err) + } + } +} diff --git a/components/engine/client/info.go b/components/engine/client/info.go new file mode 100644 index 0000000000..ac07961224 --- /dev/null +++ b/components/engine/client/info.go @@ -0,0 +1,26 @@ +package client + +import ( + "encoding/json" + "fmt" + "net/url" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// Info returns information about the docker server. +func (cli *Client) Info(ctx context.Context) (types.Info, error) { + var info types.Info + serverResp, err := cli.get(ctx, "/info", url.Values{}, nil) + if err != nil { + return info, err + } + defer ensureReaderClosed(serverResp) + + if err := json.NewDecoder(serverResp.body).Decode(&info); err != nil { + return info, fmt.Errorf("Error reading remote info: %v", err) + } + + return info, nil +} diff --git a/components/engine/client/info_test.go b/components/engine/client/info_test.go new file mode 100644 index 0000000000..79f23c8af2 --- /dev/null +++ b/components/engine/client/info_test.go @@ -0,0 +1,76 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +func TestInfoServerError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.Info(context.Background()) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestInfoInvalidResponseJSONError(t *testing.T) { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte("invalid json"))), + }, nil + }), + } + _, err := client.Info(context.Background()) + if err == nil || !strings.Contains(err.Error(), "invalid character") { + t.Fatalf("expected a 'invalid character' error, got %v", err) + } +} + +func TestInfo(t *testing.T) { + expectedURL := "/info" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + info := &types.Info{ + ID: "daemonID", + Containers: 3, + } + b, err := json.Marshal(info) + if err != nil { + return nil, err + } + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(b)), + }, nil + }), + } + + info, err := client.Info(context.Background()) + if err != nil { + t.Fatal(err) + } + + if info.ID != "daemonID" { + t.Fatalf("expected daemonID, got %s", info.ID) + } + + if info.Containers != 3 { + t.Fatalf("expected 3 containers, got %d", info.Containers) + } +} diff --git a/components/engine/client/interface.go b/components/engine/client/interface.go new file mode 100644 index 0000000000..678a69ddf7 --- /dev/null +++ b/components/engine/client/interface.go @@ -0,0 +1,189 @@ +package client + +import ( + "io" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/events" + 
"github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/image" + "github.com/docker/docker/api/types/network" + "github.com/docker/docker/api/types/registry" + "github.com/docker/docker/api/types/swarm" + volumetypes "github.com/docker/docker/api/types/volume" + "golang.org/x/net/context" +) + +// CommonAPIClient is the common methods between stable and experimental versions of APIClient. +type CommonAPIClient interface { + ConfigAPIClient + ContainerAPIClient + DistributionAPIClient + ImageAPIClient + NodeAPIClient + NetworkAPIClient + PluginAPIClient + ServiceAPIClient + SwarmAPIClient + SecretAPIClient + SystemAPIClient + VolumeAPIClient + ClientVersion() string + ServerVersion(ctx context.Context) (types.Version, error) + UpdateClientVersion(v string) +} + +// ContainerAPIClient defines API client methods for the containers +type ContainerAPIClient interface { + ContainerAttach(ctx context.Context, container string, options types.ContainerAttachOptions) (types.HijackedResponse, error) + ContainerCommit(ctx context.Context, container string, options types.ContainerCommitOptions) (types.IDResponse, error) + ContainerCreate(ctx context.Context, config *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig, containerName string) (container.ContainerCreateCreatedBody, error) + ContainerDiff(ctx context.Context, container string) ([]container.ContainerChangeResponseItem, error) + ContainerExecAttach(ctx context.Context, execID string, config types.ExecConfig) (types.HijackedResponse, error) + ContainerExecCreate(ctx context.Context, container string, config types.ExecConfig) (types.IDResponse, error) + ContainerExecInspect(ctx context.Context, execID string) (types.ContainerExecInspect, error) + ContainerExecResize(ctx context.Context, execID string, options types.ResizeOptions) error + ContainerExecStart(ctx context.Context, execID string, config types.ExecStartCheck) error + ContainerExport(ctx context.Context, container string) (io.ReadCloser, error) + ContainerInspect(ctx context.Context, container string) (types.ContainerJSON, error) + ContainerInspectWithRaw(ctx context.Context, container string, getSize bool) (types.ContainerJSON, []byte, error) + ContainerKill(ctx context.Context, container, signal string) error + ContainerList(ctx context.Context, options types.ContainerListOptions) ([]types.Container, error) + ContainerLogs(ctx context.Context, container string, options types.ContainerLogsOptions) (io.ReadCloser, error) + ContainerPause(ctx context.Context, container string) error + ContainerRemove(ctx context.Context, container string, options types.ContainerRemoveOptions) error + ContainerRename(ctx context.Context, container, newContainerName string) error + ContainerResize(ctx context.Context, container string, options types.ResizeOptions) error + ContainerRestart(ctx context.Context, container string, timeout *time.Duration) error + ContainerStatPath(ctx context.Context, container, path string) (types.ContainerPathStat, error) + ContainerStats(ctx context.Context, container string, stream bool) (types.ContainerStats, error) + ContainerStart(ctx context.Context, container string, options types.ContainerStartOptions) error + ContainerStop(ctx context.Context, container string, timeout *time.Duration) error + ContainerTop(ctx context.Context, container string, arguments []string) (container.ContainerTopOKBody, error) + ContainerUnpause(ctx context.Context, container string) error + ContainerUpdate(ctx 
context.Context, container string, updateConfig container.UpdateConfig) (container.ContainerUpdateOKBody, error) + ContainerWait(ctx context.Context, container string, condition container.WaitCondition) (<-chan container.ContainerWaitOKBody, <-chan error) + CopyFromContainer(ctx context.Context, container, srcPath string) (io.ReadCloser, types.ContainerPathStat, error) + CopyToContainer(ctx context.Context, container, path string, content io.Reader, options types.CopyToContainerOptions) error + ContainersPrune(ctx context.Context, pruneFilters filters.Args) (types.ContainersPruneReport, error) +} + +// DistributionAPIClient defines API client methods for the registry +type DistributionAPIClient interface { + DistributionInspect(ctx context.Context, image, encodedRegistryAuth string) (registry.DistributionInspect, error) +} + +// ImageAPIClient defines API client methods for the images +type ImageAPIClient interface { + ImageBuild(ctx context.Context, context io.Reader, options types.ImageBuildOptions) (types.ImageBuildResponse, error) + ImageCreate(ctx context.Context, parentReference string, options types.ImageCreateOptions) (io.ReadCloser, error) + ImageHistory(ctx context.Context, image string) ([]image.HistoryResponseItem, error) + ImageImport(ctx context.Context, source types.ImageImportSource, ref string, options types.ImageImportOptions) (io.ReadCloser, error) + ImageInspectWithRaw(ctx context.Context, image string) (types.ImageInspect, []byte, error) + ImageList(ctx context.Context, options types.ImageListOptions) ([]types.ImageSummary, error) + ImageLoad(ctx context.Context, input io.Reader, quiet bool) (types.ImageLoadResponse, error) + ImagePull(ctx context.Context, ref string, options types.ImagePullOptions) (io.ReadCloser, error) + ImagePush(ctx context.Context, ref string, options types.ImagePushOptions) (io.ReadCloser, error) + ImageRemove(ctx context.Context, image string, options types.ImageRemoveOptions) ([]types.ImageDeleteResponseItem, error) + ImageSearch(ctx context.Context, term string, options types.ImageSearchOptions) ([]registry.SearchResult, error) + ImageSave(ctx context.Context, images []string) (io.ReadCloser, error) + ImageTag(ctx context.Context, image, ref string) error + ImagesPrune(ctx context.Context, pruneFilter filters.Args) (types.ImagesPruneReport, error) +} + +// NetworkAPIClient defines API client methods for the networks +type NetworkAPIClient interface { + NetworkConnect(ctx context.Context, networkID, container string, config *network.EndpointSettings) error + NetworkCreate(ctx context.Context, name string, options types.NetworkCreate) (types.NetworkCreateResponse, error) + NetworkDisconnect(ctx context.Context, networkID, container string, force bool) error + NetworkInspect(ctx context.Context, networkID string, verbose bool) (types.NetworkResource, error) + NetworkInspectWithRaw(ctx context.Context, networkID string, verbose bool) (types.NetworkResource, []byte, error) + NetworkList(ctx context.Context, options types.NetworkListOptions) ([]types.NetworkResource, error) + NetworkRemove(ctx context.Context, networkID string) error + NetworksPrune(ctx context.Context, pruneFilter filters.Args) (types.NetworksPruneReport, error) +} + +// NodeAPIClient defines API client methods for the nodes +type NodeAPIClient interface { + NodeInspectWithRaw(ctx context.Context, nodeID string) (swarm.Node, []byte, error) + NodeList(ctx context.Context, options types.NodeListOptions) ([]swarm.Node, error) + NodeRemove(ctx context.Context, nodeID string, options 
types.NodeRemoveOptions) error + NodeUpdate(ctx context.Context, nodeID string, version swarm.Version, node swarm.NodeSpec) error +} + +// PluginAPIClient defines API client methods for the plugins +type PluginAPIClient interface { + PluginList(ctx context.Context, filter filters.Args) (types.PluginsListResponse, error) + PluginRemove(ctx context.Context, name string, options types.PluginRemoveOptions) error + PluginEnable(ctx context.Context, name string, options types.PluginEnableOptions) error + PluginDisable(ctx context.Context, name string, options types.PluginDisableOptions) error + PluginInstall(ctx context.Context, name string, options types.PluginInstallOptions) (io.ReadCloser, error) + PluginUpgrade(ctx context.Context, name string, options types.PluginInstallOptions) (io.ReadCloser, error) + PluginPush(ctx context.Context, name string, registryAuth string) (io.ReadCloser, error) + PluginSet(ctx context.Context, name string, args []string) error + PluginInspectWithRaw(ctx context.Context, name string) (*types.Plugin, []byte, error) + PluginCreate(ctx context.Context, createContext io.Reader, options types.PluginCreateOptions) error +} + +// ServiceAPIClient defines API client methods for the services +type ServiceAPIClient interface { + ServiceCreate(ctx context.Context, service swarm.ServiceSpec, options types.ServiceCreateOptions) (types.ServiceCreateResponse, error) + ServiceInspectWithRaw(ctx context.Context, serviceID string, options types.ServiceInspectOptions) (swarm.Service, []byte, error) + ServiceList(ctx context.Context, options types.ServiceListOptions) ([]swarm.Service, error) + ServiceRemove(ctx context.Context, serviceID string) error + ServiceUpdate(ctx context.Context, serviceID string, version swarm.Version, service swarm.ServiceSpec, options types.ServiceUpdateOptions) (types.ServiceUpdateResponse, error) + ServiceLogs(ctx context.Context, serviceID string, options types.ContainerLogsOptions) (io.ReadCloser, error) + TaskLogs(ctx context.Context, taskID string, options types.ContainerLogsOptions) (io.ReadCloser, error) + TaskInspectWithRaw(ctx context.Context, taskID string) (swarm.Task, []byte, error) + TaskList(ctx context.Context, options types.TaskListOptions) ([]swarm.Task, error) +} + +// SwarmAPIClient defines API client methods for the swarm +type SwarmAPIClient interface { + SwarmInit(ctx context.Context, req swarm.InitRequest) (string, error) + SwarmJoin(ctx context.Context, req swarm.JoinRequest) error + SwarmGetUnlockKey(ctx context.Context) (types.SwarmUnlockKeyResponse, error) + SwarmUnlock(ctx context.Context, req swarm.UnlockRequest) error + SwarmLeave(ctx context.Context, force bool) error + SwarmInspect(ctx context.Context) (swarm.Swarm, error) + SwarmUpdate(ctx context.Context, version swarm.Version, swarm swarm.Spec, flags swarm.UpdateFlags) error +} + +// SystemAPIClient defines API client methods for the system +type SystemAPIClient interface { + Events(ctx context.Context, options types.EventsOptions) (<-chan events.Message, <-chan error) + Info(ctx context.Context) (types.Info, error) + RegistryLogin(ctx context.Context, auth types.AuthConfig) (registry.AuthenticateOKBody, error) + DiskUsage(ctx context.Context) (types.DiskUsage, error) + Ping(ctx context.Context) (types.Ping, error) +} + +// VolumeAPIClient defines API client methods for the volumes +type VolumeAPIClient interface { + VolumeCreate(ctx context.Context, options volumetypes.VolumesCreateBody) (types.Volume, error) + VolumeInspect(ctx context.Context, volumeID string) 
(types.Volume, error) + VolumeInspectWithRaw(ctx context.Context, volumeID string) (types.Volume, []byte, error) + VolumeList(ctx context.Context, filter filters.Args) (volumetypes.VolumesListOKBody, error) + VolumeRemove(ctx context.Context, volumeID string, force bool) error + VolumesPrune(ctx context.Context, pruneFilter filters.Args) (types.VolumesPruneReport, error) +} + +// SecretAPIClient defines API client methods for secrets +type SecretAPIClient interface { + SecretList(ctx context.Context, options types.SecretListOptions) ([]swarm.Secret, error) + SecretCreate(ctx context.Context, secret swarm.SecretSpec) (types.SecretCreateResponse, error) + SecretRemove(ctx context.Context, id string) error + SecretInspectWithRaw(ctx context.Context, name string) (swarm.Secret, []byte, error) + SecretUpdate(ctx context.Context, id string, version swarm.Version, secret swarm.SecretSpec) error +} + +// ConfigAPIClient defines API client methods for configs +type ConfigAPIClient interface { + ConfigList(ctx context.Context, options types.ConfigListOptions) ([]swarm.Config, error) + ConfigCreate(ctx context.Context, config swarm.ConfigSpec) (types.ConfigCreateResponse, error) + ConfigRemove(ctx context.Context, id string) error + ConfigInspectWithRaw(ctx context.Context, name string) (swarm.Config, []byte, error) + ConfigUpdate(ctx context.Context, id string, version swarm.Version, config swarm.ConfigSpec) error +} diff --git a/components/engine/client/interface_experimental.go b/components/engine/client/interface_experimental.go new file mode 100644 index 0000000000..51da98ecdd --- /dev/null +++ b/components/engine/client/interface_experimental.go @@ -0,0 +1,17 @@ +package client + +import ( + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +type apiClientExperimental interface { + CheckpointAPIClient +} + +// CheckpointAPIClient defines API client methods for the checkpoints +type CheckpointAPIClient interface { + CheckpointCreate(ctx context.Context, container string, options types.CheckpointCreateOptions) error + CheckpointDelete(ctx context.Context, container string, options types.CheckpointDeleteOptions) error + CheckpointList(ctx context.Context, container string, options types.CheckpointListOptions) ([]types.Checkpoint, error) +} diff --git a/components/engine/client/interface_stable.go b/components/engine/client/interface_stable.go new file mode 100644 index 0000000000..cc90a3cbb9 --- /dev/null +++ b/components/engine/client/interface_stable.go @@ -0,0 +1,10 @@ +package client + +// APIClient is an interface that clients that talk with a docker server must implement. +type APIClient interface { + CommonAPIClient + apiClientExperimental +} + +// Ensure that Client always implements APIClient. +var _ APIClient = &Client{} diff --git a/components/engine/client/login.go b/components/engine/client/login.go new file mode 100644 index 0000000000..79219ff59c --- /dev/null +++ b/components/engine/client/login.go @@ -0,0 +1,29 @@ +package client + +import ( + "encoding/json" + "net/http" + "net/url" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/registry" + "golang.org/x/net/context" +) + +// RegistryLogin authenticates the docker server with a given docker registry. +// It returns unauthorizedError when the authentication fails. 
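+//
+// Illustrative usage (a sketch; the values are placeholders):
+//
+//	body, err := cli.RegistryLogin(ctx, types.AuthConfig{
+//		Username:      "user",
+//		Password:      "secret",
+//		ServerAddress: "registry.example.com",
+//	})
+//	// on success, body.Status carries the daemon's message, e.g. "Login Succeeded"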
+func (cli *Client) RegistryLogin(ctx context.Context, auth types.AuthConfig) (registry.AuthenticateOKBody, error) { + resp, err := cli.post(ctx, "/auth", url.Values{}, auth, nil) + + if resp.statusCode == http.StatusUnauthorized { + return registry.AuthenticateOKBody{}, unauthorizedError{err} + } + if err != nil { + return registry.AuthenticateOKBody{}, err + } + + var response registry.AuthenticateOKBody + err = json.NewDecoder(resp.body).Decode(&response) + ensureReaderClosed(resp) + return response, err +} diff --git a/components/engine/client/network_connect.go b/components/engine/client/network_connect.go new file mode 100644 index 0000000000..c022c17b5b --- /dev/null +++ b/components/engine/client/network_connect.go @@ -0,0 +1,18 @@ +package client + +import ( + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/network" + "golang.org/x/net/context" +) + +// NetworkConnect connects a container to an existent network in the docker host. +func (cli *Client) NetworkConnect(ctx context.Context, networkID, containerID string, config *network.EndpointSettings) error { + nc := types.NetworkConnect{ + Container: containerID, + EndpointConfig: config, + } + resp, err := cli.post(ctx, "/networks/"+networkID+"/connect", nil, nc, nil) + ensureReaderClosed(resp) + return err +} diff --git a/components/engine/client/network_connect_test.go b/components/engine/client/network_connect_test.go new file mode 100644 index 0000000000..91b1a76676 --- /dev/null +++ b/components/engine/client/network_connect_test.go @@ -0,0 +1,111 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/network" +) + +func TestNetworkConnectError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + err := client.NetworkConnect(context.Background(), "network_id", "container_id", nil) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestNetworkConnectEmptyNilEndpointSettings(t *testing.T) { + expectedURL := "/networks/network_id/connect" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + + if req.Method != "POST" { + return nil, fmt.Errorf("expected POST method, got %s", req.Method) + } + + var connect types.NetworkConnect + if err := json.NewDecoder(req.Body).Decode(&connect); err != nil { + return nil, err + } + + if connect.Container != "container_id" { + return nil, fmt.Errorf("expected 'container_id', got %s", connect.Container) + } + + if connect.EndpointConfig != nil { + return nil, fmt.Errorf("expected connect.EndpointConfig to be nil, got %v", connect.EndpointConfig) + } + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + + err := client.NetworkConnect(context.Background(), "network_id", "container_id", nil) + if err != nil { + t.Fatal(err) + } +} + +func TestNetworkConnect(t *testing.T) { + expectedURL := "/networks/network_id/connect" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, 
fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + + if req.Method != "POST" { + return nil, fmt.Errorf("expected POST method, got %s", req.Method) + } + + var connect types.NetworkConnect + if err := json.NewDecoder(req.Body).Decode(&connect); err != nil { + return nil, err + } + + if connect.Container != "container_id" { + return nil, fmt.Errorf("expected 'container_id', got %s", connect.Container) + } + + if connect.EndpointConfig == nil { + return nil, fmt.Errorf("expected connect.EndpointConfig to be not nil, got %v", connect.EndpointConfig) + } + + if connect.EndpointConfig.NetworkID != "NetworkID" { + return nil, fmt.Errorf("expected 'NetworkID', got %s", connect.EndpointConfig.NetworkID) + } + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + + err := client.NetworkConnect(context.Background(), "network_id", "container_id", &network.EndpointSettings{ + NetworkID: "NetworkID", + }) + if err != nil { + t.Fatal(err) + } +} diff --git a/components/engine/client/network_create.go b/components/engine/client/network_create.go new file mode 100644 index 0000000000..4067a541ff --- /dev/null +++ b/components/engine/client/network_create.go @@ -0,0 +1,25 @@ +package client + +import ( + "encoding/json" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// NetworkCreate creates a new network in the docker host. +func (cli *Client) NetworkCreate(ctx context.Context, name string, options types.NetworkCreate) (types.NetworkCreateResponse, error) { + networkCreateRequest := types.NetworkCreateRequest{ + NetworkCreate: options, + Name: name, + } + var response types.NetworkCreateResponse + serverResp, err := cli.post(ctx, "/networks/create", nil, networkCreateRequest, nil) + if err != nil { + return response, err + } + + json.NewDecoder(serverResp.body).Decode(&response) + ensureReaderClosed(serverResp) + return response, err +} diff --git a/components/engine/client/network_create_test.go b/components/engine/client/network_create_test.go new file mode 100644 index 0000000000..0e2457f89c --- /dev/null +++ b/components/engine/client/network_create_test.go @@ -0,0 +1,72 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +func TestNetworkCreateError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, err := client.NetworkCreate(context.Background(), "mynetwork", types.NetworkCreate{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestNetworkCreate(t *testing.T) { + expectedURL := "/networks/create" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + + if req.Method != "POST" { + return nil, fmt.Errorf("expected POST method, got %s", req.Method) + } + + content, err := json.Marshal(types.NetworkCreateResponse{ + ID: "network_id", + Warning: "warning", + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + networkResponse, err := client.NetworkCreate(context.Background(), 
"mynetwork", types.NetworkCreate{ + CheckDuplicate: true, + Driver: "mydriver", + EnableIPv6: true, + Internal: true, + Options: map[string]string{ + "opt-key": "opt-value", + }, + }) + if err != nil { + t.Fatal(err) + } + if networkResponse.ID != "network_id" { + t.Fatalf("expected networkResponse.ID to be 'network_id', got %s", networkResponse.ID) + } + if networkResponse.Warning != "warning" { + t.Fatalf("expected networkResponse.Warning to be 'warning', got %s", networkResponse.Warning) + } +} diff --git a/components/engine/client/network_disconnect.go b/components/engine/client/network_disconnect.go new file mode 100644 index 0000000000..24b58e3c12 --- /dev/null +++ b/components/engine/client/network_disconnect.go @@ -0,0 +1,14 @@ +package client + +import ( + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// NetworkDisconnect disconnects a container from an existent network in the docker host. +func (cli *Client) NetworkDisconnect(ctx context.Context, networkID, containerID string, force bool) error { + nd := types.NetworkDisconnect{Container: containerID, Force: force} + resp, err := cli.post(ctx, "/networks/"+networkID+"/disconnect", nil, nd, nil) + ensureReaderClosed(resp) + return err +} diff --git a/components/engine/client/network_disconnect_test.go b/components/engine/client/network_disconnect_test.go new file mode 100644 index 0000000000..b54a2b1ccf --- /dev/null +++ b/components/engine/client/network_disconnect_test.go @@ -0,0 +1,64 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +func TestNetworkDisconnectError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + err := client.NetworkDisconnect(context.Background(), "network_id", "container_id", false) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestNetworkDisconnect(t *testing.T) { + expectedURL := "/networks/network_id/disconnect" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + + if req.Method != "POST" { + return nil, fmt.Errorf("expected POST method, got %s", req.Method) + } + + var disconnect types.NetworkDisconnect + if err := json.NewDecoder(req.Body).Decode(&disconnect); err != nil { + return nil, err + } + + if disconnect.Container != "container_id" { + return nil, fmt.Errorf("expected 'container_id', got %s", disconnect.Container) + } + + if !disconnect.Force { + return nil, fmt.Errorf("expected Force to be true, got %v", disconnect.Force) + } + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + + err := client.NetworkDisconnect(context.Background(), "network_id", "container_id", true) + if err != nil { + t.Fatal(err) + } +} diff --git a/components/engine/client/network_inspect.go b/components/engine/client/network_inspect.go new file mode 100644 index 0000000000..7242304025 --- /dev/null +++ b/components/engine/client/network_inspect.go @@ -0,0 +1,47 @@ +package client + +import ( + "bytes" + "encoding/json" + "io/ioutil" + "net/http" + "net/url" + + "github.com/docker/docker/api/types" + 
"golang.org/x/net/context" +) + +// NetworkInspect returns the information for a specific network configured in the docker host. +func (cli *Client) NetworkInspect(ctx context.Context, networkID string, verbose bool) (types.NetworkResource, error) { + networkResource, _, err := cli.NetworkInspectWithRaw(ctx, networkID, verbose) + return networkResource, err +} + +// NetworkInspectWithRaw returns the information for a specific network configured in the docker host and its raw representation. +func (cli *Client) NetworkInspectWithRaw(ctx context.Context, networkID string, verbose bool) (types.NetworkResource, []byte, error) { + var ( + networkResource types.NetworkResource + resp serverResponse + err error + ) + query := url.Values{} + if verbose { + query.Set("verbose", "true") + } + resp, err = cli.get(ctx, "/networks/"+networkID, query, nil) + if err != nil { + if resp.statusCode == http.StatusNotFound { + return networkResource, nil, networkNotFoundError{networkID} + } + return networkResource, nil, err + } + defer ensureReaderClosed(resp) + + body, err := ioutil.ReadAll(resp.body) + if err != nil { + return networkResource, nil, err + } + rdr := bytes.NewReader(body) + err = json.NewDecoder(rdr).Decode(&networkResource) + return networkResource, body, err +} diff --git a/components/engine/client/network_inspect_test.go b/components/engine/client/network_inspect_test.go new file mode 100644 index 0000000000..1504289f5d --- /dev/null +++ b/components/engine/client/network_inspect_test.go @@ -0,0 +1,96 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/network" + "golang.org/x/net/context" +) + +func TestNetworkInspectError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, err := client.NetworkInspect(context.Background(), "nothing", false) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestNetworkInspectContainerNotFound(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusNotFound, "Server error")), + } + + _, err := client.NetworkInspect(context.Background(), "unknown", false) + if err == nil || !IsErrNetworkNotFound(err) { + t.Fatalf("expected a networkNotFound error, got %v", err) + } +} + +func TestNetworkInspect(t *testing.T) { + expectedURL := "/networks/network_id" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "GET" { + return nil, fmt.Errorf("expected GET method, got %s", req.Method) + } + + var ( + content []byte + err error + ) + if strings.HasPrefix(req.URL.RawQuery, "verbose=true") { + s := map[string]network.ServiceInfo{ + "web": {}, + } + content, err = json.Marshal(types.NetworkResource{ + Name: "mynetwork", + Services: s, + }) + } else { + content, err = json.Marshal(types.NetworkResource{ + Name: "mynetwork", + }) + } + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + r, err := client.NetworkInspect(context.Background(), "network_id", false) + if err != nil { + t.Fatal(err) + } + if r.Name != "mynetwork" { + 
t.Fatalf("expected `mynetwork`, got %s", r.Name) + } + + r, err = client.NetworkInspect(context.Background(), "network_id", true) + if err != nil { + t.Fatal(err) + } + if r.Name != "mynetwork" { + t.Fatalf("expected `mynetwork`, got %s", r.Name) + } + _, ok := r.Services["web"] + if !ok { + t.Fatalf("expected service `web` missing in the verbose output") + } +} diff --git a/components/engine/client/network_list.go b/components/engine/client/network_list.go new file mode 100644 index 0000000000..e566a93e23 --- /dev/null +++ b/components/engine/client/network_list.go @@ -0,0 +1,31 @@ +package client + +import ( + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "golang.org/x/net/context" +) + +// NetworkList returns the list of networks configured in the docker host. +func (cli *Client) NetworkList(ctx context.Context, options types.NetworkListOptions) ([]types.NetworkResource, error) { + query := url.Values{} + if options.Filters.Len() > 0 { + filterJSON, err := filters.ToParamWithVersion(cli.version, options.Filters) + if err != nil { + return nil, err + } + + query.Set("filters", filterJSON) + } + var networkResources []types.NetworkResource + resp, err := cli.get(ctx, "/networks", query, nil) + if err != nil { + return networkResources, err + } + err = json.NewDecoder(resp.body).Decode(&networkResources) + ensureReaderClosed(resp) + return networkResources, err +} diff --git a/components/engine/client/network_list_test.go b/components/engine/client/network_list_test.go new file mode 100644 index 0000000000..4d443496ac --- /dev/null +++ b/components/engine/client/network_list_test.go @@ -0,0 +1,108 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "golang.org/x/net/context" +) + +func TestNetworkListError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, err := client.NetworkList(context.Background(), types.NetworkListOptions{ + Filters: filters.NewArgs(), + }) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestNetworkList(t *testing.T) { + expectedURL := "/networks" + + noDanglingFilters := filters.NewArgs() + noDanglingFilters.Add("dangling", "false") + + danglingFilters := filters.NewArgs() + danglingFilters.Add("dangling", "true") + + labelFilters := filters.NewArgs() + labelFilters.Add("label", "label1") + labelFilters.Add("label", "label2") + + listCases := []struct { + options types.NetworkListOptions + expectedFilters string + }{ + { + options: types.NetworkListOptions{ + Filters: filters.NewArgs(), + }, + expectedFilters: "", + }, { + options: types.NetworkListOptions{ + Filters: noDanglingFilters, + }, + expectedFilters: `{"dangling":{"false":true}}`, + }, { + options: types.NetworkListOptions{ + Filters: danglingFilters, + }, + expectedFilters: `{"dangling":{"true":true}}`, + }, { + options: types.NetworkListOptions{ + Filters: labelFilters, + }, + expectedFilters: `{"label":{"label1":true,"label2":true}}`, + }, + } + + for _, listCase := range listCases { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) 
+ } + if req.Method != "GET" { + return nil, fmt.Errorf("expected GET method, got %s", req.Method) + } + query := req.URL.Query() + actualFilters := query.Get("filters") + if actualFilters != listCase.expectedFilters { + return nil, fmt.Errorf("filters not set in URL query properly. Expected '%s', got %s", listCase.expectedFilters, actualFilters) + } + content, err := json.Marshal([]types.NetworkResource{ + { + Name: "network", + Driver: "bridge", + }, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + networkResources, err := client.NetworkList(context.Background(), listCase.options) + if err != nil { + t.Fatal(err) + } + if len(networkResources) != 1 { + t.Fatalf("expected 1 network resource, got %v", networkResources) + } + } +} diff --git a/components/engine/client/network_prune.go b/components/engine/client/network_prune.go new file mode 100644 index 0000000000..7352a7f0c5 --- /dev/null +++ b/components/engine/client/network_prune.go @@ -0,0 +1,36 @@ +package client + +import ( + "encoding/json" + "fmt" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "golang.org/x/net/context" +) + +// NetworksPrune requests the daemon to delete unused networks +func (cli *Client) NetworksPrune(ctx context.Context, pruneFilters filters.Args) (types.NetworksPruneReport, error) { + var report types.NetworksPruneReport + + if err := cli.NewVersionError("1.25", "network prune"); err != nil { + return report, err + } + + query, err := getFiltersQuery(pruneFilters) + if err != nil { + return report, err + } + + serverResp, err := cli.post(ctx, "/networks/prune", query, nil, nil) + if err != nil { + return report, err + } + defer ensureReaderClosed(serverResp) + + if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil { + return report, fmt.Errorf("Error retrieving network prune report: %v", err) + } + + return report, nil +} diff --git a/components/engine/client/network_prune_test.go b/components/engine/client/network_prune_test.go new file mode 100644 index 0000000000..3e4f5d0415 --- /dev/null +++ b/components/engine/client/network_prune_test.go @@ -0,0 +1,112 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/stretchr/testify/assert" + "golang.org/x/net/context" +) + +func TestNetworksPruneError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + version: "1.25", + } + + filters := filters.NewArgs() + + _, err := client.NetworksPrune(context.Background(), filters) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestNetworksPrune(t *testing.T) { + expectedURL := "/v1.25/networks/prune" + + danglingFilters := filters.NewArgs() + danglingFilters.Add("dangling", "true") + + noDanglingFilters := filters.NewArgs() + noDanglingFilters.Add("dangling", "false") + + labelFilters := filters.NewArgs() + labelFilters.Add("dangling", "true") + labelFilters.Add("label", "label1=foo") + labelFilters.Add("label", "label2!=bar") + + listCases := []struct { + filters filters.Args + expectedQueryParams map[string]string + }{ + { + filters: filters.Args{}, + expectedQueryParams: map[string]string{ + "until": "", + 
"filter": "", + "filters": "", + }, + }, + { + filters: danglingFilters, + expectedQueryParams: map[string]string{ + "until": "", + "filter": "", + "filters": `{"dangling":{"true":true}}`, + }, + }, + { + filters: noDanglingFilters, + expectedQueryParams: map[string]string{ + "until": "", + "filter": "", + "filters": `{"dangling":{"false":true}}`, + }, + }, + { + filters: labelFilters, + expectedQueryParams: map[string]string{ + "until": "", + "filter": "", + "filters": `{"dangling":{"true":true},"label":{"label1=foo":true,"label2!=bar":true}}`, + }, + }, + } + for _, listCase := range listCases { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + query := req.URL.Query() + for key, expected := range listCase.expectedQueryParams { + actual := query.Get(key) + assert.Equal(t, expected, actual) + } + content, err := json.Marshal(types.NetworksPruneReport{ + NetworksDeleted: []string{"network_id1", "network_id2"}, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + version: "1.25", + } + + report, err := client.NetworksPrune(context.Background(), listCase.filters) + assert.NoError(t, err) + assert.Len(t, report.NetworksDeleted, 2) + } +} diff --git a/components/engine/client/network_remove.go b/components/engine/client/network_remove.go new file mode 100644 index 0000000000..6bd6748924 --- /dev/null +++ b/components/engine/client/network_remove.go @@ -0,0 +1,10 @@ +package client + +import "golang.org/x/net/context" + +// NetworkRemove removes an existent network from the docker host. 
+func (cli *Client) NetworkRemove(ctx context.Context, networkID string) error { + resp, err := cli.delete(ctx, "/networks/"+networkID, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/components/engine/client/network_remove_test.go b/components/engine/client/network_remove_test.go new file mode 100644 index 0000000000..2a7b9640c1 --- /dev/null +++ b/components/engine/client/network_remove_test.go @@ -0,0 +1,47 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" +) + +func TestNetworkRemoveError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + err := client.NetworkRemove(context.Background(), "network_id") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestNetworkRemove(t *testing.T) { + expectedURL := "/networks/network_id" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "DELETE" { + return nil, fmt.Errorf("expected DELETE method, got %s", req.Method) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte("body"))), + }, nil + }), + } + + err := client.NetworkRemove(context.Background(), "network_id") + if err != nil { + t.Fatal(err) + } +} diff --git a/components/engine/client/node_inspect.go b/components/engine/client/node_inspect.go new file mode 100644 index 0000000000..abf505d29c --- /dev/null +++ b/components/engine/client/node_inspect.go @@ -0,0 +1,33 @@ +package client + +import ( + "bytes" + "encoding/json" + "io/ioutil" + "net/http" + + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +// NodeInspectWithRaw returns the node information. 
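+//
+// Illustrative sketch (assumes cli is a *Client and ctx a context.Context):
+// the decoded swarm.Node is returned together with the raw JSON body.
+//
+//	node, raw, err := cli.NodeInspectWithRaw(ctx, "node_id")
+//	if err != nil {
+//		// handle the error, e.g. check it with IsErrNodeNotFound(err)
+//	}
+//	_ = node.Spec.Role // decoded form
+//	_ = raw            // unparsed JSON bytes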
+func (cli *Client) NodeInspectWithRaw(ctx context.Context, nodeID string) (swarm.Node, []byte, error) { + serverResp, err := cli.get(ctx, "/nodes/"+nodeID, nil, nil) + if err != nil { + if serverResp.statusCode == http.StatusNotFound { + return swarm.Node{}, nil, nodeNotFoundError{nodeID} + } + return swarm.Node{}, nil, err + } + defer ensureReaderClosed(serverResp) + + body, err := ioutil.ReadAll(serverResp.body) + if err != nil { + return swarm.Node{}, nil, err + } + + var response swarm.Node + rdr := bytes.NewReader(body) + err = json.NewDecoder(rdr).Decode(&response) + return response, body, err +} diff --git a/components/engine/client/node_inspect_test.go b/components/engine/client/node_inspect_test.go new file mode 100644 index 0000000000..dca16a8cdc --- /dev/null +++ b/components/engine/client/node_inspect_test.go @@ -0,0 +1,65 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +func TestNodeInspectError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, _, err := client.NodeInspectWithRaw(context.Background(), "nothing") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestNodeInspectNodeNotFound(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusNotFound, "Server error")), + } + + _, _, err := client.NodeInspectWithRaw(context.Background(), "unknown") + if err == nil || !IsErrNodeNotFound(err) { + t.Fatalf("expected a nodeNotFoundError error, got %v", err) + } +} + +func TestNodeInspect(t *testing.T) { + expectedURL := "/nodes/node_id" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + content, err := json.Marshal(swarm.Node{ + ID: "node_id", + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + nodeInspect, _, err := client.NodeInspectWithRaw(context.Background(), "node_id") + if err != nil { + t.Fatal(err) + } + if nodeInspect.ID != "node_id" { + t.Fatalf("expected `node_id`, got %s", nodeInspect.ID) + } +} diff --git a/components/engine/client/node_list.go b/components/engine/client/node_list.go new file mode 100644 index 0000000000..3e8440f08e --- /dev/null +++ b/components/engine/client/node_list.go @@ -0,0 +1,36 @@ +package client + +import ( + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +// NodeList returns the list of nodes. 
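+//
+// For example (sketch only; cli and ctx assumed to be in scope), listing
+// manager nodes via a "role" filter:
+//
+//	f := filters.NewArgs()
+//	f.Add("role", "manager")
+//	nodes, err := cli.NodeList(ctx, types.NodeListOptions{Filters: f})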
+func (cli *Client) NodeList(ctx context.Context, options types.NodeListOptions) ([]swarm.Node, error) { + query := url.Values{} + + if options.Filters.Len() > 0 { + filterJSON, err := filters.ToParam(options.Filters) + + if err != nil { + return nil, err + } + + query.Set("filters", filterJSON) + } + + resp, err := cli.get(ctx, "/nodes", query, nil) + if err != nil { + return nil, err + } + + var nodes []swarm.Node + err = json.NewDecoder(resp.body).Decode(&nodes) + ensureReaderClosed(resp) + return nodes, err +} diff --git a/components/engine/client/node_list_test.go b/components/engine/client/node_list_test.go new file mode 100644 index 0000000000..0251b5cce4 --- /dev/null +++ b/components/engine/client/node_list_test.go @@ -0,0 +1,94 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +func TestNodeListError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, err := client.NodeList(context.Background(), types.NodeListOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestNodeList(t *testing.T) { + expectedURL := "/nodes" + + filters := filters.NewArgs() + filters.Add("label", "label1") + filters.Add("label", "label2") + + listCases := []struct { + options types.NodeListOptions + expectedQueryParams map[string]string + }{ + { + options: types.NodeListOptions{}, + expectedQueryParams: map[string]string{ + "filters": "", + }, + }, + { + options: types.NodeListOptions{ + Filters: filters, + }, + expectedQueryParams: map[string]string{ + "filters": `{"label":{"label1":true,"label2":true}}`, + }, + }, + } + for _, listCase := range listCases { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + query := req.URL.Query() + for key, expected := range listCase.expectedQueryParams { + actual := query.Get(key) + if actual != expected { + return nil, fmt.Errorf("%s not set in URL query properly. Expected '%s', got %s", key, expected, actual) + } + } + content, err := json.Marshal([]swarm.Node{ + { + ID: "node_id1", + }, + { + ID: "node_id2", + }, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + nodes, err := client.NodeList(context.Background(), listCase.options) + if err != nil { + t.Fatal(err) + } + if len(nodes) != 2 { + t.Fatalf("expected 2 nodes, got %v", nodes) + } + } +} diff --git a/components/engine/client/node_remove.go b/components/engine/client/node_remove.go new file mode 100644 index 0000000000..0a77f3d578 --- /dev/null +++ b/components/engine/client/node_remove.go @@ -0,0 +1,21 @@ +package client + +import ( + "net/url" + + "github.com/docker/docker/api/types" + + "golang.org/x/net/context" +) + +// NodeRemove removes a Node. 
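+//
+// Sketch (cli and ctx assumed to be in scope); Force removes the node even
+// if it is still active:
+//
+//	err := cli.NodeRemove(ctx, "node_id", types.NodeRemoveOptions{Force: true})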
+func (cli *Client) NodeRemove(ctx context.Context, nodeID string, options types.NodeRemoveOptions) error { + query := url.Values{} + if options.Force { + query.Set("force", "1") + } + + resp, err := cli.delete(ctx, "/nodes/"+nodeID, query, nil) + ensureReaderClosed(resp) + return err +} diff --git a/components/engine/client/node_remove_test.go b/components/engine/client/node_remove_test.go new file mode 100644 index 0000000000..f2f8adc4a3 --- /dev/null +++ b/components/engine/client/node_remove_test.go @@ -0,0 +1,69 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + + "golang.org/x/net/context" +) + +func TestNodeRemoveError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + err := client.NodeRemove(context.Background(), "node_id", types.NodeRemoveOptions{Force: false}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestNodeRemove(t *testing.T) { + expectedURL := "/nodes/node_id" + + removeCases := []struct { + force bool + expectedForce string + }{ + { + expectedForce: "", + }, + { + force: true, + expectedForce: "1", + }, + } + + for _, removeCase := range removeCases { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "DELETE" { + return nil, fmt.Errorf("expected DELETE method, got %s", req.Method) + } + force := req.URL.Query().Get("force") + if force != removeCase.expectedForce { + return nil, fmt.Errorf("force not set in URL query properly. expected '%s', got %s", removeCase.expectedForce, force) + } + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte("body"))), + }, nil + }), + } + + err := client.NodeRemove(context.Background(), "node_id", types.NodeRemoveOptions{Force: removeCase.force}) + if err != nil { + t.Fatal(err) + } + } +} diff --git a/components/engine/client/node_update.go b/components/engine/client/node_update.go new file mode 100644 index 0000000000..3ca9760282 --- /dev/null +++ b/components/engine/client/node_update.go @@ -0,0 +1,18 @@ +package client + +import ( + "net/url" + "strconv" + + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +// NodeUpdate updates a Node. 
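+//
+// The version acts as an optimistic lock, so a typical flow (sketch; cli and
+// ctx assumed to be in scope) reads the node first and echoes back its
+// current version:
+//
+//	node, _, err := cli.NodeInspectWithRaw(ctx, "node_id")
+//	if err == nil {
+//		node.Spec.Availability = swarm.NodeAvailabilityDrain
+//		err = cli.NodeUpdate(ctx, node.ID, node.Version, node.Spec)
+//	}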
+func (cli *Client) NodeUpdate(ctx context.Context, nodeID string, version swarm.Version, node swarm.NodeSpec) error {
+	query := url.Values{}
+	query.Set("version", strconv.FormatUint(version.Index, 10))
+	resp, err := cli.post(ctx, "/nodes/"+nodeID+"/update", query, node, nil)
+	ensureReaderClosed(resp)
+	return err
+}
diff --git a/components/engine/client/node_update_test.go b/components/engine/client/node_update_test.go
new file mode 100644
index 0000000000..613ff104eb
--- /dev/null
+++ b/components/engine/client/node_update_test.go
@@ -0,0 +1,49 @@
+package client
+
+import (
+	"bytes"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"strings"
+	"testing"
+
+	"golang.org/x/net/context"
+
+	"github.com/docker/docker/api/types/swarm"
+)
+
+func TestNodeUpdateError(t *testing.T) {
+	client := &Client{
+		client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")),
+	}
+
+	err := client.NodeUpdate(context.Background(), "node_id", swarm.Version{}, swarm.NodeSpec{})
+	if err == nil || err.Error() != "Error response from daemon: Server error" {
+		t.Fatalf("expected a Server Error, got %v", err)
+	}
+}
+
+func TestNodeUpdate(t *testing.T) {
+	expectedURL := "/nodes/node_id/update"
+
+	client := &Client{
+		client: newMockClient(func(req *http.Request) (*http.Response, error) {
+			if !strings.HasPrefix(req.URL.Path, expectedURL) {
+				return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL)
+			}
+			if req.Method != "POST" {
+				return nil, fmt.Errorf("expected POST method, got %s", req.Method)
+			}
+			return &http.Response{
+				StatusCode: http.StatusOK,
+				Body:       ioutil.NopCloser(bytes.NewReader([]byte("body"))),
+			}, nil
+		}),
+	}
+
+	err := client.NodeUpdate(context.Background(), "node_id", swarm.Version{}, swarm.NodeSpec{})
+	if err != nil {
+		t.Fatal(err)
+	}
+}
diff --git a/components/engine/client/parse_logs.go b/components/engine/client/parse_logs.go
new file mode 100644
index 0000000000..e427f80a77
--- /dev/null
+++ b/components/engine/client/parse_logs.go
@@ -0,0 +1,41 @@
+package client
+
+// parse_logs.go contains utility helpers for getting information out of docker
+// log lines. Really, it only contains ParseLogDetails right now. Maybe in the
+// future there will be some desire to parse log messages back into a struct;
+// that would go here if we did.
+
+import (
+	"net/url"
+	"strings"
+
+	"github.com/pkg/errors"
+)
+
+// ParseLogDetails takes a details string of key value pairs in the form
+// "k=v,l=w", where the keys and values are url query escaped, and each pair
+// is separated by a comma. It returns the resulting map, or an error if the
+// details string is not in a valid format.
+// The exact form of the details encoding is implemented in
+// api/server/httputils/write_log_stream.go.
+func ParseLogDetails(details string) (map[string]string, error) {
+	pairs := strings.Split(details, ",")
+	detailsMap := make(map[string]string, len(pairs))
+	for _, pair := range pairs {
+		p := strings.SplitN(pair, "=", 2)
+		// if there is no equals sign, we will only get 1 part back
+		if len(p) != 2 {
+			return nil, errors.New("invalid details format")
+		}
+		k, err := url.QueryUnescape(p[0])
+		if err != nil {
+			return nil, err
+		}
+		v, err := url.QueryUnescape(p[1])
+		if err != nil {
+			return nil, err
+		}
+		detailsMap[k] = v
+	}
+	return detailsMap, nil
+}
diff --git a/components/engine/client/parse_logs_test.go b/components/engine/client/parse_logs_test.go
new file mode 100644
index 0000000000..ac7f61679d
--- /dev/null
+++ b/components/engine/client/parse_logs_test.go
@@ -0,0 +1,36 @@
+package client
+
+import (
+	"reflect"
+	"testing"
+
+	"github.com/pkg/errors"
+)
+
+func TestParseLogDetails(t *testing.T) {
+	testCases := []struct {
+		line     string
+		expected map[string]string
+		err      error
+	}{
+		{"key=value", map[string]string{"key": "value"}, nil},
+		{"key1=value1,key2=value2", map[string]string{"key1": "value1", "key2": "value2"}, nil},
+		{"key+with+spaces=value%3Dequals,asdf%2C=", map[string]string{"key with spaces": "value=equals", "asdf,": ""}, nil},
+		{"key=,=nothing", map[string]string{"key": "", "": "nothing"}, nil},
+		{"=", map[string]string{"": ""}, nil},
+		{"errors", nil, errors.New("invalid details format")},
+	}
+	for _, tc := range testCases {
+		tc := tc // capture range variable
+		t.Run(tc.line, func(t *testing.T) {
+			t.Parallel()
+			res, err := ParseLogDetails(tc.line)
+			if err != nil && (err.Error() != tc.err.Error()) {
+				t.Fatalf("unexpected error parsing logs:\nExpected:\n\t%v\nActual:\n\t%v", tc.err, err)
+			}
+			if !reflect.DeepEqual(tc.expected, res) {
+				t.Errorf("result does not match expected:\nExpected:\n\t%#v\nActual:\n\t%#v", tc.expected, res)
+			}
+		})
+	}
+}
diff --git a/components/engine/client/ping.go b/components/engine/client/ping.go
new file mode 100644
index 0000000000..d6212ef8bb
--- /dev/null
+++ b/components/engine/client/ping.go
@@ -0,0 +1,32 @@
+package client
+
+import (
+	"fmt"
+
+	"github.com/docker/docker/api/types"
+	"golang.org/x/net/context"
+)
+
+// Ping pings the server and returns the values of the "Docker-Experimental", "OSType" and "API-Version" headers
+func (cli *Client) Ping(ctx context.Context) (types.Ping, error) {
+	var ping types.Ping
+	req, err := cli.buildRequest("GET", fmt.Sprintf("%s/_ping", cli.basePath), nil, nil)
+	if err != nil {
+		return ping, err
+	}
+	serverResp, err := cli.doRequest(ctx, req)
+	if err != nil {
+		return ping, err
+	}
+	defer ensureReaderClosed(serverResp)
+
+	ping.APIVersion = serverResp.header.Get("API-Version")
+
+	if serverResp.header.Get("Docker-Experimental") == "true" {
+		ping.Experimental = true
+	}
+
+	ping.OSType = serverResp.header.Get("OSType")
+
+	return ping, nil
+}
diff --git a/components/engine/client/plugin_create.go b/components/engine/client/plugin_create.go
new file mode 100644
index 0000000000..27954aa573
--- /dev/null
+++ b/components/engine/client/plugin_create.go
@@ -0,0 +1,26 @@
+package client
+
+import (
+	"io"
+	"net/http"
+	"net/url"
+
+	"github.com/docker/docker/api/types"
+	"golang.org/x/net/context"
+)
+
+// PluginCreate creates a plugin
+func (cli *Client) PluginCreate(ctx context.Context, createContext
io.Reader, createOptions types.PluginCreateOptions) error { + headers := http.Header(make(map[string][]string)) + headers.Set("Content-Type", "application/x-tar") + + query := url.Values{} + query.Set("name", createOptions.RepoName) + + resp, err := cli.postRaw(ctx, "/plugins/create", query, createContext, headers) + if err != nil { + return err + } + ensureReaderClosed(resp) + return err +} diff --git a/components/engine/client/plugin_disable.go b/components/engine/client/plugin_disable.go new file mode 100644 index 0000000000..30467db742 --- /dev/null +++ b/components/engine/client/plugin_disable.go @@ -0,0 +1,19 @@ +package client + +import ( + "net/url" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// PluginDisable disables a plugin +func (cli *Client) PluginDisable(ctx context.Context, name string, options types.PluginDisableOptions) error { + query := url.Values{} + if options.Force { + query.Set("force", "1") + } + resp, err := cli.post(ctx, "/plugins/"+name+"/disable", query, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/components/engine/client/plugin_disable_test.go b/components/engine/client/plugin_disable_test.go new file mode 100644 index 0000000000..a4de45be2d --- /dev/null +++ b/components/engine/client/plugin_disable_test.go @@ -0,0 +1,48 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +func TestPluginDisableError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + err := client.PluginDisable(context.Background(), "plugin_name", types.PluginDisableOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestPluginDisable(t *testing.T) { + expectedURL := "/plugins/plugin_name/disable" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "POST" { + return nil, fmt.Errorf("expected POST method, got %s", req.Method) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + + err := client.PluginDisable(context.Background(), "plugin_name", types.PluginDisableOptions{}) + if err != nil { + t.Fatal(err) + } +} diff --git a/components/engine/client/plugin_enable.go b/components/engine/client/plugin_enable.go new file mode 100644 index 0000000000..95517c4b80 --- /dev/null +++ b/components/engine/client/plugin_enable.go @@ -0,0 +1,19 @@ +package client + +import ( + "net/url" + "strconv" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// PluginEnable enables a plugin +func (cli *Client) PluginEnable(ctx context.Context, name string, options types.PluginEnableOptions) error { + query := url.Values{} + query.Set("timeout", strconv.Itoa(options.Timeout)) + + resp, err := cli.post(ctx, "/plugins/"+name+"/enable", query, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/components/engine/client/plugin_enable_test.go b/components/engine/client/plugin_enable_test.go new file mode 100644 index 0000000000..b27681348f --- /dev/null +++ b/components/engine/client/plugin_enable_test.go @@ -0,0 +1,48 @@ +package client + +import ( + 
"bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +func TestPluginEnableError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + err := client.PluginEnable(context.Background(), "plugin_name", types.PluginEnableOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestPluginEnable(t *testing.T) { + expectedURL := "/plugins/plugin_name/enable" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "POST" { + return nil, fmt.Errorf("expected POST method, got %s", req.Method) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + + err := client.PluginEnable(context.Background(), "plugin_name", types.PluginEnableOptions{}) + if err != nil { + t.Fatal(err) + } +} diff --git a/components/engine/client/plugin_inspect.go b/components/engine/client/plugin_inspect.go new file mode 100644 index 0000000000..89f39ee2c6 --- /dev/null +++ b/components/engine/client/plugin_inspect.go @@ -0,0 +1,32 @@ +package client + +import ( + "bytes" + "encoding/json" + "io/ioutil" + "net/http" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// PluginInspectWithRaw inspects an existing plugin +func (cli *Client) PluginInspectWithRaw(ctx context.Context, name string) (*types.Plugin, []byte, error) { + resp, err := cli.get(ctx, "/plugins/"+name+"/json", nil, nil) + if err != nil { + if resp.statusCode == http.StatusNotFound { + return nil, nil, pluginNotFoundError{name} + } + return nil, nil, err + } + + defer ensureReaderClosed(resp) + body, err := ioutil.ReadAll(resp.body) + if err != nil { + return nil, nil, err + } + var p types.Plugin + rdr := bytes.NewReader(body) + err = json.NewDecoder(rdr).Decode(&p) + return &p, body, err +} diff --git a/components/engine/client/plugin_inspect_test.go b/components/engine/client/plugin_inspect_test.go new file mode 100644 index 0000000000..fae407eb9b --- /dev/null +++ b/components/engine/client/plugin_inspect_test.go @@ -0,0 +1,54 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +func TestPluginInspectError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, _, err := client.PluginInspectWithRaw(context.Background(), "nothing") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestPluginInspect(t *testing.T) { + expectedURL := "/plugins/plugin_name" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + content, err := json.Marshal(types.Plugin{ + ID: "plugin_id", + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), 
+ } + + pluginInspect, _, err := client.PluginInspectWithRaw(context.Background(), "plugin_name") + if err != nil { + t.Fatal(err) + } + if pluginInspect.ID != "plugin_id" { + t.Fatalf("expected `plugin_id`, got %s", pluginInspect.ID) + } +} diff --git a/components/engine/client/plugin_install.go b/components/engine/client/plugin_install.go new file mode 100644 index 0000000000..ce3e0506e5 --- /dev/null +++ b/components/engine/client/plugin_install.go @@ -0,0 +1,113 @@ +package client + +import ( + "encoding/json" + "io" + "net/http" + "net/url" + + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" + "github.com/pkg/errors" + "golang.org/x/net/context" +) + +// PluginInstall installs a plugin +func (cli *Client) PluginInstall(ctx context.Context, name string, options types.PluginInstallOptions) (rc io.ReadCloser, err error) { + query := url.Values{} + if _, err := reference.ParseNormalizedNamed(options.RemoteRef); err != nil { + return nil, errors.Wrap(err, "invalid remote reference") + } + query.Set("remote", options.RemoteRef) + + privileges, err := cli.checkPluginPermissions(ctx, query, options) + if err != nil { + return nil, err + } + + // set name for plugin pull, if empty should default to remote reference + query.Set("name", name) + + resp, err := cli.tryPluginPull(ctx, query, privileges, options.RegistryAuth) + if err != nil { + return nil, err + } + + name = resp.header.Get("Docker-Plugin-Name") + + pr, pw := io.Pipe() + go func() { // todo: the client should probably be designed more around the actual api + _, err := io.Copy(pw, resp.body) + if err != nil { + pw.CloseWithError(err) + return + } + defer func() { + if err != nil { + delResp, _ := cli.delete(ctx, "/plugins/"+name, nil, nil) + ensureReaderClosed(delResp) + } + }() + if len(options.Args) > 0 { + if err := cli.PluginSet(ctx, name, options.Args); err != nil { + pw.CloseWithError(err) + return + } + } + + if options.Disabled { + pw.Close() + return + } + + enableErr := cli.PluginEnable(ctx, name, types.PluginEnableOptions{Timeout: 0}) + pw.CloseWithError(enableErr) + }() + return pr, nil +} + +func (cli *Client) tryPluginPrivileges(ctx context.Context, query url.Values, registryAuth string) (serverResponse, error) { + headers := map[string][]string{"X-Registry-Auth": {registryAuth}} + return cli.get(ctx, "/plugins/privileges", query, headers) +} + +func (cli *Client) tryPluginPull(ctx context.Context, query url.Values, privileges types.PluginPrivileges, registryAuth string) (serverResponse, error) { + headers := map[string][]string{"X-Registry-Auth": {registryAuth}} + return cli.post(ctx, "/plugins/pull", query, privileges, headers) +} + +func (cli *Client) checkPluginPermissions(ctx context.Context, query url.Values, options types.PluginInstallOptions) (types.PluginPrivileges, error) { + resp, err := cli.tryPluginPrivileges(ctx, query, options.RegistryAuth) + if resp.statusCode == http.StatusUnauthorized && options.PrivilegeFunc != nil { + // todo: do inspect before to check existing name before checking privileges + newAuthHeader, privilegeErr := options.PrivilegeFunc() + if privilegeErr != nil { + ensureReaderClosed(resp) + return nil, privilegeErr + } + options.RegistryAuth = newAuthHeader + resp, err = cli.tryPluginPrivileges(ctx, query, options.RegistryAuth) + } + if err != nil { + ensureReaderClosed(resp) + return nil, err + } + + var privileges types.PluginPrivileges + if err := json.NewDecoder(resp.body).Decode(&privileges); err != nil { + ensureReaderClosed(resp) + return nil, 
err + } + ensureReaderClosed(resp) + + if !options.AcceptAllPermissions && options.AcceptPermissionsFunc != nil && len(privileges) > 0 { + accept, err := options.AcceptPermissionsFunc(privileges) + if err != nil { + return nil, err + } + if !accept { + return nil, pluginPermissionDenied{options.RemoteRef} + } + } + return privileges, nil +} diff --git a/components/engine/client/plugin_list.go b/components/engine/client/plugin_list.go new file mode 100644 index 0000000000..3acde3b966 --- /dev/null +++ b/components/engine/client/plugin_list.go @@ -0,0 +1,32 @@ +package client + +import ( + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "golang.org/x/net/context" +) + +// PluginList returns the installed plugins +func (cli *Client) PluginList(ctx context.Context, filter filters.Args) (types.PluginsListResponse, error) { + var plugins types.PluginsListResponse + query := url.Values{} + + if filter.Len() > 0 { + filterJSON, err := filters.ToParamWithVersion(cli.version, filter) + if err != nil { + return plugins, err + } + query.Set("filters", filterJSON) + } + resp, err := cli.get(ctx, "/plugins", query, nil) + if err != nil { + return plugins, err + } + + err = json.NewDecoder(resp.body).Decode(&plugins) + ensureReaderClosed(resp) + return plugins, err +} diff --git a/components/engine/client/plugin_list_test.go b/components/engine/client/plugin_list_test.go new file mode 100644 index 0000000000..6887079b42 --- /dev/null +++ b/components/engine/client/plugin_list_test.go @@ -0,0 +1,107 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "golang.org/x/net/context" +) + +func TestPluginListError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, err := client.PluginList(context.Background(), filters.NewArgs()) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestPluginList(t *testing.T) { + expectedURL := "/plugins" + + enabledFilters := filters.NewArgs() + enabledFilters.Add("enabled", "true") + + capabilityFilters := filters.NewArgs() + capabilityFilters.Add("capability", "volumedriver") + capabilityFilters.Add("capability", "authz") + + listCases := []struct { + filters filters.Args + expectedQueryParams map[string]string + }{ + { + filters: filters.NewArgs(), + expectedQueryParams: map[string]string{ + "all": "", + "filter": "", + "filters": "", + }, + }, + { + filters: enabledFilters, + expectedQueryParams: map[string]string{ + "all": "", + "filter": "", + "filters": `{"enabled":{"true":true}}`, + }, + }, + { + filters: capabilityFilters, + expectedQueryParams: map[string]string{ + "all": "", + "filter": "", + "filters": `{"capability":{"authz":true,"volumedriver":true}}`, + }, + }, + } + + for _, listCase := range listCases { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + query := req.URL.Query() + for key, expected := range listCase.expectedQueryParams { + actual := query.Get(key) + if actual != expected { + return nil, fmt.Errorf("%s not set in URL query properly. 
Expected '%s', got %s", key, expected, actual) + } + } + content, err := json.Marshal([]*types.Plugin{ + { + ID: "plugin_id1", + }, + { + ID: "plugin_id2", + }, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + plugins, err := client.PluginList(context.Background(), listCase.filters) + if err != nil { + t.Fatal(err) + } + if len(plugins) != 2 { + t.Fatalf("expected 2 plugins, got %v", plugins) + } + } +} diff --git a/components/engine/client/plugin_push.go b/components/engine/client/plugin_push.go new file mode 100644 index 0000000000..1e5f963251 --- /dev/null +++ b/components/engine/client/plugin_push.go @@ -0,0 +1,17 @@ +package client + +import ( + "io" + + "golang.org/x/net/context" +) + +// PluginPush pushes a plugin to a registry +func (cli *Client) PluginPush(ctx context.Context, name string, registryAuth string) (io.ReadCloser, error) { + headers := map[string][]string{"X-Registry-Auth": {registryAuth}} + resp, err := cli.post(ctx, "/plugins/"+name+"/push", nil, nil, headers) + if err != nil { + return nil, err + } + return resp.body, nil +} diff --git a/components/engine/client/plugin_push_test.go b/components/engine/client/plugin_push_test.go new file mode 100644 index 0000000000..d9f70cdff8 --- /dev/null +++ b/components/engine/client/plugin_push_test.go @@ -0,0 +1,51 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" +) + +func TestPluginPushError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, err := client.PluginPush(context.Background(), "plugin_name", "") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestPluginPush(t *testing.T) { + expectedURL := "/plugins/plugin_name" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "POST" { + return nil, fmt.Errorf("expected POST method, got %s", req.Method) + } + auth := req.Header.Get("X-Registry-Auth") + if auth != "authtoken" { + return nil, fmt.Errorf("Invalid auth header : expected 'authtoken', got %s", auth) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + + _, err := client.PluginPush(context.Background(), "plugin_name", "authtoken") + if err != nil { + t.Fatal(err) + } +} diff --git a/components/engine/client/plugin_remove.go b/components/engine/client/plugin_remove.go new file mode 100644 index 0000000000..b017e4d348 --- /dev/null +++ b/components/engine/client/plugin_remove.go @@ -0,0 +1,20 @@ +package client + +import ( + "net/url" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// PluginRemove removes a plugin +func (cli *Client) PluginRemove(ctx context.Context, name string, options types.PluginRemoveOptions) error { + query := url.Values{} + if options.Force { + query.Set("force", "1") + } + + resp, err := cli.delete(ctx, "/plugins/"+name, query, nil) + ensureReaderClosed(resp) + return err +} diff --git a/components/engine/client/plugin_remove_test.go b/components/engine/client/plugin_remove_test.go new file mode 100644 index 
0000000000..b2d515793a --- /dev/null +++ b/components/engine/client/plugin_remove_test.go @@ -0,0 +1,49 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + + "golang.org/x/net/context" +) + +func TestPluginRemoveError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + err := client.PluginRemove(context.Background(), "plugin_name", types.PluginRemoveOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestPluginRemove(t *testing.T) { + expectedURL := "/plugins/plugin_name" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "DELETE" { + return nil, fmt.Errorf("expected DELETE method, got %s", req.Method) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + + err := client.PluginRemove(context.Background(), "plugin_name", types.PluginRemoveOptions{}) + if err != nil { + t.Fatal(err) + } +} diff --git a/components/engine/client/plugin_set.go b/components/engine/client/plugin_set.go new file mode 100644 index 0000000000..3260d2a90d --- /dev/null +++ b/components/engine/client/plugin_set.go @@ -0,0 +1,12 @@ +package client + +import ( + "golang.org/x/net/context" +) + +// PluginSet modifies settings for an existing plugin +func (cli *Client) PluginSet(ctx context.Context, name string, args []string) error { + resp, err := cli.post(ctx, "/plugins/"+name+"/set", nil, args, nil) + ensureReaderClosed(resp) + return err +} diff --git a/components/engine/client/plugin_set_test.go b/components/engine/client/plugin_set_test.go new file mode 100644 index 0000000000..2450254463 --- /dev/null +++ b/components/engine/client/plugin_set_test.go @@ -0,0 +1,47 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" +) + +func TestPluginSetError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + err := client.PluginSet(context.Background(), "plugin_name", []string{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestPluginSet(t *testing.T) { + expectedURL := "/plugins/plugin_name/set" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "POST" { + return nil, fmt.Errorf("expected POST method, got %s", req.Method) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + + err := client.PluginSet(context.Background(), "plugin_name", []string{"arg1"}) + if err != nil { + t.Fatal(err) + } +} diff --git a/components/engine/client/plugin_upgrade.go b/components/engine/client/plugin_upgrade.go new file mode 100644 index 0000000000..24293c5073 --- /dev/null +++ b/components/engine/client/plugin_upgrade.go @@ -0,0 +1,37 @@ +package client + +import ( + "fmt" + 
"io" + "net/url" + + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" + "github.com/pkg/errors" + "golang.org/x/net/context" +) + +// PluginUpgrade upgrades a plugin +func (cli *Client) PluginUpgrade(ctx context.Context, name string, options types.PluginInstallOptions) (rc io.ReadCloser, err error) { + query := url.Values{} + if _, err := reference.ParseNormalizedNamed(options.RemoteRef); err != nil { + return nil, errors.Wrap(err, "invalid remote reference") + } + query.Set("remote", options.RemoteRef) + + privileges, err := cli.checkPluginPermissions(ctx, query, options) + if err != nil { + return nil, err + } + + resp, err := cli.tryPluginUpgrade(ctx, query, privileges, name, options.RegistryAuth) + if err != nil { + return nil, err + } + return resp.body, nil +} + +func (cli *Client) tryPluginUpgrade(ctx context.Context, query url.Values, privileges types.PluginPrivileges, name, registryAuth string) (serverResponse, error) { + headers := map[string][]string{"X-Registry-Auth": {registryAuth}} + return cli.post(ctx, fmt.Sprintf("/plugins/%s/upgrade", name), query, privileges, headers) +} diff --git a/components/engine/client/request.go b/components/engine/client/request.go new file mode 100644 index 0000000000..6457b316a3 --- /dev/null +++ b/components/engine/client/request.go @@ -0,0 +1,247 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net" + "net/http" + "net/url" + "os" + "strings" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/versions" + "github.com/pkg/errors" + "golang.org/x/net/context" + "golang.org/x/net/context/ctxhttp" +) + +// serverResponse is a wrapper for http API responses. +type serverResponse struct { + body io.ReadCloser + header http.Header + statusCode int +} + +// head sends an http request to the docker API using the method HEAD. +func (cli *Client) head(ctx context.Context, path string, query url.Values, headers map[string][]string) (serverResponse, error) { + return cli.sendRequest(ctx, "HEAD", path, query, nil, headers) +} + +// get sends an http request to the docker API using the method GET with a specific Go context. +func (cli *Client) get(ctx context.Context, path string, query url.Values, headers map[string][]string) (serverResponse, error) { + return cli.sendRequest(ctx, "GET", path, query, nil, headers) +} + +// post sends an http request to the docker API using the method POST with a specific Go context. +func (cli *Client) post(ctx context.Context, path string, query url.Values, obj interface{}, headers map[string][]string) (serverResponse, error) { + body, headers, err := encodeBody(obj, headers) + if err != nil { + return serverResponse{}, err + } + return cli.sendRequest(ctx, "POST", path, query, body, headers) +} + +func (cli *Client) postRaw(ctx context.Context, path string, query url.Values, body io.Reader, headers map[string][]string) (serverResponse, error) { + return cli.sendRequest(ctx, "POST", path, query, body, headers) +} + +// put sends an http request to the docker API using the method PUT. +func (cli *Client) put(ctx context.Context, path string, query url.Values, obj interface{}, headers map[string][]string) (serverResponse, error) { + body, headers, err := encodeBody(obj, headers) + if err != nil { + return serverResponse{}, err + } + return cli.sendRequest(ctx, "PUT", path, query, body, headers) +} + +// putRaw sends an http request to the docker API using the method PUT. 
+func (cli *Client) putRaw(ctx context.Context, path string, query url.Values, body io.Reader, headers map[string][]string) (serverResponse, error) { + return cli.sendRequest(ctx, "PUT", path, query, body, headers) +} + +// delete sends an http request to the docker API using the method DELETE. +func (cli *Client) delete(ctx context.Context, path string, query url.Values, headers map[string][]string) (serverResponse, error) { + return cli.sendRequest(ctx, "DELETE", path, query, nil, headers) +} + +type headers map[string][]string + +func encodeBody(obj interface{}, headers headers) (io.Reader, headers, error) { + if obj == nil { + return nil, headers, nil + } + + body, err := encodeData(obj) + if err != nil { + return nil, headers, err + } + if headers == nil { + headers = make(map[string][]string) + } + headers["Content-Type"] = []string{"application/json"} + return body, headers, nil +} + +func (cli *Client) buildRequest(method, path string, body io.Reader, headers headers) (*http.Request, error) { + expectedPayload := (method == "POST" || method == "PUT") + if expectedPayload && body == nil { + body = bytes.NewReader([]byte{}) + } + + req, err := http.NewRequest(method, path, body) + if err != nil { + return nil, err + } + req = cli.addHeaders(req, headers) + + if cli.proto == "unix" || cli.proto == "npipe" { + // For local communications, it doesn't matter what the host is. We just + // need a valid and meaningful host name. (See #189) + req.Host = "docker" + } + + req.URL.Host = cli.addr + req.URL.Scheme = cli.scheme + + if expectedPayload && req.Header.Get("Content-Type") == "" { + req.Header.Set("Content-Type", "text/plain") + } + return req, nil +} + +func (cli *Client) sendRequest(ctx context.Context, method, path string, query url.Values, body io.Reader, headers headers) (serverResponse, error) { + req, err := cli.buildRequest(method, cli.getAPIPath(path, query), body, headers) + if err != nil { + return serverResponse{}, err + } + return cli.doRequest(ctx, req) +} + +func (cli *Client) doRequest(ctx context.Context, req *http.Request) (serverResponse, error) { + serverResp := serverResponse{statusCode: -1} + + resp, err := ctxhttp.Do(ctx, cli.client, req) + if err != nil { + if cli.scheme != "https" && strings.Contains(err.Error(), "malformed HTTP response") { + return serverResp, fmt.Errorf("%v.\n* Are you trying to connect to a TLS-enabled daemon without TLS?", err) + } + + if cli.scheme == "https" && strings.Contains(err.Error(), "bad certificate") { + return serverResp, fmt.Errorf("The server probably has client authentication (--tlsverify) enabled. Please check your TLS client certification settings: %v", err) + } + + // Don't decorate context sentinel errors; users may be comparing to + // them directly. 
+	switch err { +	case context.Canceled, context.DeadlineExceeded: +		return serverResp, err +	} + +	if nErr, ok := err.(*url.Error); ok { +		if nErr, ok := nErr.Err.(*net.OpError); ok { +			if os.IsPermission(nErr.Err) { +				return serverResp, errors.Wrapf(err, "Got permission denied while trying to connect to the Docker daemon socket at %v", cli.host) +			} +		} +	} + +	if err, ok := err.(net.Error); ok { +		if err.Timeout() { +			return serverResp, ErrorConnectionFailed(cli.host) +		} +		if !err.Temporary() { +			if strings.Contains(err.Error(), "connection refused") || strings.Contains(err.Error(), "dial unix") { +				return serverResp, ErrorConnectionFailed(cli.host) +			} +		} +	} + +	// Although there's not a strongly typed error for this in go-winio, +	// lots of people are using the default configuration for the docker +	// daemon on Windows where the daemon is listening on a named pipe +	// `//./pipe/docker_engine`, and the client must be running elevated. +	// Give users a clue rather than the not-overly useful message +	// such as `error during connect: Get http://%2F%2F.%2Fpipe%2Fdocker_engine/v1.26/info: +	// open //./pipe/docker_engine: The system cannot find the file specified.`. +	// Note we can't string compare "The system cannot find the file specified" as +	// this is localised - for example in French the error would be +	// `open //./pipe/docker_engine: Le fichier spécifié est introuvable.` +	if strings.Contains(err.Error(), `open //./pipe/docker_engine`) { +		err = errors.New(err.Error() + " In the default daemon configuration on Windows, the docker client must be run elevated to connect. This error may also indicate that the docker daemon is not running.") +	} + +	return serverResp, errors.Wrap(err, "error during connect") +	} + +	if resp != nil { +		serverResp.statusCode = resp.StatusCode +	} + +	if serverResp.statusCode < 200 || serverResp.statusCode >= 400 { +		body, err := ioutil.ReadAll(resp.Body) +		if err != nil { +			return serverResp, err +		} +		if len(body) == 0 { +			return serverResp, fmt.Errorf("Error: request returned %s for API route and version %s, check if the server supports the requested API version", http.StatusText(serverResp.statusCode), req.URL) +		} + +		var errorMessage string +		if (cli.version == "" || versions.GreaterThan(cli.version, "1.23")) && +			resp.Header.Get("Content-Type") == "application/json" { +			var errorResponse types.ErrorResponse +			if err := json.Unmarshal(body, &errorResponse); err != nil { +				return serverResp, fmt.Errorf("Error reading JSON: %v", err) +			} +			errorMessage = errorResponse.Message +		} else { +			errorMessage = string(body) +		} + +		return serverResp, fmt.Errorf("Error response from daemon: %s", strings.TrimSpace(errorMessage)) +	} + +	serverResp.body = resp.Body +	serverResp.header = resp.Header +	return serverResp, nil +} + +func (cli *Client) addHeaders(req *http.Request, headers headers) *http.Request { +	// Add CLI Config's HTTP Headers BEFORE we set the Docker headers +	// so the user can't change OUR headers +	for k, v := range cli.customHTTPHeaders { +		if versions.LessThan(cli.version, "1.25") && k == "User-Agent" { +			continue +		} +		req.Header.Set(k, v) +	} + +	if headers != nil { +		for k, v := range headers { +			req.Header[k] = v +		} +	} +	return req +} + +func encodeData(data interface{}) (*bytes.Buffer, error) { +	params := bytes.NewBuffer(nil) +	if data != nil { +		if err := json.NewEncoder(params).Encode(data); err != nil { +			return nil, err +		} +	} +	return params, nil +} + +func ensureReaderClosed(response serverResponse) { +	if body :=
response.body; body != nil { + // Drain up to 512 bytes and close the body to let the Transport reuse the connection + io.CopyN(ioutil.Discard, body, 512) + response.body.Close() + } +} diff --git a/components/engine/client/request_test.go b/components/engine/client/request_test.go new file mode 100644 index 0000000000..63908aec4b --- /dev/null +++ b/components/engine/client/request_test.go @@ -0,0 +1,92 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// TestSetHostHeader should set fake host for local communications, set real host +// for normal communications. +func TestSetHostHeader(t *testing.T) { + testURL := "/test" + testCases := []struct { + host string + expectedHost string + expectedURLHost string + }{ + { + "unix:///var/run/docker.sock", + "docker", + "/var/run/docker.sock", + }, + { + "npipe:////./pipe/docker_engine", + "docker", + "//./pipe/docker_engine", + }, + { + "tcp://0.0.0.0:4243", + "", + "0.0.0.0:4243", + }, + { + "tcp://localhost:4243", + "", + "localhost:4243", + }, + } + + for c, test := range testCases { + proto, addr, basePath, err := ParseHost(test.host) + if err != nil { + t.Fatal(err) + } + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, testURL) { + return nil, fmt.Errorf("Test Case #%d: Expected URL %q, got %q", c, testURL, req.URL) + } + if req.Host != test.expectedHost { + return nil, fmt.Errorf("Test Case #%d: Expected host %q, got %q", c, test.expectedHost, req.Host) + } + if req.URL.Host != test.expectedURLHost { + return nil, fmt.Errorf("Test Case #%d: Expected URL host %q, got %q", c, test.expectedURLHost, req.URL.Host) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(([]byte("")))), + }, nil + }), + + proto: proto, + addr: addr, + basePath: basePath, + } + + _, err = client.sendRequest(context.Background(), "GET", testURL, nil, nil, nil) + if err != nil { + t.Fatal(err) + } + } +} + +// TestPlainTextError tests the server returning an error in plain text for +// backwards compatibility with API versions <1.24. All other tests use +// errors returned as JSON +func TestPlainTextError(t *testing.T) { + client := &Client{ + client: newMockClient(plainTextErrorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.ContainerList(context.Background(), types.ContainerListOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} diff --git a/components/engine/client/secret_create.go b/components/engine/client/secret_create.go new file mode 100644 index 0000000000..b5325a560f --- /dev/null +++ b/components/engine/client/secret_create.go @@ -0,0 +1,22 @@ +package client + +import ( + "encoding/json" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +// SecretCreate creates a new Secret. 
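+// +// A minimal usage sketch (illustrative only; assumes cli was built with +// NewEnvClient and ctx is a context.Context): +// +//	resp, err := cli.SecretCreate(ctx, swarm.SecretSpec{ +//		Annotations: swarm.Annotations{Name: "my_secret"}, // hypothetical name +//		Data:        []byte("payload"), +//	}) +//	// resp.ID identifies the created secret when err is nil.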
+func (cli *Client) SecretCreate(ctx context.Context, secret swarm.SecretSpec) (types.SecretCreateResponse, error) { +	var response types.SecretCreateResponse +	resp, err := cli.post(ctx, "/secrets/create", nil, secret, nil) +	if err != nil { +		return response, err +	} + +	err = json.NewDecoder(resp.body).Decode(&response) +	ensureReaderClosed(resp) +	return response, err +} diff --git a/components/engine/client/secret_create_test.go b/components/engine/client/secret_create_test.go new file mode 100644 index 0000000000..cb378c77ff --- /dev/null +++ b/components/engine/client/secret_create_test.go @@ -0,0 +1,57 @@ +package client + +import ( +	"bytes" +	"encoding/json" +	"fmt" +	"io/ioutil" +	"net/http" +	"strings" +	"testing" + +	"github.com/docker/docker/api/types" +	"github.com/docker/docker/api/types/swarm" +	"golang.org/x/net/context" +) + +func TestSecretCreateError(t *testing.T) { +	client := &Client{ +		client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), +	} +	_, err := client.SecretCreate(context.Background(), swarm.SecretSpec{}) +	if err == nil || err.Error() != "Error response from daemon: Server error" { +		t.Fatalf("expected a Server Error, got %v", err) +	} +} + +func TestSecretCreate(t *testing.T) { +	expectedURL := "/secrets/create" +	client := &Client{ +		client: newMockClient(func(req *http.Request) (*http.Response, error) { +			if !strings.HasPrefix(req.URL.Path, expectedURL) { +				return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) +			} +			if req.Method != "POST" { +				return nil, fmt.Errorf("expected POST method, got %s", req.Method) +			} +			b, err := json.Marshal(types.SecretCreateResponse{ +				ID: "test_secret", +			}) +			if err != nil { +				return nil, err +			} +			return &http.Response{ +				StatusCode: http.StatusCreated, +				Body:       ioutil.NopCloser(bytes.NewReader(b)), +			}, nil +		}), +	} + +	r, err := client.SecretCreate(context.Background(), swarm.SecretSpec{}) +	if err != nil { +		t.Fatal(err) +	} +	if r.ID != "test_secret" { +		t.Fatalf("expected `test_secret`, got %s", r.ID) +	} +} diff --git a/components/engine/client/secret_inspect.go b/components/engine/client/secret_inspect.go new file mode 100644 index 0000000000..f774576118 --- /dev/null +++ b/components/engine/client/secret_inspect.go @@ -0,0 +1,34 @@ +package client + +import ( +	"bytes" +	"encoding/json" +	"io/ioutil" +	"net/http" + +	"github.com/docker/docker/api/types/swarm" +	"golang.org/x/net/context" +) + +// SecretInspectWithRaw returns the secret information with raw data +func (cli *Client) SecretInspectWithRaw(ctx context.Context, id string) (swarm.Secret, []byte, error) { +	resp, err := cli.get(ctx, "/secrets/"+id, nil, nil) +	if err != nil { +		if resp.statusCode == http.StatusNotFound { +			return swarm.Secret{}, nil, secretNotFoundError{id} +		} +		return swarm.Secret{}, nil, err +	} +	defer ensureReaderClosed(resp) + +	body, err := ioutil.ReadAll(resp.body) +	if err != nil { +		return swarm.Secret{}, nil, err +	} + +	var secret swarm.Secret +	rdr := bytes.NewReader(body) +	err = json.NewDecoder(rdr).Decode(&secret) + +	return secret, body, err +} diff --git a/components/engine/client/secret_inspect_test.go b/components/engine/client/secret_inspect_test.go new file mode 100644 index 0000000000..0142a3ca9f --- /dev/null +++ b/components/engine/client/secret_inspect_test.go @@ -0,0 +1,65 @@ +package client + +import ( +	"bytes" +	"encoding/json" +	"fmt" +	"io/ioutil" +	"net/http" +	"strings" +	"testing" + +	"github.com/docker/docker/api/types/swarm" +
"golang.org/x/net/context" +) + +func TestSecretInspectError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, _, err := client.SecretInspectWithRaw(context.Background(), "nothing") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestSecretInspectSecretNotFound(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusNotFound, "Server error")), + } + + _, _, err := client.SecretInspectWithRaw(context.Background(), "unknown") + if err == nil || !IsErrSecretNotFound(err) { + t.Fatalf("expected a secretNotFoundError error, got %v", err) + } +} + +func TestSecretInspect(t *testing.T) { + expectedURL := "/secrets/secret_id" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + content, err := json.Marshal(swarm.Secret{ + ID: "secret_id", + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + secretInspect, _, err := client.SecretInspectWithRaw(context.Background(), "secret_id") + if err != nil { + t.Fatal(err) + } + if secretInspect.ID != "secret_id" { + t.Fatalf("expected `secret_id`, got %s", secretInspect.ID) + } +} diff --git a/components/engine/client/secret_list.go b/components/engine/client/secret_list.go new file mode 100644 index 0000000000..7e9d5ec167 --- /dev/null +++ b/components/engine/client/secret_list.go @@ -0,0 +1,35 @@ +package client + +import ( + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +// SecretList returns the list of secrets. 
+func (cli *Client) SecretList(ctx context.Context, options types.SecretListOptions) ([]swarm.Secret, error) { + query := url.Values{} + + if options.Filters.Len() > 0 { + filterJSON, err := filters.ToParam(options.Filters) + if err != nil { + return nil, err + } + + query.Set("filters", filterJSON) + } + + resp, err := cli.get(ctx, "/secrets", query, nil) + if err != nil { + return nil, err + } + + var secrets []swarm.Secret + err = json.NewDecoder(resp.body).Decode(&secrets) + ensureReaderClosed(resp) + return secrets, err +} diff --git a/components/engine/client/secret_list_test.go b/components/engine/client/secret_list_test.go new file mode 100644 index 0000000000..1ac11cddb3 --- /dev/null +++ b/components/engine/client/secret_list_test.go @@ -0,0 +1,94 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +func TestSecretListError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, err := client.SecretList(context.Background(), types.SecretListOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestSecretList(t *testing.T) { + expectedURL := "/secrets" + + filters := filters.NewArgs() + filters.Add("label", "label1") + filters.Add("label", "label2") + + listCases := []struct { + options types.SecretListOptions + expectedQueryParams map[string]string + }{ + { + options: types.SecretListOptions{}, + expectedQueryParams: map[string]string{ + "filters": "", + }, + }, + { + options: types.SecretListOptions{ + Filters: filters, + }, + expectedQueryParams: map[string]string{ + "filters": `{"label":{"label1":true,"label2":true}}`, + }, + }, + } + for _, listCase := range listCases { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + query := req.URL.Query() + for key, expected := range listCase.expectedQueryParams { + actual := query.Get(key) + if actual != expected { + return nil, fmt.Errorf("%s not set in URL query properly. Expected '%s', got %s", key, expected, actual) + } + } + content, err := json.Marshal([]swarm.Secret{ + { + ID: "secret_id1", + }, + { + ID: "secret_id2", + }, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + secrets, err := client.SecretList(context.Background(), listCase.options) + if err != nil { + t.Fatal(err) + } + if len(secrets) != 2 { + t.Fatalf("expected 2 secrets, got %v", secrets) + } + } +} diff --git a/components/engine/client/secret_remove.go b/components/engine/client/secret_remove.go new file mode 100644 index 0000000000..1955b988a9 --- /dev/null +++ b/components/engine/client/secret_remove.go @@ -0,0 +1,10 @@ +package client + +import "golang.org/x/net/context" + +// SecretRemove removes a Secret. 
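+// +// Usage sketch (illustrative only): err := cli.SecretRemove(ctx, "secret_id"); +// the secret is addressed by its ID and nothing is returned on success.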
+func (cli *Client) SecretRemove(ctx context.Context, id string) error { + resp, err := cli.delete(ctx, "/secrets/"+id, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/components/engine/client/secret_remove_test.go b/components/engine/client/secret_remove_test.go new file mode 100644 index 0000000000..f269f787d2 --- /dev/null +++ b/components/engine/client/secret_remove_test.go @@ -0,0 +1,47 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" +) + +func TestSecretRemoveError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + err := client.SecretRemove(context.Background(), "secret_id") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestSecretRemove(t *testing.T) { + expectedURL := "/secrets/secret_id" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "DELETE" { + return nil, fmt.Errorf("expected DELETE method, got %s", req.Method) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte("body"))), + }, nil + }), + } + + err := client.SecretRemove(context.Background(), "secret_id") + if err != nil { + t.Fatal(err) + } +} diff --git a/components/engine/client/secret_update.go b/components/engine/client/secret_update.go new file mode 100644 index 0000000000..3af5287068 --- /dev/null +++ b/components/engine/client/secret_update.go @@ -0,0 +1,18 @@ +package client + +import ( + "net/url" + "strconv" + + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +// SecretUpdate attempts to update a Secret +func (cli *Client) SecretUpdate(ctx context.Context, id string, version swarm.Version, secret swarm.SecretSpec) error { + query := url.Values{} + query.Set("version", strconv.FormatUint(version.Index, 10)) + resp, err := cli.post(ctx, "/secrets/"+id+"/update", query, secret, nil) + ensureReaderClosed(resp) + return err +} diff --git a/components/engine/client/secret_update_test.go b/components/engine/client/secret_update_test.go new file mode 100644 index 0000000000..c620985bd5 --- /dev/null +++ b/components/engine/client/secret_update_test.go @@ -0,0 +1,49 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types/swarm" +) + +func TestSecretUpdateError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + err := client.SecretUpdate(context.Background(), "secret_id", swarm.Version{}, swarm.SecretSpec{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestSecretUpdate(t *testing.T) { + expectedURL := "/secrets/secret_id/update" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "POST" { + return nil, fmt.Errorf("expected POST method, got %s", req.Method) + } + return &http.Response{ + 
StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte("body"))), + }, nil + }), + } + + err := client.SecretUpdate(context.Background(), "secret_id", swarm.Version{}, swarm.SecretSpec{}) + if err != nil { + t.Fatal(err) + } +} diff --git a/components/engine/client/service_create.go b/components/engine/client/service_create.go new file mode 100644 index 0000000000..e78f35fb8f --- /dev/null +++ b/components/engine/client/service_create.go @@ -0,0 +1,95 @@ +package client + +import ( + "encoding/json" + "fmt" + + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" + registrytypes "github.com/docker/docker/api/types/registry" + "github.com/docker/docker/api/types/swarm" + "github.com/opencontainers/go-digest" + "golang.org/x/net/context" +) + +// ServiceCreate creates a new Service. +func (cli *Client) ServiceCreate(ctx context.Context, service swarm.ServiceSpec, options types.ServiceCreateOptions) (types.ServiceCreateResponse, error) { + var distErr error + + headers := map[string][]string{ + "version": {cli.version}, + } + + if options.EncodedRegistryAuth != "" { + headers["X-Registry-Auth"] = []string{options.EncodedRegistryAuth} + } + + // Contact the registry to retrieve digest and platform information + if options.QueryRegistry { + distributionInspect, err := cli.DistributionInspect(ctx, service.TaskTemplate.ContainerSpec.Image, options.EncodedRegistryAuth) + distErr = err + if err == nil { + // now pin by digest if the image doesn't already contain a digest + img := imageWithDigestString(service.TaskTemplate.ContainerSpec.Image, distributionInspect.Descriptor.Digest) + if img != "" { + service.TaskTemplate.ContainerSpec.Image = img + } + // add platforms that are compatible with the service + service.TaskTemplate.Placement = updateServicePlatforms(service.TaskTemplate.Placement, distributionInspect) + } + } + var response types.ServiceCreateResponse + resp, err := cli.post(ctx, "/services/create", nil, service, headers) + if err != nil { + return response, err + } + + err = json.NewDecoder(resp.body).Decode(&response) + + if distErr != nil { + response.Warnings = append(response.Warnings, digestWarning(service.TaskTemplate.ContainerSpec.Image)) + } + + ensureReaderClosed(resp) + return response, err +} + +// imageWithDigestString takes an image string and a digest, and updates +// the image string if it didn't originally contain a digest. 
It assumes +// that the image string is not an image ID +func imageWithDigestString(image string, dgst digest.Digest) string { +	ref, err := reference.ParseAnyReference(image) +	if err == nil { +		if _, isCanonical := ref.(reference.Canonical); !isCanonical { +			namedRef, _ := ref.(reference.Named) +			img, err := reference.WithDigest(namedRef, dgst) +			if err == nil { +				return img.String() +			} +		} +	} +	return "" +} + +// updateServicePlatforms updates the Platforms in swarm.Placement to list +// all compatible platforms for the service, as found in distributionInspect +// and returns a pointer to the new or updated swarm.Placement struct +func updateServicePlatforms(placement *swarm.Placement, distributionInspect registrytypes.DistributionInspect) *swarm.Placement { +	if placement == nil { +		placement = &swarm.Placement{} +	} +	for _, p := range distributionInspect.Platforms { +		placement.Platforms = append(placement.Platforms, swarm.Platform{ +			Architecture: p.Architecture, +			OS:           p.OS, +		}) +	} +	return placement +} + +// digestWarning constructs a formatted warning string using the +// image name that could not be pinned by digest. The formatting +// is hardcoded, but could be made smarter in the future +func digestWarning(image string) string { +	return fmt.Sprintf("image %s could not be accessed on a registry to record\nits digest. Each node will access %s independently,\npossibly leading to different nodes running different\nversions of the image.\n", image, image) +} diff --git a/components/engine/client/service_create_test.go b/components/engine/client/service_create_test.go new file mode 100644 index 0000000000..2ece62b993 --- /dev/null +++ b/components/engine/client/service_create_test.go @@ -0,0 +1,123 @@ +package client + +import ( +	"bytes" +	"encoding/json" +	"fmt" +	"io" +	"io/ioutil" +	"net/http" +	"strings" +	"testing" + +	"github.com/docker/docker/api/types" +	registrytypes "github.com/docker/docker/api/types/registry" +	"github.com/docker/docker/api/types/swarm" +	"github.com/opencontainers/image-spec/specs-go/v1" +	"golang.org/x/net/context" +) + +func TestServiceCreateError(t *testing.T) { +	client := &Client{ +		client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), +	} +	_, err := client.ServiceCreate(context.Background(), swarm.ServiceSpec{}, types.ServiceCreateOptions{}) +	if err == nil || err.Error() != "Error response from daemon: Server error" { +		t.Fatalf("expected a Server Error, got %v", err) +	} +} + +func TestServiceCreate(t *testing.T) { +	expectedURL := "/services/create" +	client := &Client{ +		client: newMockClient(func(req *http.Request) (*http.Response, error) { +			if !strings.HasPrefix(req.URL.Path, expectedURL) { +				return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) +			} +			if req.Method != "POST" { +				return nil, fmt.Errorf("expected POST method, got %s", req.Method) +			} +			b, err := json.Marshal(types.ServiceCreateResponse{ +				ID: "service_id", +			}) +			if err != nil { +				return nil, err +			} +			return &http.Response{ +				StatusCode: http.StatusOK, +				Body:       ioutil.NopCloser(bytes.NewReader(b)), +			}, nil +		}), +	} + +	r, err := client.ServiceCreate(context.Background(), swarm.ServiceSpec{}, types.ServiceCreateOptions{}) +	if err != nil { +		t.Fatal(err) +	} +	if r.ID != "service_id" { +		t.Fatalf("expected `service_id`, got %s", r.ID) +	} +} + +func TestServiceCreateCompatiblePlatforms(t *testing.T) { +	var ( +		platforms               []v1.Platform +		distributionInspectBody io.ReadCloser +		distributionInspect
registrytypes.DistributionInspect + ) + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if strings.HasPrefix(req.URL.Path, "/services/create") { + // check if the /distribution endpoint returned correct output + err := json.NewDecoder(distributionInspectBody).Decode(&distributionInspect) + if err != nil { + return nil, err + } + if len(distributionInspect.Platforms) == 0 || distributionInspect.Platforms[0].Architecture != platforms[0].Architecture || distributionInspect.Platforms[0].OS != platforms[0].OS { + return nil, fmt.Errorf("received incorrect platform information from registry") + } + + b, err := json.Marshal(types.ServiceCreateResponse{ + ID: "service_" + platforms[0].Architecture, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(b)), + }, nil + } else if strings.HasPrefix(req.URL.Path, "/distribution/") { + platforms = []v1.Platform{ + { + Architecture: "amd64", + OS: "linux", + }, + } + b, err := json.Marshal(registrytypes.DistributionInspect{ + Descriptor: v1.Descriptor{}, + Platforms: platforms, + }) + if err != nil { + return nil, err + } + distributionInspectBody = ioutil.NopCloser(bytes.NewReader(b)) + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(b)), + }, nil + } else { + return nil, fmt.Errorf("unexpected URL '%s'", req.URL.Path) + } + }), + } + + r, err := client.ServiceCreate(context.Background(), swarm.ServiceSpec{}, types.ServiceCreateOptions{QueryRegistry: true}) + if err != nil { + t.Fatal(err) + } + if r.ID != "service_amd64" { + t.Fatalf("expected `service_amd64`, got %s", r.ID) + } +} diff --git a/components/engine/client/service_inspect.go b/components/engine/client/service_inspect.go new file mode 100644 index 0000000000..d7e051e3a4 --- /dev/null +++ b/components/engine/client/service_inspect.go @@ -0,0 +1,38 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "net/url" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +// ServiceInspectWithRaw returns the service information and the raw data. 
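+// +// A usage sketch (illustrative only): +// +//	svc, raw, err := cli.ServiceInspectWithRaw(ctx, "service_id", +//		types.ServiceInspectOptions{InsertDefaults: true}) +//	// svc is the decoded swarm.Service; raw is the unparsed JSON body.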
+func (cli *Client) ServiceInspectWithRaw(ctx context.Context, serviceID string, opts types.ServiceInspectOptions) (swarm.Service, []byte, error) { + query := url.Values{} + query.Set("insertDefaults", fmt.Sprintf("%v", opts.InsertDefaults)) + serverResp, err := cli.get(ctx, "/services/"+serviceID, query, nil) + if err != nil { + if serverResp.statusCode == http.StatusNotFound { + return swarm.Service{}, nil, serviceNotFoundError{serviceID} + } + return swarm.Service{}, nil, err + } + defer ensureReaderClosed(serverResp) + + body, err := ioutil.ReadAll(serverResp.body) + if err != nil { + return swarm.Service{}, nil, err + } + + var response swarm.Service + rdr := bytes.NewReader(body) + err = json.NewDecoder(rdr).Decode(&response) + return response, body, err +} diff --git a/components/engine/client/service_inspect_test.go b/components/engine/client/service_inspect_test.go new file mode 100644 index 0000000000..d53f583e90 --- /dev/null +++ b/components/engine/client/service_inspect_test.go @@ -0,0 +1,66 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +func TestServiceInspectError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, _, err := client.ServiceInspectWithRaw(context.Background(), "nothing", types.ServiceInspectOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestServiceInspectServiceNotFound(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusNotFound, "Server error")), + } + + _, _, err := client.ServiceInspectWithRaw(context.Background(), "unknown", types.ServiceInspectOptions{}) + if err == nil || !IsErrServiceNotFound(err) { + t.Fatalf("expected a serviceNotFoundError error, got %v", err) + } +} + +func TestServiceInspect(t *testing.T) { + expectedURL := "/services/service_id" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + content, err := json.Marshal(swarm.Service{ + ID: "service_id", + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + serviceInspect, _, err := client.ServiceInspectWithRaw(context.Background(), "service_id", types.ServiceInspectOptions{}) + if err != nil { + t.Fatal(err) + } + if serviceInspect.ID != "service_id" { + t.Fatalf("expected `service_id`, got %s", serviceInspect.ID) + } +} diff --git a/components/engine/client/service_list.go b/components/engine/client/service_list.go new file mode 100644 index 0000000000..c29e6d407d --- /dev/null +++ b/components/engine/client/service_list.go @@ -0,0 +1,35 @@ +package client + +import ( + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +// ServiceList returns the list of services. 
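+// +// A usage sketch (illustrative only; the label value is hypothetical): +// +//	args := filters.NewArgs() +//	args.Add("label", "com.example.group=web") +//	services, err := cli.ServiceList(ctx, types.ServiceListOptions{Filters: args})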
+func (cli *Client) ServiceList(ctx context.Context, options types.ServiceListOptions) ([]swarm.Service, error) { + query := url.Values{} + + if options.Filters.Len() > 0 { + filterJSON, err := filters.ToParam(options.Filters) + if err != nil { + return nil, err + } + + query.Set("filters", filterJSON) + } + + resp, err := cli.get(ctx, "/services", query, nil) + if err != nil { + return nil, err + } + + var services []swarm.Service + err = json.NewDecoder(resp.body).Decode(&services) + ensureReaderClosed(resp) + return services, err +} diff --git a/components/engine/client/service_list_test.go b/components/engine/client/service_list_test.go new file mode 100644 index 0000000000..213981ef70 --- /dev/null +++ b/components/engine/client/service_list_test.go @@ -0,0 +1,94 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +func TestServiceListError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, err := client.ServiceList(context.Background(), types.ServiceListOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestServiceList(t *testing.T) { + expectedURL := "/services" + + filters := filters.NewArgs() + filters.Add("label", "label1") + filters.Add("label", "label2") + + listCases := []struct { + options types.ServiceListOptions + expectedQueryParams map[string]string + }{ + { + options: types.ServiceListOptions{}, + expectedQueryParams: map[string]string{ + "filters": "", + }, + }, + { + options: types.ServiceListOptions{ + Filters: filters, + }, + expectedQueryParams: map[string]string{ + "filters": `{"label":{"label1":true,"label2":true}}`, + }, + }, + } + for _, listCase := range listCases { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + query := req.URL.Query() + for key, expected := range listCase.expectedQueryParams { + actual := query.Get(key) + if actual != expected { + return nil, fmt.Errorf("%s not set in URL query properly. Expected '%s', got %s", key, expected, actual) + } + } + content, err := json.Marshal([]swarm.Service{ + { + ID: "service_id1", + }, + { + ID: "service_id2", + }, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + services, err := client.ServiceList(context.Background(), listCase.options) + if err != nil { + t.Fatal(err) + } + if len(services) != 2 { + t.Fatalf("expected 2 services, got %v", services) + } + } +} diff --git a/components/engine/client/service_logs.go b/components/engine/client/service_logs.go new file mode 100644 index 0000000000..24384e3ec0 --- /dev/null +++ b/components/engine/client/service_logs.go @@ -0,0 +1,52 @@ +package client + +import ( + "io" + "net/url" + "time" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + timetypes "github.com/docker/docker/api/types/time" +) + +// ServiceLogs returns the logs generated by a service in an io.ReadCloser. +// It's up to the caller to close the stream. 
+func (cli *Client) ServiceLogs(ctx context.Context, serviceID string, options types.ContainerLogsOptions) (io.ReadCloser, error) { +	query := url.Values{} +	if options.ShowStdout { +		query.Set("stdout", "1") +	} + +	if options.ShowStderr { +		query.Set("stderr", "1") +	} + +	if options.Since != "" { +		ts, err := timetypes.GetTimestamp(options.Since, time.Now()) +		if err != nil { +			return nil, err +		} +		query.Set("since", ts) +	} + +	if options.Timestamps { +		query.Set("timestamps", "1") +	} + +	if options.Details { +		query.Set("details", "1") +	} + +	if options.Follow { +		query.Set("follow", "1") +	} +	query.Set("tail", options.Tail) + +	resp, err := cli.get(ctx, "/services/"+serviceID+"/logs", query, nil) +	if err != nil { +		return nil, err +	} +	return resp.body, nil +} diff --git a/components/engine/client/service_logs_test.go b/components/engine/client/service_logs_test.go new file mode 100644 index 0000000000..a6d002ba75 --- /dev/null +++ b/components/engine/client/service_logs_test.go @@ -0,0 +1,133 @@ +package client + +import ( +	"bytes" +	"fmt" +	"io" +	"io/ioutil" +	"log" +	"net/http" +	"os" +	"strings" +	"testing" +	"time" + +	"github.com/docker/docker/api/types" + +	"golang.org/x/net/context" +) + +func TestServiceLogsError(t *testing.T) { +	client := &Client{ +		client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), +	} +	_, err := client.ServiceLogs(context.Background(), "service_id", types.ContainerLogsOptions{}) +	if err == nil || err.Error() != "Error response from daemon: Server error" { +		t.Fatalf("expected a Server Error, got %v", err) +	} +	_, err = client.ServiceLogs(context.Background(), "service_id", types.ContainerLogsOptions{ +		Since: "2006-01-02TZ", +	}) +	if err == nil || !strings.Contains(err.Error(), `parsing time "2006-01-02TZ"`) { +		t.Fatalf("expected a 'parsing time' error, got %v", err) +	} +} + +func TestServiceLogs(t *testing.T) { +	expectedURL := "/services/service_id/logs" +	cases := []struct { +		options             types.ContainerLogsOptions +		expectedQueryParams map[string]string +	}{ +		{ +			expectedQueryParams: map[string]string{ +				"tail": "", +			}, +		}, +		{ +			options: types.ContainerLogsOptions{ +				Tail: "any", +			}, +			expectedQueryParams: map[string]string{ +				"tail": "any", +			}, +		}, +		{ +			options: types.ContainerLogsOptions{ +				ShowStdout: true, +				ShowStderr: true, +				Timestamps: true, +				Details:    true, +				Follow:     true, +			}, +			expectedQueryParams: map[string]string{ +				"tail":       "", +				"stdout":     "1", +				"stderr":     "1", +				"timestamps": "1", +				"details":    "1", +				"follow":     "1", +			}, +		}, +		{ +			options: types.ContainerLogsOptions{ +				// A completely invalid date, timestamp or Go duration will be +				// passed as is +				Since: "invalid but valid", +			}, +			expectedQueryParams: map[string]string{ +				"tail":  "", +				"since": "invalid but valid", +			}, +		}, +	} +	for _, logCase := range cases { +		client := &Client{ +			client: newMockClient(func(r *http.Request) (*http.Response, error) { +				if !strings.HasPrefix(r.URL.Path, expectedURL) { +					return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, r.URL) +				} +				// Check query parameters +				query := r.URL.Query() +				for key, expected := range logCase.expectedQueryParams { +					actual := query.Get(key) +					if actual != expected { +						return nil, fmt.Errorf("%s not set in URL query properly.
Expected '%s', got %s", key, expected, actual) + } + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte("response"))), + }, nil + }), + } + body, err := client.ServiceLogs(context.Background(), "service_id", logCase.options) + if err != nil { + t.Fatal(err) + } + defer body.Close() + content, err := ioutil.ReadAll(body) + if err != nil { + t.Fatal(err) + } + if string(content) != "response" { + t.Fatalf("expected response to contain 'response', got %s", string(content)) + } + } +} + +func ExampleClient_ServiceLogs_withTimeout() { + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + client, _ := NewEnvClient() + reader, err := client.ServiceLogs(ctx, "service_id", types.ContainerLogsOptions{}) + if err != nil { + log.Fatal(err) + } + + _, err = io.Copy(os.Stdout, reader) + if err != nil && err != io.EOF { + log.Fatal(err) + } +} diff --git a/components/engine/client/service_remove.go b/components/engine/client/service_remove.go new file mode 100644 index 0000000000..a9331f92c2 --- /dev/null +++ b/components/engine/client/service_remove.go @@ -0,0 +1,10 @@ +package client + +import "golang.org/x/net/context" + +// ServiceRemove kills and removes a service. +func (cli *Client) ServiceRemove(ctx context.Context, serviceID string) error { + resp, err := cli.delete(ctx, "/services/"+serviceID, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/components/engine/client/service_remove_test.go b/components/engine/client/service_remove_test.go new file mode 100644 index 0000000000..8e2ac259c1 --- /dev/null +++ b/components/engine/client/service_remove_test.go @@ -0,0 +1,47 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" +) + +func TestServiceRemoveError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + err := client.ServiceRemove(context.Background(), "service_id") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestServiceRemove(t *testing.T) { + expectedURL := "/services/service_id" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "DELETE" { + return nil, fmt.Errorf("expected DELETE method, got %s", req.Method) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte("body"))), + }, nil + }), + } + + err := client.ServiceRemove(context.Background(), "service_id") + if err != nil { + t.Fatal(err) + } +} diff --git a/components/engine/client/service_update.go b/components/engine/client/service_update.go new file mode 100644 index 0000000000..0cb35a1991 --- /dev/null +++ b/components/engine/client/service_update.go @@ -0,0 +1,68 @@ +package client + +import ( + "encoding/json" + "net/url" + "strconv" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +// ServiceUpdate updates a Service. 
+func (cli *Client) ServiceUpdate(ctx context.Context, serviceID string, version swarm.Version, service swarm.ServiceSpec, options types.ServiceUpdateOptions) (types.ServiceUpdateResponse, error) { + var ( + query = url.Values{} + distErr error + ) + + headers := map[string][]string{ + "version": {cli.version}, + } + + if options.EncodedRegistryAuth != "" { + headers["X-Registry-Auth"] = []string{options.EncodedRegistryAuth} + } + + if options.RegistryAuthFrom != "" { + query.Set("registryAuthFrom", options.RegistryAuthFrom) + } + + if options.Rollback != "" { + query.Set("rollback", options.Rollback) + } + + query.Set("version", strconv.FormatUint(version.Index, 10)) + + // Contact the registry to retrieve digest and platform information + // This happens only when the image has changed + if options.QueryRegistry { + distributionInspect, err := cli.DistributionInspect(ctx, service.TaskTemplate.ContainerSpec.Image, options.EncodedRegistryAuth) + distErr = err + if err == nil { + // now pin by digest if the image doesn't already contain a digest + img := imageWithDigestString(service.TaskTemplate.ContainerSpec.Image, distributionInspect.Descriptor.Digest) + if img != "" { + service.TaskTemplate.ContainerSpec.Image = img + } + // add platforms that are compatible with the service + service.TaskTemplate.Placement = updateServicePlatforms(service.TaskTemplate.Placement, distributionInspect) + } + } + + var response types.ServiceUpdateResponse + resp, err := cli.post(ctx, "/services/"+serviceID+"/update", query, service, headers) + if err != nil { + return response, err + } + + err = json.NewDecoder(resp.body).Decode(&response) + + if distErr != nil { + response.Warnings = append(response.Warnings, digestWarning(service.TaskTemplate.ContainerSpec.Image)) + } + + ensureReaderClosed(resp) + return response, err +} diff --git a/components/engine/client/service_update_test.go b/components/engine/client/service_update_test.go new file mode 100644 index 0000000000..76bea176bf --- /dev/null +++ b/components/engine/client/service_update_test.go @@ -0,0 +1,77 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" +) + +func TestServiceUpdateError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, err := client.ServiceUpdate(context.Background(), "service_id", swarm.Version{}, swarm.ServiceSpec{}, types.ServiceUpdateOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestServiceUpdate(t *testing.T) { + expectedURL := "/services/service_id/update" + + updateCases := []struct { + swarmVersion swarm.Version + expectedVersion string + }{ + { + expectedVersion: "0", + }, + { + swarmVersion: swarm.Version{ + Index: 0, + }, + expectedVersion: "0", + }, + { + swarmVersion: swarm.Version{ + Index: 10, + }, + expectedVersion: "10", + }, + } + + for _, updateCase := range updateCases { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "POST" { + return nil, fmt.Errorf("expected POST method, got %s", req.Method) + } + version := req.URL.Query().Get("version") + if version != 
updateCase.expectedVersion { + return nil, fmt.Errorf("version not set in URL query properly, expected '%s', got %s", updateCase.expectedVersion, version) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte("{}"))), + }, nil + }), + } + + _, err := client.ServiceUpdate(context.Background(), "service_id", updateCase.swarmVersion, swarm.ServiceSpec{}, types.ServiceUpdateOptions{}) + if err != nil { + t.Fatal(err) + } + } +} diff --git a/components/engine/client/swarm_get_unlock_key.go b/components/engine/client/swarm_get_unlock_key.go new file mode 100644 index 0000000000..be28d32628 --- /dev/null +++ b/components/engine/client/swarm_get_unlock_key.go @@ -0,0 +1,21 @@ +package client + +import ( + "encoding/json" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// SwarmGetUnlockKey retrieves the swarm's unlock key. +func (cli *Client) SwarmGetUnlockKey(ctx context.Context) (types.SwarmUnlockKeyResponse, error) { + serverResp, err := cli.get(ctx, "/swarm/unlockkey", nil, nil) + if err != nil { + return types.SwarmUnlockKeyResponse{}, err + } + + var response types.SwarmUnlockKeyResponse + err = json.NewDecoder(serverResp.body).Decode(&response) + ensureReaderClosed(serverResp) + return response, err +} diff --git a/components/engine/client/swarm_get_unlock_key_test.go b/components/engine/client/swarm_get_unlock_key_test.go new file mode 100644 index 0000000000..8dd08d95f2 --- /dev/null +++ b/components/engine/client/swarm_get_unlock_key_test.go @@ -0,0 +1,60 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/testutil" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "golang.org/x/net/context" +) + +func TestSwarmGetUnlockKeyError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, err := client.SwarmGetUnlockKey(context.Background()) + testutil.ErrorContains(t, err, "Error response from daemon: Server error") +} + +func TestSwarmGetUnlockKey(t *testing.T) { + expectedURL := "/swarm/unlockkey" + unlockKey := "SWMKEY-1-y6guTZNTwpQeTL5RhUfOsdBdXoQjiB2GADHSRJvbXeE" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "GET" { + return nil, fmt.Errorf("expected GET method, got %s", req.Method) + } + + key := types.SwarmUnlockKeyResponse{ + UnlockKey: unlockKey, + } + + b, err := json.Marshal(key) + if err != nil { + return nil, err + } + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(b)), + }, nil + }), + } + + resp, err := client.SwarmGetUnlockKey(context.Background()) + require.NoError(t, err) + assert.Equal(t, unlockKey, resp.UnlockKey) +} diff --git a/components/engine/client/swarm_init.go b/components/engine/client/swarm_init.go new file mode 100644 index 0000000000..9e65e1cca4 --- /dev/null +++ b/components/engine/client/swarm_init.go @@ -0,0 +1,21 @@ +package client + +import ( + "encoding/json" + + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +// SwarmInit initializes the swarm. 
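+// +// A usage sketch (illustrative only): +// +//	nodeID, err := cli.SwarmInit(ctx, swarm.InitRequest{ +//		ListenAddr: "0.0.0.0:2377", +//	}) +//	// the decoded string identifies the node that became the first manager.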
+func (cli *Client) SwarmInit(ctx context.Context, req swarm.InitRequest) (string, error) { + serverResp, err := cli.post(ctx, "/swarm/init", nil, req, nil) + if err != nil { + return "", err + } + + var response string + err = json.NewDecoder(serverResp.body).Decode(&response) + ensureReaderClosed(serverResp) + return response, err +} diff --git a/components/engine/client/swarm_init_test.go b/components/engine/client/swarm_init_test.go new file mode 100644 index 0000000000..811155aff4 --- /dev/null +++ b/components/engine/client/swarm_init_test.go @@ -0,0 +1,54 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types/swarm" +) + +func TestSwarmInitError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, err := client.SwarmInit(context.Background(), swarm.InitRequest{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestSwarmInit(t *testing.T) { + expectedURL := "/swarm/init" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "POST" { + return nil, fmt.Errorf("expected POST method, got %s", req.Method) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(`"body"`))), + }, nil + }), + } + + resp, err := client.SwarmInit(context.Background(), swarm.InitRequest{ + ListenAddr: "0.0.0.0:2377", + }) + if err != nil { + t.Fatal(err) + } + if resp != "body" { + t.Fatalf("Expected 'body', got %s", resp) + } +} diff --git a/components/engine/client/swarm_inspect.go b/components/engine/client/swarm_inspect.go new file mode 100644 index 0000000000..77e72f8466 --- /dev/null +++ b/components/engine/client/swarm_inspect.go @@ -0,0 +1,21 @@ +package client + +import ( + "encoding/json" + + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +// SwarmInspect inspects the swarm. 
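+// +// A usage sketch (illustrative only): +// +//	sw, err := cli.SwarmInspect(ctx) +//	// sw.ID and sw.Spec describe the cluster when err is nil.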
+func (cli *Client) SwarmInspect(ctx context.Context) (swarm.Swarm, error) { + serverResp, err := cli.get(ctx, "/swarm", nil, nil) + if err != nil { + return swarm.Swarm{}, err + } + + var response swarm.Swarm + err = json.NewDecoder(serverResp.body).Decode(&response) + ensureReaderClosed(serverResp) + return response, err +} diff --git a/components/engine/client/swarm_inspect_test.go b/components/engine/client/swarm_inspect_test.go new file mode 100644 index 0000000000..6432d172b4 --- /dev/null +++ b/components/engine/client/swarm_inspect_test.go @@ -0,0 +1,56 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +func TestSwarmInspectError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, err := client.SwarmInspect(context.Background()) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestSwarmInspect(t *testing.T) { + expectedURL := "/swarm" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + content, err := json.Marshal(swarm.Swarm{ + ClusterInfo: swarm.ClusterInfo{ + ID: "swarm_id", + }, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + swarmInspect, err := client.SwarmInspect(context.Background()) + if err != nil { + t.Fatal(err) + } + if swarmInspect.ID != "swarm_id" { + t.Fatalf("expected `swarm_id`, got %s", swarmInspect.ID) + } +} diff --git a/components/engine/client/swarm_join.go b/components/engine/client/swarm_join.go new file mode 100644 index 0000000000..19e5192b9e --- /dev/null +++ b/components/engine/client/swarm_join.go @@ -0,0 +1,13 @@ +package client + +import ( + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +// SwarmJoin joins the swarm. 
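+// +// A usage sketch (illustrative only; the address and token are placeholders): +// +//	err := cli.SwarmJoin(ctx, swarm.JoinRequest{ +//		ListenAddr:  "0.0.0.0:2377", +//		RemoteAddrs: []string{"manager-host:2377"}, +//		JoinToken:   "SWMTKN-...", +//	})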
+func (cli *Client) SwarmJoin(ctx context.Context, req swarm.JoinRequest) error { + resp, err := cli.post(ctx, "/swarm/join", nil, req, nil) + ensureReaderClosed(resp) + return err +} diff --git a/components/engine/client/swarm_join_test.go b/components/engine/client/swarm_join_test.go new file mode 100644 index 0000000000..31ef2a76ee --- /dev/null +++ b/components/engine/client/swarm_join_test.go @@ -0,0 +1,51 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types/swarm" +) + +func TestSwarmJoinError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + err := client.SwarmJoin(context.Background(), swarm.JoinRequest{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestSwarmJoin(t *testing.T) { + expectedURL := "/swarm/join" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "POST" { + return nil, fmt.Errorf("expected POST method, got %s", req.Method) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + + err := client.SwarmJoin(context.Background(), swarm.JoinRequest{ + ListenAddr: "0.0.0.0:2377", + }) + if err != nil { + t.Fatal(err) + } +} diff --git a/components/engine/client/swarm_leave.go b/components/engine/client/swarm_leave.go new file mode 100644 index 0000000000..3a205cf3b5 --- /dev/null +++ b/components/engine/client/swarm_leave.go @@ -0,0 +1,18 @@ +package client + +import ( + "net/url" + + "golang.org/x/net/context" +) + +// SwarmLeave leaves the swarm. 
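+//
+// Illustrative usage sketch (cli and ctx assumed as elsewhere in this
+// package); force=true mirrors `docker swarm leave --force` and skips the
+// safety check that would otherwise stop a manager from leaving:
+//
+//	if err := cli.SwarmLeave(ctx, true); err != nil {
+//		log.Fatal(err)
+//	}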
+func (cli *Client) SwarmLeave(ctx context.Context, force bool) error {
+	query := url.Values{}
+	if force {
+		query.Set("force", "1")
+	}
+	resp, err := cli.post(ctx, "/swarm/leave", query, nil, nil)
+	ensureReaderClosed(resp)
+	return err
+}
diff --git a/components/engine/client/swarm_leave_test.go b/components/engine/client/swarm_leave_test.go
new file mode 100644
index 0000000000..c96dac8120
--- /dev/null
+++ b/components/engine/client/swarm_leave_test.go
@@ -0,0 +1,66 @@
+package client
+
+import (
+	"bytes"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"strings"
+	"testing"
+
+	"golang.org/x/net/context"
+)
+
+func TestSwarmLeaveError(t *testing.T) {
+	client := &Client{
+		client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")),
+	}
+
+	err := client.SwarmLeave(context.Background(), false)
+	if err == nil || err.Error() != "Error response from daemon: Server error" {
+		t.Fatalf("expected a Server Error, got %v", err)
+	}
+}
+
+func TestSwarmLeave(t *testing.T) {
+	expectedURL := "/swarm/leave"
+
+	leaveCases := []struct {
+		force         bool
+		expectedForce string
+	}{
+		{
+			expectedForce: "",
+		},
+		{
+			force:         true,
+			expectedForce: "1",
+		},
+	}
+
+	for _, leaveCase := range leaveCases {
+		client := &Client{
+			client: newMockClient(func(req *http.Request) (*http.Response, error) {
+				if !strings.HasPrefix(req.URL.Path, expectedURL) {
+					return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL)
+				}
+				if req.Method != "POST" {
+					return nil, fmt.Errorf("expected POST method, got %s", req.Method)
+				}
+				force := req.URL.Query().Get("force")
+				if force != leaveCase.expectedForce {
+					return nil, fmt.Errorf("force not set in URL query properly. expected '%s', got %s", leaveCase.expectedForce, force)
+				}
+				return &http.Response{
+					StatusCode: http.StatusOK,
+					Body:       ioutil.NopCloser(bytes.NewReader([]byte(""))),
+				}, nil
+			}),
+		}
+
+		err := client.SwarmLeave(context.Background(), leaveCase.force)
+		if err != nil {
+			t.Fatal(err)
+		}
+	}
+}
diff --git a/components/engine/client/swarm_unlock.go b/components/engine/client/swarm_unlock.go
new file mode 100644
index 0000000000..9ee441fed2
--- /dev/null
+++ b/components/engine/client/swarm_unlock.go
@@ -0,0 +1,13 @@
+package client
+
+import (
+	"github.com/docker/docker/api/types/swarm"
+	"golang.org/x/net/context"
+)
+
+// SwarmUnlock unlocks a locked swarm.
+func (cli *Client) SwarmUnlock(ctx context.Context, req swarm.UnlockRequest) error {
+	serverResp, err := cli.post(ctx, "/swarm/unlock", nil, req, nil)
+	ensureReaderClosed(serverResp)
+	return err
+}
diff --git a/components/engine/client/swarm_update.go b/components/engine/client/swarm_update.go
new file mode 100644
index 0000000000..7245fd4e38
--- /dev/null
+++ b/components/engine/client/swarm_update.go
@@ -0,0 +1,22 @@
+package client
+
+import (
+	"fmt"
+	"net/url"
+	"strconv"
+
+	"github.com/docker/docker/api/types/swarm"
+	"golang.org/x/net/context"
+)
+
+// SwarmUpdate updates the swarm.
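+//
+// Illustrative read-modify-write sketch (cli and ctx assumed): the update
+// must carry the version reported by SwarmInspect so the daemon can detect
+// concurrent modifications:
+//
+//	sw, err := cli.SwarmInspect(ctx)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	err = cli.SwarmUpdate(ctx, sw.Version, sw.Spec, swarm.UpdateFlags{
+//		RotateWorkerToken: true,
+//	})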
+func (cli *Client) SwarmUpdate(ctx context.Context, version swarm.Version, swarm swarm.Spec, flags swarm.UpdateFlags) error {
+	query := url.Values{}
+	query.Set("version", strconv.FormatUint(version.Index, 10))
+	query.Set("rotateWorkerToken", fmt.Sprintf("%v", flags.RotateWorkerToken))
+	query.Set("rotateManagerToken", fmt.Sprintf("%v", flags.RotateManagerToken))
+	query.Set("rotateManagerUnlockKey", fmt.Sprintf("%v", flags.RotateManagerUnlockKey))
+	resp, err := cli.post(ctx, "/swarm/update", query, swarm, nil)
+	ensureReaderClosed(resp)
+	return err
+}
diff --git a/components/engine/client/swarm_update_test.go b/components/engine/client/swarm_update_test.go
new file mode 100644
index 0000000000..3b23db078f
--- /dev/null
+++ b/components/engine/client/swarm_update_test.go
@@ -0,0 +1,49 @@
+package client
+
+import (
+	"bytes"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"strings"
+	"testing"
+
+	"golang.org/x/net/context"
+
+	"github.com/docker/docker/api/types/swarm"
+)
+
+func TestSwarmUpdateError(t *testing.T) {
+	client := &Client{
+		client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")),
+	}
+
+	err := client.SwarmUpdate(context.Background(), swarm.Version{}, swarm.Spec{}, swarm.UpdateFlags{})
+	if err == nil || err.Error() != "Error response from daemon: Server error" {
+		t.Fatalf("expected a Server Error, got %v", err)
+	}
+}
+
+func TestSwarmUpdate(t *testing.T) {
+	expectedURL := "/swarm/update"
+
+	client := &Client{
+		client: newMockClient(func(req *http.Request) (*http.Response, error) {
+			if !strings.HasPrefix(req.URL.Path, expectedURL) {
+				return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL)
+			}
+			if req.Method != "POST" {
+				return nil, fmt.Errorf("expected POST method, got %s", req.Method)
+			}
+			return &http.Response{
+				StatusCode: http.StatusOK,
+				Body:       ioutil.NopCloser(bytes.NewReader([]byte(""))),
+			}, nil
+		}),
+	}
+
+	err := client.SwarmUpdate(context.Background(), swarm.Version{}, swarm.Spec{}, swarm.UpdateFlags{})
+	if err != nil {
+		t.Fatal(err)
+	}
+}
diff --git a/components/engine/client/task_inspect.go b/components/engine/client/task_inspect.go
new file mode 100644
index 0000000000..bc8058fc32
--- /dev/null
+++ b/components/engine/client/task_inspect.go
@@ -0,0 +1,34 @@
+package client
+
+import (
+	"bytes"
+	"encoding/json"
+	"io/ioutil"
+	"net/http"
+
+	"github.com/docker/docker/api/types/swarm"
+
+	"golang.org/x/net/context"
+)
+
+// TaskInspectWithRaw returns the task information and its raw representation.
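+//
+// Illustrative usage sketch (cli and ctx assumed; "task_id" is a placeholder):
+//
+//	task, raw, err := cli.TaskInspectWithRaw(ctx, "task_id")
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Printf("%s: %d raw bytes\n", task.ID, len(raw))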
+func (cli *Client) TaskInspectWithRaw(ctx context.Context, taskID string) (swarm.Task, []byte, error) { + serverResp, err := cli.get(ctx, "/tasks/"+taskID, nil, nil) + if err != nil { + if serverResp.statusCode == http.StatusNotFound { + return swarm.Task{}, nil, taskNotFoundError{taskID} + } + return swarm.Task{}, nil, err + } + defer ensureReaderClosed(serverResp) + + body, err := ioutil.ReadAll(serverResp.body) + if err != nil { + return swarm.Task{}, nil, err + } + + var response swarm.Task + rdr := bytes.NewReader(body) + err = json.NewDecoder(rdr).Decode(&response) + return response, body, err +} diff --git a/components/engine/client/task_inspect_test.go b/components/engine/client/task_inspect_test.go new file mode 100644 index 0000000000..148cdad3a7 --- /dev/null +++ b/components/engine/client/task_inspect_test.go @@ -0,0 +1,54 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +func TestTaskInspectError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, _, err := client.TaskInspectWithRaw(context.Background(), "nothing") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestTaskInspect(t *testing.T) { + expectedURL := "/tasks/task_id" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + content, err := json.Marshal(swarm.Task{ + ID: "task_id", + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + taskInspect, _, err := client.TaskInspectWithRaw(context.Background(), "task_id") + if err != nil { + t.Fatal(err) + } + if taskInspect.ID != "task_id" { + t.Fatalf("expected `task_id`, got %s", taskInspect.ID) + } +} diff --git a/components/engine/client/task_list.go b/components/engine/client/task_list.go new file mode 100644 index 0000000000..66324da959 --- /dev/null +++ b/components/engine/client/task_list.go @@ -0,0 +1,35 @@ +package client + +import ( + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +// TaskList returns the list of tasks. 
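+//
+// Illustrative usage sketch (cli and ctx assumed; the filter shown is one
+// example of the daemon's task filters):
+//
+//	f := filters.NewArgs()
+//	f.Add("desired-state", "running")
+//	tasks, err := cli.TaskList(ctx, types.TaskListOptions{Filters: f})
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println("running tasks:", len(tasks))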
+func (cli *Client) TaskList(ctx context.Context, options types.TaskListOptions) ([]swarm.Task, error) { + query := url.Values{} + + if options.Filters.Len() > 0 { + filterJSON, err := filters.ToParam(options.Filters) + if err != nil { + return nil, err + } + + query.Set("filters", filterJSON) + } + + resp, err := cli.get(ctx, "/tasks", query, nil) + if err != nil { + return nil, err + } + + var tasks []swarm.Task + err = json.NewDecoder(resp.body).Decode(&tasks) + ensureReaderClosed(resp) + return tasks, err +} diff --git a/components/engine/client/task_list_test.go b/components/engine/client/task_list_test.go new file mode 100644 index 0000000000..2a9a4c4346 --- /dev/null +++ b/components/engine/client/task_list_test.go @@ -0,0 +1,94 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +func TestTaskListError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, err := client.TaskList(context.Background(), types.TaskListOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestTaskList(t *testing.T) { + expectedURL := "/tasks" + + filters := filters.NewArgs() + filters.Add("label", "label1") + filters.Add("label", "label2") + + listCases := []struct { + options types.TaskListOptions + expectedQueryParams map[string]string + }{ + { + options: types.TaskListOptions{}, + expectedQueryParams: map[string]string{ + "filters": "", + }, + }, + { + options: types.TaskListOptions{ + Filters: filters, + }, + expectedQueryParams: map[string]string{ + "filters": `{"label":{"label1":true,"label2":true}}`, + }, + }, + } + for _, listCase := range listCases { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + query := req.URL.Query() + for key, expected := range listCase.expectedQueryParams { + actual := query.Get(key) + if actual != expected { + return nil, fmt.Errorf("%s not set in URL query properly. Expected '%s', got %s", key, expected, actual) + } + } + content, err := json.Marshal([]swarm.Task{ + { + ID: "task_id1", + }, + { + ID: "task_id2", + }, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + tasks, err := client.TaskList(context.Background(), listCase.options) + if err != nil { + t.Fatal(err) + } + if len(tasks) != 2 { + t.Fatalf("expected 2 tasks, got %v", tasks) + } + } +} diff --git a/components/engine/client/task_logs.go b/components/engine/client/task_logs.go new file mode 100644 index 0000000000..2ed19543a4 --- /dev/null +++ b/components/engine/client/task_logs.go @@ -0,0 +1,52 @@ +package client + +import ( + "io" + "net/url" + "time" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + timetypes "github.com/docker/docker/api/types/time" +) + +// TaskLogs returns the logs generated by a task in an io.ReadCloser. +// It's up to the caller to close the stream. 
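+//
+// Illustrative usage sketch (cli and ctx assumed; "task_id" is a placeholder).
+// The returned body must be closed by the caller:
+//
+//	rc, err := cli.TaskLogs(ctx, "task_id", types.ContainerLogsOptions{
+//		ShowStdout: true,
+//		ShowStderr: true,
+//		Tail:       "100",
+//	})
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	defer rc.Close()
+//	io.Copy(os.Stdout, rc)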
+func (cli *Client) TaskLogs(ctx context.Context, taskID string, options types.ContainerLogsOptions) (io.ReadCloser, error) { + query := url.Values{} + if options.ShowStdout { + query.Set("stdout", "1") + } + + if options.ShowStderr { + query.Set("stderr", "1") + } + + if options.Since != "" { + ts, err := timetypes.GetTimestamp(options.Since, time.Now()) + if err != nil { + return nil, err + } + query.Set("since", ts) + } + + if options.Timestamps { + query.Set("timestamps", "1") + } + + if options.Details { + query.Set("details", "1") + } + + if options.Follow { + query.Set("follow", "1") + } + query.Set("tail", options.Tail) + + resp, err := cli.get(ctx, "/tasks/"+taskID+"/logs", query, nil) + if err != nil { + return nil, err + } + return resp.body, nil +} diff --git a/components/engine/client/testdata/ca.pem b/components/engine/client/testdata/ca.pem new file mode 100644 index 0000000000..ad14d47065 --- /dev/null +++ b/components/engine/client/testdata/ca.pem @@ -0,0 +1,18 @@ +-----BEGIN CERTIFICATE----- +MIIC0jCCAbqgAwIBAgIRAILlP5WWLaHkQ/m2ASHP7SowDQYJKoZIhvcNAQELBQAw +EjEQMA4GA1UEChMHdmluY2VudDAeFw0xNjAzMjQxMDE5MDBaFw0xOTAzMDkxMDE5 +MDBaMBIxEDAOBgNVBAoTB3ZpbmNlbnQwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw +ggEKAoIBAQD0yZPKAGncoaxaU/QW9tWEHbrvDoGVF/65L8Si/jBrlAgLjhmmV1di +vKG9QPzuU8snxHro3/uCwyA6kTqw0U8bGwHxJq2Bpa6JBYj8N2jMJ+M+sjXgSo2t +E0zIzjTW2Pir3C8qwfrVL6NFp9xClwMD23SFZ0UsEH36NkfyrKBVeM8IOjJd4Wjs +xIcuvF3BTVkji84IJBW2JIKf9ZrzJwUlSCPgptRp4Evdbyp5d+UPxtwxD7qjW4lM +yQQ8vfcC4lKkVx5s/RNJ4fzd5uEgLdEbZ20qt7Zt/bLcxFHpUhH2teA0QjmrOWFh +gbL83s95/+hbSVhsO4hoFW7vTeiCCY4xAgMBAAGjIzAhMA4GA1UdDwEB/wQEAwIC +rDAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBCwUAA4IBAQBY51RHajuDuhO2 +tcm26jeNROzfffnjhvbOVPjSEdo9vI3JpMU/RuQw+nbNcLwJrdjL6UH7tD/36Y+q +NXH+xSIjWFH0zXGxrIUsVrvt6f8CbOvw7vD+gygOG+849PDQMbL6czP8rvXY7vZV +9pdpQfrENk4b5kePRW/6HaGSTvtgN7XOrYD9fp3pm/G534T2e3IxgYMRNwdB9Ul9 +bLwMqQqf4eiqqMs6x4IVmZUkGVMKiFKcvkNg9a+Ozx5pMizHeAezWMcZ5V+QJZVT +8lElSCKZ2Yy2xkcl7aeQMLwcAeZwfTp+Yu9dVzlqXiiBTLd1+LtAQCuKHzmw4Q8k +EvD5m49l +-----END CERTIFICATE----- diff --git a/components/engine/client/testdata/cert.pem b/components/engine/client/testdata/cert.pem new file mode 100644 index 0000000000..9000ffb32b --- /dev/null +++ b/components/engine/client/testdata/cert.pem @@ -0,0 +1,18 @@ +-----BEGIN CERTIFICATE----- +MIIC8DCCAdigAwIBAgIRAJAS1glgcke4q7eCaretwgUwDQYJKoZIhvcNAQELBQAw +EjEQMA4GA1UEChMHdmluY2VudDAeFw0xNjAzMjQxMDE5MDBaFw0xOTAzMDkxMDE5 +MDBaMB4xHDAaBgNVBAoME3ZpbmNlbnQuPGJvb3RzdHJhcD4wggEiMA0GCSqGSIb3 +DQEBAQUAA4IBDwAwggEKAoIBAQClpvG442dGEvrRgmCrqY4kBml1LVlw2Y7ZDn6B +TKa52+MuGDmfXbO1UhclNqTXjLgAwKjPz/OvnPRxNEUoQEDbBd+Xev7rxTY5TvYI +27YH3fMH2LL2j62jum649abfhZ6ekD5eD8tCn3mnrEOgqRIlK7efPIVixq/ZqU1H +7ez0ggB7dmWHlhnUaxyQOCSnAX/7nKYQXqZgVvGhDeR2jp7GcnhbK/qPrZ/mOm83 +2IjCeYN145opYlzTSp64GYIZz7uqMNcnDKK37ZbS8MYcTjrRaHEiqZVVdIC+ghbx +qYqzbZRVfgztI9jwmifn0mYrN4yt+nhNYwBcRJ4Pv3uLFbo7AgMBAAGjNTAzMA4G +A1UdDwEB/wQEAwIHgDATBgNVHSUEDDAKBggrBgEFBQcDAjAMBgNVHRMBAf8EAjAA +MA0GCSqGSIb3DQEBCwUAA4IBAQDg1r7nksjYgDFYEcBbrRrRHddIoK+RVmSBTTrq +8giC77m0srKdh9XTVWK1PUbGfODV1oD8m9QhPE8zPDyYQ8jeXNRSU5wXdkrTRmmY +w/T3SREqmE7CObMtusokHidjYFuqqCR07sJzqBKRlzr3o0EGe3tuEhUlF5ARY028 +eipaDcVlT5ChGcDa6LeJ4e05u4cVap0dd6Rp1w3Rx1AYAecdgtgBMnw1iWdl/nrC +sp26ZXNaAhFOUovlY9VY257AMd9hQV7WvAK4yNEHcckVu3uXTBmDgNSOPtl0QLsL +Kjlj75ksCx8nCln/hCut/0+kGTsGZqdV5c6ktgcGYRir/5Hs +-----END CERTIFICATE----- diff --git a/components/engine/client/testdata/key.pem b/components/engine/client/testdata/key.pem new file mode 100644 index 0000000000..c0869dfc1a --- /dev/null +++ 
b/components/engine/client/testdata/key.pem @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEowIBAAKCAQEApabxuONnRhL60YJgq6mOJAZpdS1ZcNmO2Q5+gUymudvjLhg5 +n12ztVIXJTak14y4AMCoz8/zr5z0cTRFKEBA2wXfl3r+68U2OU72CNu2B93zB9iy +9o+to7puuPWm34WenpA+Xg/LQp95p6xDoKkSJSu3nzyFYsav2alNR+3s9IIAe3Zl +h5YZ1GsckDgkpwF/+5ymEF6mYFbxoQ3kdo6exnJ4Wyv6j62f5jpvN9iIwnmDdeOa +KWJc00qeuBmCGc+7qjDXJwyit+2W0vDGHE460WhxIqmVVXSAvoIW8amKs22UVX4M +7SPY8Jon59JmKzeMrfp4TWMAXESeD797ixW6OwIDAQABAoIBAHfyAAleL8NfrtnR +S+pApbmUIvxD0AWUooispBE/zWG6xC72P5MTqDJctIGvpYCmVf3Fgvamns7EGYN2 +07Sngc6V3Ca1WqyhaffpIuGbJZ1gqr89u6gotRRexBmNVj13ZTlvPJmjWgxtqQsu +AvHsOkVL+HOGwRaaw24Z1umEcBVCepl7PGTqsLeJUtBUZBiqdJTu4JYLAB6BggBI +OxhHoTWvlNWwzezo2C/IXkXcXD/tp3i5vTn5rAXHSMQkdMAUh7/xJ73Fl36gxZhp +W7NoPKaS9qNh8jhs6p54S7tInb6+mrKtvRFKl5XAR3istXrXteT5UaukpuBbQ/5d +qf4BXuECgYEAzoOKxMee5tG/G9iC6ImNq5xGAZm0OnmteNgIEQj49If1Q68av525 +FioqdC9zV+blfHQqXEIUeum4JAou4xqmB8Lw2H0lYwOJ1IkpUy3QJjU1IrI+U5Qy +ryZuA9cxSTLf1AJFbROsoZDpjaBh0uUQkD/4PHpwXMgHu/3CaJ4nTEkCgYEAzVjE +VWgczWJGyRxmHSeR51ft1jrlChZHEd3HwgLfo854JIj+MGUH4KPLSMIkYNuyiwNQ +W7zdXCB47U8afSL/lPTv1M5+ZsWY6sZAT6gtp/IeU0Va943h9cj10fAOBJaz1H6M +jnZS4jjWhVInE7wpCDVCwDRoHHJ84kb6JeflamMCgYBDQDcKie9HP3q6uLE4xMKr +5gIuNz2n5UQGnGNUGNXp2/SVDArr55MEksqsd19aesi01KeOz74XoNDke6R1NJJo +6KTB+08XhWl3GwuoGL02FBGvsNf3I8W1oBAnlAZqzfRx+CNfuA55ttU318jDgvD3 +6L0QBNdef411PNf4dbhacQKBgAd/e0PHFm4lbYJAaDYeUMSKwGN3KQ/SOmwblgSu +iC36BwcGfYmU1tHMCUsx05Q50W4kA9Ylskt/4AqCPexdz8lHnE4/7/uesXO5I3YF +JQ2h2Jufx6+MXbjUyq0Mv+ZI/m3+5PD6vxIFk0ew9T5SO4lSMIrGHxsSzx6QCuhB +bG4TAoGBAJ5PWG7d2CyCjLtfF8J4NxykRvIQ8l/3kDvDdNrXiXbgonojo2lgRYaM +5LoK9ApN8KHdedpTRipBaDA22Sp5SjMcUE7A6q42PJCL9r+BRYF0foFQx/rqpCff +pVWKgwIPoKnfxDqN1RUgyFcx1jbA3XVJZCuT+wbMuDQ9nlvulD1W +-----END RSA PRIVATE KEY----- diff --git a/components/engine/client/transport.go b/components/engine/client/transport.go new file mode 100644 index 0000000000..401ab15d30 --- /dev/null +++ b/components/engine/client/transport.go @@ -0,0 +1,25 @@ +package client + +import ( + "crypto/tls" + "net/http" +) + +// transportFunc allows us to inject a mock transport for testing. We define it +// here so we can detect the tlsconfig and return nil for only this type. +type transportFunc func(*http.Request) (*http.Response, error) + +func (tf transportFunc) RoundTrip(req *http.Request) (*http.Response, error) { + return tf(req) +} + +// resolveTLSConfig attempts to resolve the TLS configuration from the +// RoundTripper. +func resolveTLSConfig(transport http.RoundTripper) *tls.Config { + switch tr := transport.(type) { + case *http.Transport: + return tr.TLSClientConfig + default: + return nil + } +} diff --git a/components/engine/client/utils.go b/components/engine/client/utils.go new file mode 100644 index 0000000000..f3d8877df7 --- /dev/null +++ b/components/engine/client/utils.go @@ -0,0 +1,34 @@ +package client + +import ( + "net/url" + "regexp" + + "github.com/docker/docker/api/types/filters" +) + +var headerRegexp = regexp.MustCompile(`\ADocker/.+\s\((.+)\)\z`) + +// getDockerOS returns the operating system based on the server header from the daemon. +func getDockerOS(serverHeader string) string { + var osType string + matches := headerRegexp.FindStringSubmatch(serverHeader) + if len(matches) > 0 { + osType = matches[1] + } + return osType +} + +// getFiltersQuery returns a url query with "filters" query term, based on the +// filters provided. 
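+//
+// Illustrative sketch of the resulting encoding (the JSON shape matches the
+// expectations in this package's tests; the filter key/value is an example):
+//
+//	f := filters.NewArgs()
+//	f.Add("label", "env=prod")
+//	q, _ := getFiltersQuery(f)
+//	// q.Encode() -> filters=%7B%22label%22%3A%7B%22env%3Dprod%22%3Atrue%7D%7D
+//	// i.e. filters={"label":{"env=prod":true}}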
+func getFiltersQuery(f filters.Args) (url.Values, error) { + query := url.Values{} + if f.Len() > 0 { + filterJSON, err := filters.ToParam(f) + if err != nil { + return query, err + } + query.Set("filters", filterJSON) + } + return query, nil +} diff --git a/components/engine/client/version.go b/components/engine/client/version.go new file mode 100644 index 0000000000..933ceb4a49 --- /dev/null +++ b/components/engine/client/version.go @@ -0,0 +1,21 @@ +package client + +import ( + "encoding/json" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// ServerVersion returns information of the docker client and server host. +func (cli *Client) ServerVersion(ctx context.Context) (types.Version, error) { + resp, err := cli.get(ctx, "/version", nil, nil) + if err != nil { + return types.Version{}, err + } + + var server types.Version + err = json.NewDecoder(resp.body).Decode(&server) + ensureReaderClosed(resp) + return server, err +} diff --git a/components/engine/client/volume_create.go b/components/engine/client/volume_create.go new file mode 100644 index 0000000000..9620c87cbf --- /dev/null +++ b/components/engine/client/volume_create.go @@ -0,0 +1,21 @@ +package client + +import ( + "encoding/json" + + "github.com/docker/docker/api/types" + volumetypes "github.com/docker/docker/api/types/volume" + "golang.org/x/net/context" +) + +// VolumeCreate creates a volume in the docker host. +func (cli *Client) VolumeCreate(ctx context.Context, options volumetypes.VolumesCreateBody) (types.Volume, error) { + var volume types.Volume + resp, err := cli.post(ctx, "/volumes/create", nil, options, nil) + if err != nil { + return volume, err + } + err = json.NewDecoder(resp.body).Decode(&volume) + ensureReaderClosed(resp) + return volume, err +} diff --git a/components/engine/client/volume_create_test.go b/components/engine/client/volume_create_test.go new file mode 100644 index 0000000000..9f1b2540b5 --- /dev/null +++ b/components/engine/client/volume_create_test.go @@ -0,0 +1,75 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + volumetypes "github.com/docker/docker/api/types/volume" + "golang.org/x/net/context" +) + +func TestVolumeCreateError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, err := client.VolumeCreate(context.Background(), volumetypes.VolumesCreateBody{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestVolumeCreate(t *testing.T) { + expectedURL := "/volumes/create" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + + if req.Method != "POST" { + return nil, fmt.Errorf("expected POST method, got %s", req.Method) + } + + content, err := json.Marshal(types.Volume{ + Name: "volume", + Driver: "local", + Mountpoint: "mountpoint", + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + volume, err := client.VolumeCreate(context.Background(), volumetypes.VolumesCreateBody{ + Name: "myvolume", + Driver: "mydriver", + DriverOpts: map[string]string{ + "opt-key": "opt-value", + }, + }) + if 
err != nil { + t.Fatal(err) + } + if volume.Name != "volume" { + t.Fatalf("expected volume.Name to be 'volume', got %s", volume.Name) + } + if volume.Driver != "local" { + t.Fatalf("expected volume.Driver to be 'local', got %s", volume.Driver) + } + if volume.Mountpoint != "mountpoint" { + t.Fatalf("expected volume.Mountpoint to be 'mountpoint', got %s", volume.Mountpoint) + } +} diff --git a/components/engine/client/volume_inspect.go b/components/engine/client/volume_inspect.go new file mode 100644 index 0000000000..3860e9b22c --- /dev/null +++ b/components/engine/client/volume_inspect.go @@ -0,0 +1,38 @@ +package client + +import ( + "bytes" + "encoding/json" + "io/ioutil" + "net/http" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// VolumeInspect returns the information about a specific volume in the docker host. +func (cli *Client) VolumeInspect(ctx context.Context, volumeID string) (types.Volume, error) { + volume, _, err := cli.VolumeInspectWithRaw(ctx, volumeID) + return volume, err +} + +// VolumeInspectWithRaw returns the information about a specific volume in the docker host and its raw representation +func (cli *Client) VolumeInspectWithRaw(ctx context.Context, volumeID string) (types.Volume, []byte, error) { + var volume types.Volume + resp, err := cli.get(ctx, "/volumes/"+volumeID, nil, nil) + if err != nil { + if resp.statusCode == http.StatusNotFound { + return volume, nil, volumeNotFoundError{volumeID} + } + return volume, nil, err + } + defer ensureReaderClosed(resp) + + body, err := ioutil.ReadAll(resp.body) + if err != nil { + return volume, nil, err + } + rdr := bytes.NewReader(body) + err = json.NewDecoder(rdr).Decode(&volume) + return volume, body, err +} diff --git a/components/engine/client/volume_inspect_test.go b/components/engine/client/volume_inspect_test.go new file mode 100644 index 0000000000..0d1d118828 --- /dev/null +++ b/components/engine/client/volume_inspect_test.go @@ -0,0 +1,76 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +func TestVolumeInspectError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, err := client.VolumeInspect(context.Background(), "nothing") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestVolumeInspectNotFound(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusNotFound, "Server error")), + } + + _, err := client.VolumeInspect(context.Background(), "unknown") + if err == nil || !IsErrVolumeNotFound(err) { + t.Fatalf("expected a volumeNotFound error, got %v", err) + } +} + +func TestVolumeInspect(t *testing.T) { + expectedURL := "/volumes/volume_id" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "GET" { + return nil, fmt.Errorf("expected GET method, got %s", req.Method) + } + content, err := json.Marshal(types.Volume{ + Name: "name", + Driver: "driver", + Mountpoint: "mountpoint", + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + v, 
err := client.VolumeInspect(context.Background(), "volume_id") + if err != nil { + t.Fatal(err) + } + if v.Name != "name" { + t.Fatalf("expected `name`, got %s", v.Name) + } + if v.Driver != "driver" { + t.Fatalf("expected `driver`, got %s", v.Driver) + } + if v.Mountpoint != "mountpoint" { + t.Fatalf("expected `mountpoint`, got %s", v.Mountpoint) + } +} diff --git a/components/engine/client/volume_list.go b/components/engine/client/volume_list.go new file mode 100644 index 0000000000..32247ce115 --- /dev/null +++ b/components/engine/client/volume_list.go @@ -0,0 +1,32 @@ +package client + +import ( + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types/filters" + volumetypes "github.com/docker/docker/api/types/volume" + "golang.org/x/net/context" +) + +// VolumeList returns the volumes configured in the docker host. +func (cli *Client) VolumeList(ctx context.Context, filter filters.Args) (volumetypes.VolumesListOKBody, error) { + var volumes volumetypes.VolumesListOKBody + query := url.Values{} + + if filter.Len() > 0 { + filterJSON, err := filters.ToParamWithVersion(cli.version, filter) + if err != nil { + return volumes, err + } + query.Set("filters", filterJSON) + } + resp, err := cli.get(ctx, "/volumes", query, nil) + if err != nil { + return volumes, err + } + + err = json.NewDecoder(resp.body).Decode(&volumes) + ensureReaderClosed(resp) + return volumes, err +} diff --git a/components/engine/client/volume_list_test.go b/components/engine/client/volume_list_test.go new file mode 100644 index 0000000000..f29639be23 --- /dev/null +++ b/components/engine/client/volume_list_test.go @@ -0,0 +1,98 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + volumetypes "github.com/docker/docker/api/types/volume" + "golang.org/x/net/context" +) + +func TestVolumeListError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, err := client.VolumeList(context.Background(), filters.NewArgs()) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestVolumeList(t *testing.T) { + expectedURL := "/volumes" + + noDanglingFilters := filters.NewArgs() + noDanglingFilters.Add("dangling", "false") + + danglingFilters := filters.NewArgs() + danglingFilters.Add("dangling", "true") + + labelFilters := filters.NewArgs() + labelFilters.Add("label", "label1") + labelFilters.Add("label", "label2") + + listCases := []struct { + filters filters.Args + expectedFilters string + }{ + { + filters: filters.NewArgs(), + expectedFilters: "", + }, { + filters: noDanglingFilters, + expectedFilters: `{"dangling":{"false":true}}`, + }, { + filters: danglingFilters, + expectedFilters: `{"dangling":{"true":true}}`, + }, { + filters: labelFilters, + expectedFilters: `{"label":{"label1":true,"label2":true}}`, + }, + } + + for _, listCase := range listCases { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + query := req.URL.Query() + actualFilters := query.Get("filters") + if actualFilters != listCase.expectedFilters { + return nil, fmt.Errorf("filters not set in URL query properly. 
Expected '%s', got %s", listCase.expectedFilters, actualFilters) + } + content, err := json.Marshal(volumetypes.VolumesListOKBody{ + Volumes: []*types.Volume{ + { + Name: "volume", + Driver: "local", + }, + }, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + volumeResponse, err := client.VolumeList(context.Background(), listCase.filters) + if err != nil { + t.Fatal(err) + } + if len(volumeResponse.Volumes) != 1 { + t.Fatalf("expected 1 volume, got %v", volumeResponse.Volumes) + } + } +} diff --git a/components/engine/client/volume_prune.go b/components/engine/client/volume_prune.go new file mode 100644 index 0000000000..2e7fea7747 --- /dev/null +++ b/components/engine/client/volume_prune.go @@ -0,0 +1,36 @@ +package client + +import ( + "encoding/json" + "fmt" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "golang.org/x/net/context" +) + +// VolumesPrune requests the daemon to delete unused data +func (cli *Client) VolumesPrune(ctx context.Context, pruneFilters filters.Args) (types.VolumesPruneReport, error) { + var report types.VolumesPruneReport + + if err := cli.NewVersionError("1.25", "volume prune"); err != nil { + return report, err + } + + query, err := getFiltersQuery(pruneFilters) + if err != nil { + return report, err + } + + serverResp, err := cli.post(ctx, "/volumes/prune", query, nil, nil) + if err != nil { + return report, err + } + defer ensureReaderClosed(serverResp) + + if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil { + return report, fmt.Errorf("Error retrieving volume prune report: %v", err) + } + + return report, nil +} diff --git a/components/engine/client/volume_remove.go b/components/engine/client/volume_remove.go new file mode 100644 index 0000000000..6c26575b49 --- /dev/null +++ b/components/engine/client/volume_remove.go @@ -0,0 +1,21 @@ +package client + +import ( + "net/url" + + "github.com/docker/docker/api/types/versions" + "golang.org/x/net/context" +) + +// VolumeRemove removes a volume from the docker host. 
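+//
+// Illustrative usage sketch (cli and ctx assumed; "myvolume" is a
+// placeholder). Note that, per the version gate below, force is only sent to
+// daemons speaking API 1.25 or newer:
+//
+//	if err := cli.VolumeRemove(ctx, "myvolume", false); err != nil {
+//		log.Fatal(err)
+//	}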
+func (cli *Client) VolumeRemove(ctx context.Context, volumeID string, force bool) error {
+	query := url.Values{}
+	if versions.GreaterThanOrEqualTo(cli.version, "1.25") {
+		if force {
+			query.Set("force", "1")
+		}
+	}
+	resp, err := cli.delete(ctx, "/volumes/"+volumeID, query, nil)
+	ensureReaderClosed(resp)
+	return err
+}
diff --git a/components/engine/client/volume_remove_test.go b/components/engine/client/volume_remove_test.go
new file mode 100644
index 0000000000..1fe657349a
--- /dev/null
+++ b/components/engine/client/volume_remove_test.go
@@ -0,0 +1,47 @@
+package client
+
+import (
+	"bytes"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"strings"
+	"testing"
+
+	"golang.org/x/net/context"
+)
+
+func TestVolumeRemoveError(t *testing.T) {
+	client := &Client{
+		client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")),
+	}
+
+	err := client.VolumeRemove(context.Background(), "volume_id", false)
+	if err == nil || err.Error() != "Error response from daemon: Server error" {
+		t.Fatalf("expected a Server Error, got %v", err)
+	}
+}
+
+func TestVolumeRemove(t *testing.T) {
+	expectedURL := "/volumes/volume_id"
+
+	client := &Client{
+		client: newMockClient(func(req *http.Request) (*http.Response, error) {
+			if !strings.HasPrefix(req.URL.Path, expectedURL) {
+				return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL)
+			}
+			if req.Method != "DELETE" {
+				return nil, fmt.Errorf("expected DELETE method, got %s", req.Method)
+			}
+			return &http.Response{
+				StatusCode: http.StatusOK,
+				Body:       ioutil.NopCloser(bytes.NewReader([]byte("body"))),
+			}, nil
+		}),
+	}
+
+	err := client.VolumeRemove(context.Background(), "volume_id", false)
+	if err != nil {
+		t.Fatal(err)
+	}
+}
diff --git a/components/engine/cmd/dockerd/README.md b/components/engine/cmd/dockerd/README.md
new file mode 100644
index 0000000000..a8c20b3549
--- /dev/null
+++ b/components/engine/cmd/dockerd/README.md
@@ -0,0 +1,3 @@
+docker.go contains the Docker daemon's main function.
+
+This file provides first-line CLI argument parsing and environment-variable setting.
diff --git a/components/engine/cmd/dockerd/config.go b/components/engine/cmd/dockerd/config.go new file mode 100644 index 0000000000..da4071e46c --- /dev/null +++ b/components/engine/cmd/dockerd/config.go @@ -0,0 +1,67 @@ +package main + +import ( + "github.com/docker/docker/daemon/config" + "github.com/docker/docker/opts" + "github.com/spf13/pflag" +) + +const ( + // defaultShutdownTimeout is the default shutdown timeout for the daemon + defaultShutdownTimeout = 15 + // defaultTrustKeyFile is the default filename for the trust key + defaultTrustKeyFile = "key.json" +) + +// installCommonConfigFlags adds flags to the pflag.FlagSet to configure the daemon +func installCommonConfigFlags(conf *config.Config, flags *pflag.FlagSet) { + var maxConcurrentDownloads, maxConcurrentUploads int + + conf.ServiceOptions.InstallCliFlags(flags) + + flags.Var(opts.NewNamedListOptsRef("storage-opts", &conf.GraphOptions, nil), "storage-opt", "Storage driver options") + flags.Var(opts.NewNamedListOptsRef("authorization-plugins", &conf.AuthorizationPlugins, nil), "authorization-plugin", "Authorization plugins to load") + flags.Var(opts.NewNamedListOptsRef("exec-opts", &conf.ExecOptions, nil), "exec-opt", "Runtime execution options") + flags.StringVarP(&conf.Pidfile, "pidfile", "p", defaultPidFile, "Path to use for daemon PID file") + flags.StringVarP(&conf.Root, "graph", "g", defaultDataRoot, "Root of the Docker runtime") + + // "--graph" is "soft-deprecated" in favor of "data-root". This flag was added + // before Docker 1.0, so won't be removed, only hidden, to discourage its usage. + flags.MarkHidden("graph") + + flags.StringVar(&conf.Root, "data-root", defaultDataRoot, "Root directory of persistent Docker state") + + flags.BoolVarP(&conf.AutoRestart, "restart", "r", true, "--restart on the daemon has been deprecated in favor of --restart policies on docker run") + flags.MarkDeprecated("restart", "Please use a restart policy on docker run") + flags.StringVarP(&conf.GraphDriver, "storage-driver", "s", "", "Storage driver to use") + flags.IntVar(&conf.Mtu, "mtu", 0, "Set the containers network MTU") + flags.BoolVar(&conf.RawLogs, "raw-logs", false, "Full timestamps without ANSI coloring") + flags.Var(opts.NewListOptsRef(&conf.DNS, opts.ValidateIPAddress), "dns", "DNS server to use") + flags.Var(opts.NewNamedListOptsRef("dns-opts", &conf.DNSOptions, nil), "dns-opt", "DNS options to use") + flags.Var(opts.NewListOptsRef(&conf.DNSSearch, opts.ValidateDNSSearch), "dns-search", "DNS search domains to use") + flags.Var(opts.NewNamedListOptsRef("labels", &conf.Labels, opts.ValidateLabel), "label", "Set key=value labels to the daemon") + flags.StringVar(&conf.LogConfig.Type, "log-driver", "json-file", "Default driver for container logs") + flags.Var(opts.NewNamedMapOpts("log-opts", conf.LogConfig.Config, nil), "log-opt", "Default log driver options for containers") + flags.StringVar(&conf.ClusterAdvertise, "cluster-advertise", "", "Address or interface name to advertise") + flags.StringVar(&conf.ClusterStore, "cluster-store", "", "URL of the distributed storage backend") + flags.Var(opts.NewNamedMapOpts("cluster-store-opts", conf.ClusterOpts, nil), "cluster-store-opt", "Set cluster store options") + flags.StringVar(&conf.CorsHeaders, "api-cors-header", "", "Set CORS headers in the Engine API") + flags.IntVar(&maxConcurrentDownloads, "max-concurrent-downloads", config.DefaultMaxConcurrentDownloads, "Set the max concurrent downloads for each pull") + flags.IntVar(&maxConcurrentUploads, "max-concurrent-uploads", 
config.DefaultMaxConcurrentUploads, "Set the max concurrent uploads for each push") + flags.IntVar(&conf.ShutdownTimeout, "shutdown-timeout", defaultShutdownTimeout, "Set the default shutdown timeout") + + flags.StringVar(&conf.SwarmDefaultAdvertiseAddr, "swarm-default-advertise-addr", "", "Set default address or interface for swarm advertised address") + flags.BoolVar(&conf.Experimental, "experimental", false, "Enable experimental features") + + flags.StringVar(&conf.MetricsAddress, "metrics-addr", "", "Set default address and port to serve the metrics api on") + + // "--deprecated-key-path" is to allow configuration of the key used + // for the daemon ID and the deprecated image signing. It was never + // exposed as a command line option but is added here to allow + // overriding the default path in configuration. + flags.Var(opts.NewQuotedString(&conf.TrustKeyPath), "deprecated-key-path", "Path to key file for ID and image signing") + flags.MarkHidden("deprecated-key-path") + + conf.MaxConcurrentDownloads = &maxConcurrentDownloads + conf.MaxConcurrentUploads = &maxConcurrentUploads +} diff --git a/components/engine/cmd/dockerd/config_common_unix.go b/components/engine/cmd/dockerd/config_common_unix.go new file mode 100644 index 0000000000..b29307b596 --- /dev/null +++ b/components/engine/cmd/dockerd/config_common_unix.go @@ -0,0 +1,34 @@ +// +build solaris linux freebsd + +package main + +import ( + "github.com/docker/docker/api/types" + "github.com/docker/docker/daemon/config" + "github.com/docker/docker/opts" + "github.com/spf13/pflag" +) + +var ( + defaultPidFile = "/var/run/docker.pid" + defaultDataRoot = "/var/lib/docker" + defaultExecRoot = "/var/run/docker" +) + +// installUnixConfigFlags adds command-line options to the top-level flag parser for +// the current process that are common across Unix platforms. 
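+//
+// For orientation, the flags registered below surface on the dockerd command
+// line roughly as follows (the values are made-up examples, not defaults):
+//
+//	dockerd --bip 172.26.0.1/16 --icc=false --ip 0.0.0.0 \
+//	        --add-runtime custom=/usr/local/bin/my-runc --default-runtime runc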
+func installUnixConfigFlags(conf *config.Config, flags *pflag.FlagSet) { + conf.Runtimes = make(map[string]types.Runtime) + + flags.StringVarP(&conf.SocketGroup, "group", "G", "docker", "Group for the unix socket") + flags.StringVar(&conf.BridgeConfig.IP, "bip", "", "Specify network bridge IP") + flags.StringVarP(&conf.BridgeConfig.Iface, "bridge", "b", "", "Attach containers to a network bridge") + flags.StringVar(&conf.BridgeConfig.FixedCIDR, "fixed-cidr", "", "IPv4 subnet for fixed IPs") + flags.Var(opts.NewIPOpt(&conf.BridgeConfig.DefaultGatewayIPv4, ""), "default-gateway", "Container default gateway IPv4 address") + flags.Var(opts.NewIPOpt(&conf.BridgeConfig.DefaultGatewayIPv6, ""), "default-gateway-v6", "Container default gateway IPv6 address") + flags.BoolVar(&conf.BridgeConfig.InterContainerCommunication, "icc", true, "Enable inter-container communication") + flags.Var(opts.NewIPOpt(&conf.BridgeConfig.DefaultIP, "0.0.0.0"), "ip", "Default IP when binding container ports") + flags.Var(opts.NewNamedRuntimeOpt("runtimes", &conf.Runtimes, config.StockRuntimeName), "add-runtime", "Register an additional OCI compatible runtime") + flags.StringVar(&conf.DefaultRuntime, "default-runtime", config.StockRuntimeName, "Default OCI runtime for containers") + +} diff --git a/components/engine/cmd/dockerd/config_experimental.go b/components/engine/cmd/dockerd/config_experimental.go new file mode 100644 index 0000000000..355a29e859 --- /dev/null +++ b/components/engine/cmd/dockerd/config_experimental.go @@ -0,0 +1,9 @@ +package main + +import ( + "github.com/docker/docker/daemon/config" + "github.com/spf13/pflag" +) + +func attachExperimentalFlags(conf *config.Config, cmd *pflag.FlagSet) { +} diff --git a/components/engine/cmd/dockerd/config_solaris.go b/components/engine/cmd/dockerd/config_solaris.go new file mode 100644 index 0000000000..582211c6c8 --- /dev/null +++ b/components/engine/cmd/dockerd/config_solaris.go @@ -0,0 +1,17 @@ +package main + +import ( + "github.com/docker/docker/daemon/config" + "github.com/spf13/pflag" +) + +// installConfigFlags adds flags to the pflag.FlagSet to configure the daemon +func installConfigFlags(conf *config.Config, flags *pflag.FlagSet) { + // First handle install flags which are consistent cross-platform + installCommonConfigFlags(conf, flags) + + // Then install flags common to unix platforms + installUnixConfigFlags(conf, flags) + + attachExperimentalFlags(conf, flags) +} diff --git a/components/engine/cmd/dockerd/config_unix.go b/components/engine/cmd/dockerd/config_unix.go new file mode 100644 index 0000000000..8e741aa904 --- /dev/null +++ b/components/engine/cmd/dockerd/config_unix.go @@ -0,0 +1,52 @@ +// +build linux,!solaris freebsd,!solaris + +package main + +import ( + "github.com/docker/docker/daemon/config" + "github.com/docker/docker/opts" + units "github.com/docker/go-units" + "github.com/spf13/pflag" +) + +// installConfigFlags adds flags to the pflag.FlagSet to configure the daemon +func installConfigFlags(conf *config.Config, flags *pflag.FlagSet) { + // First handle install flags which are consistent cross-platform + installCommonConfigFlags(conf, flags) + + // Then install flags common to unix platforms + installUnixConfigFlags(conf, flags) + + conf.Ulimits = make(map[string]*units.Ulimit) + + // Set default value for `--default-shm-size` + conf.ShmSize = opts.MemBytes(config.DefaultShmSize) + + // Then platform-specific install flags + 
flags.BoolVar(&conf.EnableSelinuxSupport, "selinux-enabled", false, "Enable selinux support") + flags.Var(opts.NewUlimitOpt(&conf.Ulimits), "default-ulimit", "Default ulimits for containers") + flags.BoolVar(&conf.BridgeConfig.EnableIPTables, "iptables", true, "Enable addition of iptables rules") + flags.BoolVar(&conf.BridgeConfig.EnableIPForward, "ip-forward", true, "Enable net.ipv4.ip_forward") + flags.BoolVar(&conf.BridgeConfig.EnableIPMasq, "ip-masq", true, "Enable IP masquerading") + flags.BoolVar(&conf.BridgeConfig.EnableIPv6, "ipv6", false, "Enable IPv6 networking") + flags.StringVar(&conf.ExecRoot, "exec-root", defaultExecRoot, "Root directory for execution state files") + flags.StringVar(&conf.BridgeConfig.FixedCIDRv6, "fixed-cidr-v6", "", "IPv6 subnet for fixed IPs") + flags.BoolVar(&conf.BridgeConfig.EnableUserlandProxy, "userland-proxy", true, "Use userland proxy for loopback traffic") + flags.StringVar(&conf.BridgeConfig.UserlandProxyPath, "userland-proxy-path", "", "Path to the userland proxy binary") + flags.BoolVar(&conf.EnableCors, "api-enable-cors", false, "Enable CORS headers in the Engine API, this is deprecated by --api-cors-header") + flags.MarkDeprecated("api-enable-cors", "Please use --api-cors-header") + flags.StringVar(&conf.CgroupParent, "cgroup-parent", "", "Set parent cgroup for all containers") + flags.StringVar(&conf.RemappedRoot, "userns-remap", "", "User/Group setting for user namespaces") + flags.StringVar(&conf.ContainerdAddr, "containerd", "", "Path to containerd socket") + flags.BoolVar(&conf.LiveRestoreEnabled, "live-restore", false, "Enable live restore of docker when containers are still running") + flags.IntVar(&conf.OOMScoreAdjust, "oom-score-adjust", -500, "Set the oom_score_adj for the daemon") + flags.BoolVar(&conf.Init, "init", false, "Run an init in the container to forward signals and reap processes") + flags.StringVar(&conf.InitPath, "init-path", "", "Path to the docker-init binary") + flags.Int64Var(&conf.CPURealtimePeriod, "cpu-rt-period", 0, "Limit the CPU real-time period in microseconds") + flags.Int64Var(&conf.CPURealtimeRuntime, "cpu-rt-runtime", 0, "Limit the CPU real-time runtime in microseconds") + flags.StringVar(&conf.SeccompProfile, "seccomp-profile", "", "Path to seccomp profile") + flags.Var(&conf.ShmSize, "default-shm-size", "Default shm size for containers") + flags.BoolVar(&conf.NoNewPrivileges, "no-new-privileges", false, "Set no-new-privileges by default for new containers") + + attachExperimentalFlags(conf, flags) +} diff --git a/components/engine/cmd/dockerd/config_unix_test.go b/components/engine/cmd/dockerd/config_unix_test.go new file mode 100644 index 0000000000..99b2f90b40 --- /dev/null +++ b/components/engine/cmd/dockerd/config_unix_test.go @@ -0,0 +1,26 @@ +// +build linux,!solaris freebsd,!solaris + +package main + +import ( + "runtime" + "testing" + + "github.com/docker/docker/daemon/config" + "github.com/spf13/pflag" + "github.com/stretchr/testify/assert" +) + +func TestDaemonParseShmSize(t *testing.T) { + if runtime.GOOS == "solaris" { + t.Skip("ShmSize not supported on Solaris\n") + } + flags := pflag.NewFlagSet("test", pflag.ContinueOnError) + + conf := &config.Config{} + installConfigFlags(conf, flags) + // By default `--default-shm-size=64M` + assert.Equal(t, int64(64*1024*1024), conf.ShmSize.Value()) + assert.NoError(t, flags.Set("default-shm-size", "128M")) + assert.Equal(t, int64(128*1024*1024), conf.ShmSize.Value()) +} diff --git a/components/engine/cmd/dockerd/config_windows.go 
b/components/engine/cmd/dockerd/config_windows.go new file mode 100644 index 0000000000..79cdd25048 --- /dev/null +++ b/components/engine/cmd/dockerd/config_windows.go @@ -0,0 +1,25 @@ +package main + +import ( + "os" + "path/filepath" + + "github.com/docker/docker/daemon/config" + "github.com/spf13/pflag" +) + +var ( + defaultPidFile string + defaultDataRoot = filepath.Join(os.Getenv("programdata"), "docker") +) + +// installConfigFlags adds flags to the pflag.FlagSet to configure the daemon +func installConfigFlags(conf *config.Config, flags *pflag.FlagSet) { + // First handle install flags which are consistent cross-platform + installCommonConfigFlags(conf, flags) + + // Then platform-specific install flags. + flags.StringVar(&conf.BridgeConfig.FixedCIDR, "fixed-cidr", "", "IPv4 subnet for fixed IPs") + flags.StringVarP(&conf.BridgeConfig.Iface, "bridge", "b", "", "Attach containers to a virtual switch") + flags.StringVarP(&conf.SocketGroup, "group", "G", "", "Users or groups that can access the named pipe") +} diff --git a/components/engine/cmd/dockerd/daemon.go b/components/engine/cmd/dockerd/daemon.go new file mode 100644 index 0000000000..5898df77f6 --- /dev/null +++ b/components/engine/cmd/dockerd/daemon.go @@ -0,0 +1,500 @@ +package main + +import ( + "context" + "crypto/tls" + "fmt" + "os" + "path/filepath" + "strings" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/uuid" + "github.com/docker/docker/api" + apiserver "github.com/docker/docker/api/server" + buildbackend "github.com/docker/docker/api/server/backend/build" + "github.com/docker/docker/api/server/middleware" + "github.com/docker/docker/api/server/router" + "github.com/docker/docker/api/server/router/build" + checkpointrouter "github.com/docker/docker/api/server/router/checkpoint" + "github.com/docker/docker/api/server/router/container" + distributionrouter "github.com/docker/docker/api/server/router/distribution" + "github.com/docker/docker/api/server/router/image" + "github.com/docker/docker/api/server/router/network" + pluginrouter "github.com/docker/docker/api/server/router/plugin" + swarmrouter "github.com/docker/docker/api/server/router/swarm" + systemrouter "github.com/docker/docker/api/server/router/system" + "github.com/docker/docker/api/server/router/volume" + "github.com/docker/docker/cli/debug" + cliflags "github.com/docker/docker/cli/flags" + "github.com/docker/docker/daemon" + "github.com/docker/docker/daemon/cluster" + "github.com/docker/docker/daemon/config" + "github.com/docker/docker/daemon/logger" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/libcontainerd" + dopts "github.com/docker/docker/opts" + "github.com/docker/docker/pkg/authorization" + "github.com/docker/docker/pkg/jsonlog" + "github.com/docker/docker/pkg/listeners" + "github.com/docker/docker/pkg/pidfile" + "github.com/docker/docker/pkg/plugingetter" + "github.com/docker/docker/pkg/signal" + "github.com/docker/docker/plugin" + "github.com/docker/docker/registry" + "github.com/docker/docker/runconfig" + "github.com/docker/go-connections/tlsconfig" + swarmapi "github.com/docker/swarmkit/api" + "github.com/spf13/pflag" +) + +// DaemonCli represents the daemon CLI. 
+type DaemonCli struct { + *config.Config + configFile *string + flags *pflag.FlagSet + + api *apiserver.Server + d *daemon.Daemon + authzMiddleware *authorization.Middleware // authzMiddleware enables to dynamically reload the authorization plugins +} + +// NewDaemonCli returns a daemon CLI +func NewDaemonCli() *DaemonCli { + return &DaemonCli{} +} + +func (cli *DaemonCli) start(opts daemonOptions) (err error) { + stopc := make(chan bool) + defer close(stopc) + + // warn from uuid package when running the daemon + uuid.Loggerf = logrus.Warnf + + opts.common.SetDefaultOptions(opts.flags) + + if cli.Config, err = loadDaemonCliConfig(opts); err != nil { + return err + } + cli.configFile = &opts.configFile + cli.flags = opts.flags + + if cli.Config.Debug { + debug.Enable() + } + + if cli.Config.Experimental { + logrus.Warn("Running experimental build") + } + + logrus.SetFormatter(&logrus.TextFormatter{ + TimestampFormat: jsonlog.RFC3339NanoFixed, + DisableColors: cli.Config.RawLogs, + }) + + if err := setDefaultUmask(); err != nil { + return fmt.Errorf("Failed to set umask: %v", err) + } + + if len(cli.LogConfig.Config) > 0 { + if err := logger.ValidateLogOpts(cli.LogConfig.Type, cli.LogConfig.Config); err != nil { + return fmt.Errorf("Failed to set log opts: %v", err) + } + } + + // Create the daemon root before we create ANY other files (PID, or migrate keys) + // to ensure the appropriate ACL is set (particularly relevant on Windows) + if err := daemon.CreateDaemonRoot(cli.Config); err != nil { + return err + } + + if cli.Pidfile != "" { + pf, err := pidfile.New(cli.Pidfile) + if err != nil { + return fmt.Errorf("Error starting daemon: %v", err) + } + defer func() { + if err := pf.Remove(); err != nil { + logrus.Error(err) + } + }() + } + + serverConfig := &apiserver.Config{ + Logging: true, + SocketGroup: cli.Config.SocketGroup, + Version: dockerversion.Version, + EnableCors: cli.Config.EnableCors, + CorsHeaders: cli.Config.CorsHeaders, + } + + if cli.Config.TLS { + tlsOptions := tlsconfig.Options{ + CAFile: cli.Config.CommonTLSOptions.CAFile, + CertFile: cli.Config.CommonTLSOptions.CertFile, + KeyFile: cli.Config.CommonTLSOptions.KeyFile, + ExclusiveRootPools: true, + } + + if cli.Config.TLSVerify { + // server requires and verifies client's certificate + tlsOptions.ClientAuth = tls.RequireAndVerifyClientCert + } + tlsConfig, err := tlsconfig.Server(tlsOptions) + if err != nil { + return err + } + serverConfig.TLSConfig = tlsConfig + } + + if len(cli.Config.Hosts) == 0 { + cli.Config.Hosts = make([]string, 1) + } + + api := apiserver.New(serverConfig) + cli.api = api + + for i := 0; i < len(cli.Config.Hosts); i++ { + var err error + if cli.Config.Hosts[i], err = dopts.ParseHost(cli.Config.TLS, cli.Config.Hosts[i]); err != nil { + return fmt.Errorf("error parsing -H %s : %v", cli.Config.Hosts[i], err) + } + + protoAddr := cli.Config.Hosts[i] + protoAddrParts := strings.SplitN(protoAddr, "://", 2) + if len(protoAddrParts) != 2 { + return fmt.Errorf("bad format %s, expected PROTO://ADDR", protoAddr) + } + + proto := protoAddrParts[0] + addr := protoAddrParts[1] + + // It's a bad idea to bind to TCP without tlsverify. + if proto == "tcp" && (serverConfig.TLSConfig == nil || serverConfig.TLSConfig.ClientAuth != tls.RequireAndVerifyClientCert) { + logrus.Warn("[!] 
DON'T BIND ON ANY IP ADDRESS WITHOUT setting --tlsverify IF YOU DON'T KNOW WHAT YOU'RE DOING [!]") + } + ls, err := listeners.Init(proto, addr, serverConfig.SocketGroup, serverConfig.TLSConfig) + if err != nil { + return err + } + ls = wrapListeners(proto, ls) + // If we're binding to a TCP port, make sure that a container doesn't try to use it. + if proto == "tcp" { + if err := allocateDaemonPort(addr); err != nil { + return err + } + } + logrus.Debugf("Listener created for HTTP on %s (%s)", proto, addr) + api.Accept(addr, ls...) + } + + registryService := registry.NewService(cli.Config.ServiceOptions) + containerdRemote, err := libcontainerd.New(cli.getLibcontainerdRoot(), cli.getPlatformRemoteOptions()...) + if err != nil { + return err + } + signal.Trap(func() { + cli.stop() + <-stopc // wait for daemonCli.start() to return + }) + + // Notify that the API is active, but before daemon is set up. + preNotifySystem() + + pluginStore := plugin.NewStore() + + if err := cli.initMiddlewares(api, serverConfig, pluginStore); err != nil { + logrus.Fatalf("Error creating middlewares: %v", err) + } + + d, err := daemon.NewDaemon(cli.Config, registryService, containerdRemote, pluginStore) + if err != nil { + return fmt.Errorf("Error starting daemon: %v", err) + } + + // validate after NewDaemon has restored enabled plugins. Dont change order. + if err := validateAuthzPlugins(cli.Config.AuthorizationPlugins, pluginStore); err != nil { + return fmt.Errorf("Error validating authorization plugin: %v", err) + } + + if cli.Config.MetricsAddress != "" { + if !d.HasExperimental() { + return fmt.Errorf("metrics-addr is only supported when experimental is enabled") + } + if err := startMetricsServer(cli.Config.MetricsAddress); err != nil { + return err + } + } + + name, _ := os.Hostname() + + // Use a buffered channel to pass changes from store watch API to daemon + // A buffer allows store watch API and daemon processing to not wait for each other + watchStream := make(chan *swarmapi.WatchMessage, 32) + + c, err := cluster.New(cluster.Config{ + Root: cli.Config.Root, + Name: name, + Backend: d, + NetworkSubnetsProvider: d, + DefaultAdvertiseAddr: cli.Config.SwarmDefaultAdvertiseAddr, + RuntimeRoot: cli.getSwarmRunRoot(), + WatchStream: watchStream, + }) + if err != nil { + logrus.Fatalf("Error creating cluster component: %v", err) + } + d.SetCluster(c) + err = c.Start() + if err != nil { + logrus.Fatalf("Error starting cluster component: %v", err) + } + + // Restart all autostart containers which has a swarm endpoint + // and is not yet running now that we have successfully + // initialized the cluster. 
+
+ // Now that the cluster has been successfully initialized, restart any
+ // autostart containers that have a swarm endpoint and are not yet running.
+ d.RestartSwarmContainers()
+
+ logrus.Info("Daemon has completed initialization")
+
+ logrus.WithFields(logrus.Fields{
+ "version": dockerversion.Version,
+ "commit": dockerversion.GitCommit,
+ "graphdriver": d.GraphDriverName(),
+ }).Info("Docker daemon")
+
+ cli.d = d
+
+ initRouter(api, d, c)
+
+ // process cluster change notifications
+ watchCtx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+ go d.ProcessClusterNotifications(watchCtx, watchStream)
+
+ cli.setupConfigReloadTrap()
+
+ // The serve API routine never exits unless an error occurs.
+ // We need to start it as a goroutine and wait on it so that
+ // the daemon doesn't exit.
+ serveAPIWait := make(chan error)
+ go api.Wait(serveAPIWait)
+
+ // after the daemon is done setting up we can notify the systemd API
+ notifySystem()
+
+ // Daemon is fully initialized and handling API traffic.
+ // Wait for the serve API to complete.
+ errAPI := <-serveAPIWait
+ c.Cleanup()
+ shutdownDaemon(d)
+ containerdRemote.Cleanup()
+ if errAPI != nil {
+ return fmt.Errorf("Shutting down due to ServeAPI error: %v", errAPI)
+ }
+
+ return nil
+}
+
+func (cli *DaemonCli) reloadConfig() {
+ reload := func(config *config.Config) {
+
+ // Revalidate and reload the authorization plugins
+ if err := validateAuthzPlugins(config.AuthorizationPlugins, cli.d.PluginStore); err != nil {
+ logrus.Fatalf("Error validating authorization plugin: %v", err)
+ return
+ }
+ cli.authzMiddleware.SetPlugins(config.AuthorizationPlugins)
+
+ if err := cli.d.Reload(config); err != nil {
+ logrus.Errorf("Error reconfiguring the daemon: %v", err)
+ return
+ }
+
+ if config.IsValueSet("debug") {
+ debugEnabled := debug.IsEnabled()
+ switch {
+ case debugEnabled && !config.Debug: // disable debug
+ debug.Disable()
+ cli.api.DisableProfiler()
+ case config.Debug && !debugEnabled: // enable debug
+ debug.Enable()
+ cli.api.EnableProfiler()
+ }
+ }
+ }
+
+ if err := config.Reload(*cli.configFile, cli.flags, reload); err != nil {
+ logrus.Error(err)
+ }
+}
+
+func (cli *DaemonCli) stop() {
+ cli.api.Close()
+}
+
+// shutdownDaemon just wraps daemon.Shutdown() to handle a timeout in case
+// d.Shutdown() takes too long to kill the containers or, worse, is
+// blocked there.
+func shutdownDaemon(d *daemon.Daemon) {
+ shutdownTimeout := d.ShutdownTimeout()
+ ch := make(chan struct{})
+ go func() {
+ d.Shutdown()
+ close(ch)
+ }()
+ if shutdownTimeout < 0 {
+ <-ch
+ logrus.Debug("Clean shutdown succeeded")
+ return
+ }
+ select {
+ case <-ch:
+ logrus.Debug("Clean shutdown succeeded")
+ case <-time.After(time.Duration(shutdownTimeout) * time.Second):
+ logrus.Error("Force shutdown daemon")
+ }
+}
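The `shutdownDaemon` helper above is a reusable Go pattern: run a possibly slow cleanup in a goroutine and bound the wait with `select` and `time.After`, where a negative timeout means "wait forever". A standalone sketch, assuming a hypothetical `cleanup` function:

```go
// The bounded-wait pattern used by shutdownDaemon above, in isolation.
package main

import (
	"fmt"
	"time"
)

func waitWithTimeout(cleanup func(), timeout time.Duration) {
	done := make(chan struct{})
	go func() {
		cleanup()
		close(done)
	}()
	if timeout < 0 {
		<-done // negative timeout: wait indefinitely
		fmt.Println("clean shutdown")
		return
	}
	select {
	case <-done:
		fmt.Println("clean shutdown")
	case <-time.After(timeout):
		// Like the daemon, we give up waiting but leave the goroutine running.
		fmt.Println("forcing shutdown after timeout")
	}
}

func main() {
	waitWithTimeout(func() { time.Sleep(10 * time.Millisecond) }, time.Second)
}
```

Note that on timeout the cleanup goroutine is simply abandoned, which matches the daemon's "force shutdown" behavior rather than cancelling the work.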
+
+func loadDaemonCliConfig(opts daemonOptions) (*config.Config, error) {
+ conf := opts.daemonConfig
+ flags := opts.flags
+ conf.Debug = opts.common.Debug
+ conf.Hosts = opts.common.Hosts
+ conf.LogLevel = opts.common.LogLevel
+ conf.TLS = opts.common.TLS
+ conf.TLSVerify = opts.common.TLSVerify
+ conf.CommonTLSOptions = config.CommonTLSOptions{}
+
+ if opts.common.TLSOptions != nil {
+ conf.CommonTLSOptions.CAFile = opts.common.TLSOptions.CAFile
+ conf.CommonTLSOptions.CertFile = opts.common.TLSOptions.CertFile
+ conf.CommonTLSOptions.KeyFile = opts.common.TLSOptions.KeyFile
+ }
+
+ if conf.TrustKeyPath == "" {
+ conf.TrustKeyPath = filepath.Join(
+ getDaemonConfDir(conf.Root),
+ defaultTrustKeyFile)
+ }
+
+ if flags.Changed("graph") && flags.Changed("data-root") {
+ return nil, fmt.Errorf(`cannot specify both "--graph" and "--data-root" option`)
+ }
+
+ if opts.configFile != "" {
+ c, err := config.MergeDaemonConfigurations(conf, flags, opts.configFile)
+ if err != nil {
+ if flags.Changed("config-file") || !os.IsNotExist(err) {
+ return nil, fmt.Errorf("unable to configure the Docker daemon with file %s: %v", opts.configFile, err)
+ }
+ }
+ // The merged configuration can be nil if the config file didn't exist;
+ // leave the current configuration as it is when that happens.
+ if c != nil {
+ conf = c
+ }
+ }
+
+ if err := config.Validate(conf); err != nil {
+ return nil, err
+ }
+
+ if flags.Changed("graph") {
+ logrus.Warnf(`the "-g / --graph" flag is deprecated. Please use "--data-root" instead`)
+ }
+
+ // Labels of the docker engine used to allow multiple values associated with the same key.
+ // This is deprecated in 1.13 and will be removed after 3 release cycles.
+ // The following checks for conflicting labels and reports a deprecation warning.
+ //
+ // TODO: After 3 release cycles (17.12) an error will be returned, and labels will be
+ // sanitized to consolidate duplicate key-value pairs (config.Labels = newLabels):
+ //
+ // newLabels, err := daemon.GetConflictFreeLabels(config.Labels)
+ // if err != nil {
+ // return nil, err
+ // }
+ // config.Labels = newLabels
+ //
+ if _, err := config.GetConflictFreeLabels(conf.Labels); err != nil {
+ logrus.Warnf("Engine labels with duplicate keys and conflicting values have been deprecated: %s", err)
+ }
+
+ // Regardless of whether the user sets it to true or false, if they
+ // specify TLSVerify at all then we need to turn on TLS
+ if conf.IsValueSet(cliflags.FlagTLSVerify) {
+ conf.TLS = true
+ }
+
+ // ensure that the log level is the one set after merging configurations
+ cliflags.SetLogLevel(conf.LogLevel)
+
+ return conf, nil
+}
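To make the merge semantics concrete, here is a hypothetical `/etc/docker/daemon.json` assembled from option names exercised by the tests further down (`tlsverify`, `tlscacert`, `log-level`, `labels`):

```json
{
    "tlsverify": true,
    "tlscacert": "/etc/certs/ca.pem",
    "log-level": "warn",
    "labels": ["l3=foo"]
}
```

Per the logic above, setting `tlsverify` here flips `conf.TLS` on even without any TLS flags, while additionally passing `--label` on the command line would make `MergeDaemonConfigurations` fail with the "as a flag and in the configuration file" conflict error that `TestLoadDaemonCliConfigWithConflicts` asserts on.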
+
+func initRouter(s *apiserver.Server, d *daemon.Daemon, c *cluster.Cluster) {
+ decoder := runconfig.ContainerDecoder{}
+
+ routers := []router.Router{
+ // we need to add the checkpoint router before the container router or the DELETE gets masked
+ checkpointrouter.NewRouter(d, decoder),
+ container.NewRouter(d, decoder),
+ image.NewRouter(d, decoder),
+ systemrouter.NewRouter(d, c),
+ volume.NewRouter(d),
+ build.NewRouter(buildbackend.NewBackend(d, d), d),
+ swarmrouter.NewRouter(c),
+ pluginrouter.NewRouter(d.PluginManager()),
+ distributionrouter.NewRouter(d),
+ }
+
+ if d.NetworkControllerEnabled() {
+ routers = append(routers, network.NewRouter(d, c))
+ }
+
+ if d.HasExperimental() {
+ for _, r := range routers {
+ for _, route := range r.Routes() {
+ if experimental, ok := route.(router.ExperimentalRoute); ok {
+ experimental.Enable()
+ }
+ }
+ }
+ }
+
+ s.InitRouter(debug.IsEnabled(), routers...)
+}
+
+func (cli *DaemonCli) initMiddlewares(s *apiserver.Server, cfg *apiserver.Config, pluginStore *plugin.Store) error {
+ v := cfg.Version
+
+ exp := middleware.NewExperimentalMiddleware(cli.Config.Experimental)
+ s.UseMiddleware(exp)
+
+ vm := middleware.NewVersionMiddleware(v, api.DefaultVersion, api.MinVersion)
+ s.UseMiddleware(vm)
+
+ if cfg.EnableCors || cfg.CorsHeaders != "" {
+ c := middleware.NewCORSMiddleware(cfg.CorsHeaders)
+ s.UseMiddleware(c)
+ }
+
+ cli.authzMiddleware = authorization.NewMiddleware(cli.Config.AuthorizationPlugins, pluginStore)
+ cli.Config.AuthzMiddleware = cli.authzMiddleware
+ s.UseMiddleware(cli.authzMiddleware)
+ return nil
+}
+
+// validateAuthzPlugins validates that the plugins requested with the --authorization-plugin
+// flag are valid AuthzDriver plugins present on the host and available to the daemon
+func validateAuthzPlugins(requestedPlugins []string, pg plugingetter.PluginGetter) error {
+ for _, reqPlugin := range requestedPlugins {
+ if _, err := pg.Get(reqPlugin, authorization.AuthZApiImplements, plugingetter.Lookup); err != nil {
+ return err
+ }
+ }
+ return nil
+}
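The ordering comment inside `initRouter` hints at first-match dispatch: a broad container route registered ahead of the more specific checkpoint route would shadow its DELETE endpoint. A toy first-match router, with made-up paths, illustrates the failure mode (the daemon's real router packages are not reproduced here):

```go
// A deliberately naive first-match dispatcher: whichever route is registered
// first and matches the path wins, so order determines reachability.
package main

import (
	"fmt"
	"strings"
)

type route struct {
	prefix  string
	handler func(path string)
}

func dispatch(routes []route, path string) {
	for _, r := range routes {
		if strings.HasPrefix(path, r.prefix) {
			r.handler(path)
			return // first match wins; later routes are masked
		}
	}
	fmt.Println("404:", path)
}

func main() {
	checkpoint := route{"/containers/checkpoints", func(p string) { fmt.Println("checkpoint:", p) }}
	container := route{"/containers", func(p string) { fmt.Println("container:", p) }}

	// Specific before general: the checkpoint handler is reachable.
	dispatch([]route{checkpoint, container}, "/containers/checkpoints/abc")

	// General before specific: the container route masks the checkpoint route.
	dispatch([]route{container, checkpoint}, "/containers/checkpoints/abc")
}
```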
diff --git a/components/engine/cmd/dockerd/daemon_freebsd.go b/components/engine/cmd/dockerd/daemon_freebsd.go
new file mode 100644
index 0000000000..6d013b8103
--- /dev/null
+++ b/components/engine/cmd/dockerd/daemon_freebsd.go
@@ -0,0 +1,9 @@
+package main
+
+// preNotifySystem sends a message to the host when the API is active, but before the daemon is set up
+func preNotifySystem() {
+}
+
+// notifySystem sends a message to the host when the server is ready to be used
+func notifySystem() {
+}
diff --git a/components/engine/cmd/dockerd/daemon_linux.go b/components/engine/cmd/dockerd/daemon_linux.go
new file mode 100644
index 0000000000..a909ee4fbd
--- /dev/null
+++ b/components/engine/cmd/dockerd/daemon_linux.go
@@ -0,0 +1,15 @@
+// +build linux
+
+package main
+
+import systemdDaemon "github.com/coreos/go-systemd/daemon"
+
+// preNotifySystem sends a message to the host when the API is active, but before the daemon is set up
+func preNotifySystem() {
+}
+
+// notifySystem sends a message to the host when the server is ready to be used
+func notifySystem() {
+ // Tell the init daemon we are accepting requests
+ go systemdDaemon.SdNotify("READY=1")
+}
diff --git a/components/engine/cmd/dockerd/daemon_solaris.go b/components/engine/cmd/dockerd/daemon_solaris.go
new file mode 100644
index 0000000000..e12da525a5
--- /dev/null
+++ b/components/engine/cmd/dockerd/daemon_solaris.go
@@ -0,0 +1,89 @@
+// +build solaris
+
+package main
+
+import (
+ "fmt"
+ "net"
+ "os"
+ "path/filepath"
+ "syscall"
+
+ "github.com/docker/docker/libcontainerd"
+ "github.com/docker/docker/pkg/system"
+)
+
+const defaultDaemonConfigFile = ""
+
+// currentUserIsOwner checks whether the current user is the owner of the given
+// file.
+func currentUserIsOwner(f string) bool {
+ if fileInfo, err := system.Stat(f); err == nil && fileInfo != nil {
+ if int(fileInfo.UID()) == os.Getuid() {
+ return true
+ }
+ }
+ return false
+}
+
+// setDefaultUmask sets the umask to 0022 to avoid problems
+// caused by custom umask
+func setDefaultUmask() error {
+ desiredUmask := 0022
+ syscall.Umask(desiredUmask)
+ if umask := syscall.Umask(desiredUmask); umask != desiredUmask {
+ return fmt.Errorf("failed to set umask: expected %#o, got %#o", desiredUmask, umask)
+ }
+
+ return nil
+}
+
+func getDaemonConfDir(_ string) string {
+ return "/etc/docker"
+}
+
+// setupConfigReloadTrap configures the USR2 signal to reload the configuration.
+func (cli *DaemonCli) setupConfigReloadTrap() {
+}
+
+// preNotifySystem sends a message to the host when the API is active, but before the daemon is set up
+func preNotifySystem() {
+}
+
+// notifySystem sends a message to the host when the server is ready to be used
+func notifySystem() {
+}
+
+func (cli *DaemonCli) getPlatformRemoteOptions() []libcontainerd.RemoteOption {
+ opts := []libcontainerd.RemoteOption{}
+ if cli.Config.ContainerdAddr != "" {
+ opts = append(opts, libcontainerd.WithRemoteAddr(cli.Config.ContainerdAddr))
+ } else {
+ opts = append(opts, libcontainerd.WithStartDaemon(true))
+ }
+ return opts
+}
+
+// getLibcontainerdRoot gets the root directory for libcontainerd/containerd to
+// store their state.
+func (cli *DaemonCli) getLibcontainerdRoot() string {
+ return filepath.Join(cli.Config.ExecRoot, "libcontainerd")
+}
+
+// getSwarmRunRoot gets the root directory for swarm to store runtime state,
+// for example the control socket.
+func (cli *DaemonCli) getSwarmRunRoot() string {
+ return filepath.Join(cli.Config.ExecRoot, "swarm")
+}
+
+func allocateDaemonPort(addr string) error {
+ return nil
+}
+
+// notifyShutdown is called after the daemon shuts down but before the process exits.
+func notifyShutdown(err error) {
+}
+
+func wrapListeners(proto string, ls []net.Listener) []net.Listener {
+ return ls
+}
diff --git a/components/engine/cmd/dockerd/daemon_test.go b/components/engine/cmd/dockerd/daemon_test.go
new file mode 100644
index 0000000000..c3ad617c78
--- /dev/null
+++ b/components/engine/cmd/dockerd/daemon_test.go
@@ -0,0 +1,149 @@
+package main
+
+import (
+ "testing"
+
+ "github.com/Sirupsen/logrus"
+ cliflags "github.com/docker/docker/cli/flags"
+ "github.com/docker/docker/daemon/config"
+ "github.com/docker/docker/pkg/testutil"
+ "github.com/docker/docker/pkg/testutil/tempfile"
+ "github.com/spf13/pflag"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func defaultOptions(configFile string) daemonOptions {
+ opts := daemonOptions{
+ daemonConfig: &config.Config{},
+ flags: &pflag.FlagSet{},
+ common: cliflags.NewCommonOptions(),
+ }
+ opts.common.InstallFlags(opts.flags)
+ installConfigFlags(opts.daemonConfig, opts.flags)
+ opts.flags.StringVar(&opts.configFile, "config-file", defaultDaemonConfigFile, "")
+ opts.configFile = configFile
+ return opts
+}
+
+func TestLoadDaemonCliConfigWithoutOverriding(t *testing.T) {
+ opts := defaultOptions("")
+ opts.common.Debug = true
+
+ loadedConfig, err := loadDaemonCliConfig(opts)
+ require.NoError(t, err)
+ require.NotNil(t, loadedConfig)
+ if !loadedConfig.Debug {
+ t.Fatalf("expected debug to be copied from the common flags, got false")
+ }
+}
+
+func TestLoadDaemonCliConfigWithTLS(t *testing.T) {
+ opts := defaultOptions("")
+ opts.common.TLSOptions.CAFile = "/tmp/ca.pem"
+ opts.common.TLS = true
+
+ loadedConfig, err := loadDaemonCliConfig(opts)
+ require.NoError(t, err)
+ require.NotNil(t, loadedConfig)
+ assert.Equal(t, "/tmp/ca.pem", loadedConfig.CommonTLSOptions.CAFile)
+}
+
+func TestLoadDaemonCliConfigWithConflicts(t *testing.T) {
+ tempFile := tempfile.NewTempFile(t, "config", `{"labels": ["l3=foo"]}`)
+ defer tempFile.Remove()
+ configFile := tempFile.Name()
+
+ opts := defaultOptions(configFile)
+ flags := opts.flags
+
+ assert.NoError(t, flags.Set("config-file", configFile))
+ assert.NoError(t, flags.Set("label", "l1=bar"))
+ assert.NoError(t, flags.Set("label", "l2=baz"))
+
+ _, err := loadDaemonCliConfig(opts)
+ testutil.ErrorContains(t, err, "as a flag and in the 
configuration file: labels") +} + +func TestLoadDaemonCliConfigWithTLSVerify(t *testing.T) { + tempFile := tempfile.NewTempFile(t, "config", `{"tlsverify": true}`) + defer tempFile.Remove() + + opts := defaultOptions(tempFile.Name()) + opts.common.TLSOptions.CAFile = "/tmp/ca.pem" + + loadedConfig, err := loadDaemonCliConfig(opts) + require.NoError(t, err) + require.NotNil(t, loadedConfig) + assert.Equal(t, loadedConfig.TLS, true) +} + +func TestLoadDaemonCliConfigWithExplicitTLSVerifyFalse(t *testing.T) { + tempFile := tempfile.NewTempFile(t, "config", `{"tlsverify": false}`) + defer tempFile.Remove() + + opts := defaultOptions(tempFile.Name()) + opts.common.TLSOptions.CAFile = "/tmp/ca.pem" + + loadedConfig, err := loadDaemonCliConfig(opts) + require.NoError(t, err) + require.NotNil(t, loadedConfig) + assert.True(t, loadedConfig.TLS) +} + +func TestLoadDaemonCliConfigWithoutTLSVerify(t *testing.T) { + tempFile := tempfile.NewTempFile(t, "config", `{}`) + defer tempFile.Remove() + + opts := defaultOptions(tempFile.Name()) + opts.common.TLSOptions.CAFile = "/tmp/ca.pem" + + loadedConfig, err := loadDaemonCliConfig(opts) + require.NoError(t, err) + require.NotNil(t, loadedConfig) + assert.False(t, loadedConfig.TLS) +} + +func TestLoadDaemonCliConfigWithLogLevel(t *testing.T) { + tempFile := tempfile.NewTempFile(t, "config", `{"log-level": "warn"}`) + defer tempFile.Remove() + + opts := defaultOptions(tempFile.Name()) + loadedConfig, err := loadDaemonCliConfig(opts) + require.NoError(t, err) + require.NotNil(t, loadedConfig) + assert.Equal(t, "warn", loadedConfig.LogLevel) + assert.Equal(t, logrus.WarnLevel, logrus.GetLevel()) +} + +func TestLoadDaemonConfigWithEmbeddedOptions(t *testing.T) { + content := `{"tlscacert": "/etc/certs/ca.pem", "log-driver": "syslog"}` + tempFile := tempfile.NewTempFile(t, "config", content) + defer tempFile.Remove() + + opts := defaultOptions(tempFile.Name()) + loadedConfig, err := loadDaemonCliConfig(opts) + require.NoError(t, err) + require.NotNil(t, loadedConfig) + assert.Equal(t, "/etc/certs/ca.pem", loadedConfig.CommonTLSOptions.CAFile) + assert.Equal(t, "syslog", loadedConfig.LogConfig.Type) +} + +func TestLoadDaemonConfigWithRegistryOptions(t *testing.T) { + content := `{ + "allow-nondistributable-artifacts": ["allow-nondistributable-artifacts.com"], + "registry-mirrors": ["https://mirrors.docker.com"], + "insecure-registries": ["https://insecure.docker.com"] + }` + tempFile := tempfile.NewTempFile(t, "config", content) + defer tempFile.Remove() + + opts := defaultOptions(tempFile.Name()) + loadedConfig, err := loadDaemonCliConfig(opts) + require.NoError(t, err) + require.NotNil(t, loadedConfig) + + assert.Len(t, loadedConfig.AllowNondistributableArtifacts, 1) + assert.Len(t, loadedConfig.Mirrors, 1) + assert.Len(t, loadedConfig.InsecureRegistries, 1) +} diff --git a/components/engine/cmd/dockerd/daemon_unix.go b/components/engine/cmd/dockerd/daemon_unix.go new file mode 100644 index 0000000000..46ef58541f --- /dev/null +++ b/components/engine/cmd/dockerd/daemon_unix.go @@ -0,0 +1,125 @@ +// +build !windows,!solaris + +package main + +import ( + "fmt" + "net" + "os" + "os/signal" + "path/filepath" + "strconv" + "syscall" + + "github.com/docker/docker/cmd/dockerd/hack" + "github.com/docker/docker/daemon" + "github.com/docker/docker/libcontainerd" + "github.com/docker/libnetwork/portallocator" +) + +const defaultDaemonConfigFile = "/etc/docker/daemon.json" + +// setDefaultUmask sets the umask to 0022 to avoid problems +// caused by custom umask +func 
setDefaultUmask() error { + desiredUmask := 0022 + syscall.Umask(desiredUmask) + if umask := syscall.Umask(desiredUmask); umask != desiredUmask { + return fmt.Errorf("failed to set umask: expected %#o, got %#o", desiredUmask, umask) + } + + return nil +} + +func getDaemonConfDir(_ string) string { + return "/etc/docker" +} + +// setupConfigReloadTrap configures the USR2 signal to reload the configuration. +func (cli *DaemonCli) setupConfigReloadTrap() { + c := make(chan os.Signal, 1) + signal.Notify(c, syscall.SIGHUP) + go func() { + for range c { + cli.reloadConfig() + } + }() +} + +func (cli *DaemonCli) getPlatformRemoteOptions() []libcontainerd.RemoteOption { + opts := []libcontainerd.RemoteOption{ + libcontainerd.WithDebugLog(cli.Config.Debug), + libcontainerd.WithOOMScore(cli.Config.OOMScoreAdjust), + } + if cli.Config.ContainerdAddr != "" { + opts = append(opts, libcontainerd.WithRemoteAddr(cli.Config.ContainerdAddr)) + } else { + opts = append(opts, libcontainerd.WithStartDaemon(true)) + } + if daemon.UsingSystemd(cli.Config) { + args := []string{"--systemd-cgroup=true"} + opts = append(opts, libcontainerd.WithRuntimeArgs(args)) + } + if cli.Config.LiveRestoreEnabled { + opts = append(opts, libcontainerd.WithLiveRestore(true)) + } + opts = append(opts, libcontainerd.WithRuntimePath(daemon.DefaultRuntimeBinary)) + return opts +} + +// getLibcontainerdRoot gets the root directory for libcontainerd/containerd to +// store their state. +func (cli *DaemonCli) getLibcontainerdRoot() string { + return filepath.Join(cli.Config.ExecRoot, "libcontainerd") +} + +// getSwarmRunRoot gets the root directory for swarm to store runtime state +// For example, the control socket +func (cli *DaemonCli) getSwarmRunRoot() string { + return filepath.Join(cli.Config.ExecRoot, "swarm") +} + +// allocateDaemonPort ensures that there are no containers +// that try to use any port allocated for the docker server. +func allocateDaemonPort(addr string) error { + host, port, err := net.SplitHostPort(addr) + if err != nil { + return err + } + + intPort, err := strconv.Atoi(port) + if err != nil { + return err + } + + var hostIPs []net.IP + if parsedIP := net.ParseIP(host); parsedIP != nil { + hostIPs = append(hostIPs, parsedIP) + } else if hostIPs, err = net.LookupIP(host); err != nil { + return fmt.Errorf("failed to lookup %s address in host specification", host) + } + + pa := portallocator.Get() + for _, hostIP := range hostIPs { + if _, err := pa.RequestPort(hostIP, "tcp", intPort); err != nil { + return fmt.Errorf("failed to allocate daemon listening port %d (err: %v)", intPort, err) + } + } + return nil +} + +// notifyShutdown is called after the daemon shuts down but before the process exits. 
+func notifyShutdown(err error) { +} + +func wrapListeners(proto string, ls []net.Listener) []net.Listener { + switch proto { + case "unix": + ls[0] = &hack.MalformedHostHeaderOverride{ls[0]} + case "fd": + for i := range ls { + ls[i] = &hack.MalformedHostHeaderOverride{ls[i]} + } + } + return ls +} diff --git a/components/engine/cmd/dockerd/daemon_unix_test.go b/components/engine/cmd/dockerd/daemon_unix_test.go new file mode 100644 index 0000000000..4aaa758d53 --- /dev/null +++ b/components/engine/cmd/dockerd/daemon_unix_test.go @@ -0,0 +1,114 @@ +// +build !windows,!solaris + +// TODO: Create new file for Solaris which tests config parameters +// as described in daemon/config_solaris.go + +package main + +import ( + "testing" + + "github.com/docker/docker/daemon/config" + "github.com/docker/docker/pkg/testutil/tempfile" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestLoadDaemonCliConfigWithDaemonFlags(t *testing.T) { + content := `{"log-opts": {"max-size": "1k"}}` + tempFile := tempfile.NewTempFile(t, "config", content) + defer tempFile.Remove() + + opts := defaultOptions(tempFile.Name()) + opts.common.Debug = true + opts.common.LogLevel = "info" + assert.NoError(t, opts.flags.Set("selinux-enabled", "true")) + + loadedConfig, err := loadDaemonCliConfig(opts) + require.NoError(t, err) + require.NotNil(t, loadedConfig) + + assert.True(t, loadedConfig.Debug) + assert.Equal(t, "info", loadedConfig.LogLevel) + assert.True(t, loadedConfig.EnableSelinuxSupport) + assert.Equal(t, "json-file", loadedConfig.LogConfig.Type) + assert.Equal(t, "1k", loadedConfig.LogConfig.Config["max-size"]) +} + +func TestLoadDaemonConfigWithNetwork(t *testing.T) { + content := `{"bip": "127.0.0.2", "ip": "127.0.0.1"}` + tempFile := tempfile.NewTempFile(t, "config", content) + defer tempFile.Remove() + + opts := defaultOptions(tempFile.Name()) + loadedConfig, err := loadDaemonCliConfig(opts) + require.NoError(t, err) + require.NotNil(t, loadedConfig) + + assert.Equal(t, "127.0.0.2", loadedConfig.IP) + assert.Equal(t, "127.0.0.1", loadedConfig.DefaultIP.String()) +} + +func TestLoadDaemonConfigWithMapOptions(t *testing.T) { + content := `{ + "cluster-store-opts": {"kv.cacertfile": "/var/lib/docker/discovery_certs/ca.pem"}, + "log-opts": {"tag": "test"} +}` + tempFile := tempfile.NewTempFile(t, "config", content) + defer tempFile.Remove() + + opts := defaultOptions(tempFile.Name()) + loadedConfig, err := loadDaemonCliConfig(opts) + require.NoError(t, err) + require.NotNil(t, loadedConfig) + assert.NotNil(t, loadedConfig.ClusterOpts) + + expectedPath := "/var/lib/docker/discovery_certs/ca.pem" + assert.Equal(t, expectedPath, loadedConfig.ClusterOpts["kv.cacertfile"]) + assert.NotNil(t, loadedConfig.LogConfig.Config) + assert.Equal(t, "test", loadedConfig.LogConfig.Config["tag"]) +} + +func TestLoadDaemonConfigWithTrueDefaultValues(t *testing.T) { + content := `{ "userland-proxy": false }` + tempFile := tempfile.NewTempFile(t, "config", content) + defer tempFile.Remove() + + opts := defaultOptions(tempFile.Name()) + loadedConfig, err := loadDaemonCliConfig(opts) + require.NoError(t, err) + require.NotNil(t, loadedConfig) + + assert.False(t, loadedConfig.EnableUserlandProxy) + + // make sure reloading doesn't generate configuration + // conflicts after normalizing boolean values. 
+ reload := func(reloadedConfig *config.Config) {
+ assert.False(t, reloadedConfig.EnableUserlandProxy)
+ }
+ assert.NoError(t, config.Reload(opts.configFile, opts.flags, reload))
+}
+
+func TestLoadDaemonConfigWithTrueDefaultValuesLeaveDefaults(t *testing.T) {
+ tempFile := tempfile.NewTempFile(t, "config", `{}`)
+ defer tempFile.Remove()
+
+ opts := defaultOptions(tempFile.Name())
+ loadedConfig, err := loadDaemonCliConfig(opts)
+ require.NoError(t, err)
+ require.NotNil(t, loadedConfig)
+
+ assert.True(t, loadedConfig.EnableUserlandProxy)
+}
+
+func TestLoadDaemonConfigWithLegacyRegistryOptions(t *testing.T) {
+ content := `{"disable-legacy-registry": true}`
+ tempFile := tempfile.NewTempFile(t, "config", content)
+ defer tempFile.Remove()
+
+ opts := defaultOptions(tempFile.Name())
+ loadedConfig, err := loadDaemonCliConfig(opts)
+ require.NoError(t, err)
+ require.NotNil(t, loadedConfig)
+ assert.True(t, loadedConfig.V2Only)
+}
diff --git a/components/engine/cmd/dockerd/daemon_windows.go b/components/engine/cmd/dockerd/daemon_windows.go
new file mode 100644
index 0000000000..6682cc8cda
--- /dev/null
+++ b/components/engine/cmd/dockerd/daemon_windows.go
@@ -0,0 +1,98 @@
+package main
+
+import (
+ "fmt"
+ "net"
+ "os"
+ "path/filepath"
+ "syscall"
+
+ "github.com/Sirupsen/logrus"
+ "github.com/docker/docker/libcontainerd"
+ "github.com/docker/docker/pkg/system"
+)
+
+var defaultDaemonConfigFile = ""
+
+// currentUserIsOwner checks whether the current user is the owner of the given
+// file.
+func currentUserIsOwner(f string) bool {
+ return false
+}
+
+// setDefaultUmask doesn't do anything on windows
+func setDefaultUmask() error {
+ return nil
+}
+
+func getDaemonConfDir(root string) string {
+ return filepath.Join(root, `\config`)
+}
+
+// preNotifySystem sends a message to the host when the API is active, but before the daemon is set up
+func preNotifySystem() {
+ // Start the service now to prevent timeouts waiting for the daemon to start,
+ // but still (eventually) complete all requests that are sent after this.
+ if service != nil {
+ err := service.started()
+ if err != nil {
+ logrus.Fatal(err)
+ }
+ }
+}
+
+// notifySystem sends a message to the host when the server is ready to be used
+func notifySystem() {
+}
+
+// notifyShutdown is called after the daemon shuts down but before the process exits.
+func notifyShutdown(err error) {
+ if service != nil {
+ if err != nil {
+ logrus.Fatal(err)
+ }
+ service.stopped(err)
+ }
+}
+
+// setupConfigReloadTrap configures a Win32 event to reload the configuration.
+func (cli *DaemonCli) setupConfigReloadTrap() {
+ go func() {
+ sa := syscall.SecurityAttributes{
+ Length: 0,
+ }
+ ev := "Global\\docker-daemon-config-" + fmt.Sprint(os.Getpid())
+ if h, _ := system.CreateEvent(&sa, false, false, ev); h != 0 {
+ logrus.Debugf("Config reload - waiting signal at %s", ev)
+ for {
+ syscall.WaitForSingleObject(h, syscall.INFINITE)
+ cli.reloadConfig()
+ }
+ }
+ }()
+}
+
+func (cli *DaemonCli) getPlatformRemoteOptions() []libcontainerd.RemoteOption {
+ return nil
+}
+
+// getLibcontainerdRoot gets the root directory for libcontainerd to store its
+// state. The Windows libcontainerd implementation does not need to write a spec
+// or state to disk, so this is a no-op.
+func (cli *DaemonCli) getLibcontainerdRoot() string { + return "" +} + +// getSwarmRunRoot gets the root directory for swarm to store runtime state +// For example, the control socket +func (cli *DaemonCli) getSwarmRunRoot() string { + return "" +} + +func allocateDaemonPort(addr string) error { + return nil +} + +func wrapListeners(proto string, ls []net.Listener) []net.Listener { + return ls +} diff --git a/components/engine/cmd/dockerd/docker.go b/components/engine/cmd/dockerd/docker.go new file mode 100644 index 0000000000..c0bc965674 --- /dev/null +++ b/components/engine/cmd/dockerd/docker.go @@ -0,0 +1,122 @@ +package main + +import ( + "fmt" + "os" + "path/filepath" + "runtime" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/cli" + cliflags "github.com/docker/docker/cli/flags" + "github.com/docker/docker/daemon/config" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/pkg/reexec" + "github.com/docker/docker/pkg/term" + "github.com/spf13/cobra" + "github.com/spf13/pflag" +) + +type daemonOptions struct { + version bool + configFile string + daemonConfig *config.Config + common *cliflags.CommonOptions + flags *pflag.FlagSet +} + +func newDaemonCommand() *cobra.Command { + opts := daemonOptions{ + daemonConfig: config.New(), + common: cliflags.NewCommonOptions(), + } + + cmd := &cobra.Command{ + Use: "dockerd [OPTIONS]", + Short: "A self-sufficient runtime for containers.", + SilenceUsage: true, + SilenceErrors: true, + Args: cli.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + opts.flags = cmd.Flags() + return runDaemon(opts) + }, + } + cli.SetupRootCommand(cmd) + + flags := cmd.Flags() + flags.BoolVarP(&opts.version, "version", "v", false, "Print version information and quit") + flags.StringVar(&opts.configFile, "config-file", defaultDaemonConfigFile, "Daemon configuration file") + opts.common.InstallFlags(flags) + installConfigFlags(opts.daemonConfig, flags) + installServiceFlags(flags) + + return cmd +} + +func runDaemon(opts daemonOptions) error { + if opts.version { + showVersion() + return nil + } + + daemonCli := NewDaemonCli() + + // Windows specific settings as these are not defaulted. + if runtime.GOOS == "windows" { + if opts.daemonConfig.Pidfile == "" { + opts.daemonConfig.Pidfile = filepath.Join(opts.daemonConfig.Root, "docker.pid") + } + if opts.configFile == "" { + opts.configFile = filepath.Join(opts.daemonConfig.Root, `config\daemon.json`) + } + } + + // On Windows, this may be launching as a service or with an option to + // register the service. + stop, runAsService, err := initService(daemonCli) + if err != nil { + logrus.Fatal(err) + } + + if stop { + return nil + } + + // If Windows SCM manages the service - no need for PID files + if runAsService { + opts.daemonConfig.Pidfile = "" + } + + err = daemonCli.start(opts) + notifyShutdown(err) + return err +} + +func showVersion() { + fmt.Printf("Docker version %s, build %s\n", dockerversion.Version, dockerversion.GitCommit) +} + +func main() { + if reexec.Init() { + return + } + + // Set terminal emulation based on platform as required. + _, stdout, stderr := term.StdStreams() + + // @jhowardmsft - maybe there is a historic reason why on non-Windows, stderr is used + // here. However, on Windows it makes no sense and there is no need. 
+ if runtime.GOOS == "windows" {
+ logrus.SetOutput(stdout)
+ } else {
+ logrus.SetOutput(stderr)
+ }
+
+ cmd := newDaemonCommand()
+ cmd.SetOutput(stdout)
+ if err := cmd.Execute(); err != nil {
+ fmt.Fprintf(stderr, "%s\n", err)
+ os.Exit(1)
+ }
+}
diff --git a/components/engine/cmd/dockerd/docker_windows.go b/components/engine/cmd/dockerd/docker_windows.go
new file mode 100644
index 0000000000..19c5587cb6
--- /dev/null
+++ b/components/engine/cmd/dockerd/docker_windows.go
@@ -0,0 +1,18 @@
+package main
+
+import (
+ "sync/atomic"
+
+ _ "github.com/docker/docker/autogen/winresources/dockerd"
+)
+
+//go:cgo_import_dynamic main.dummy CommandLineToArgvW%2 "shell32.dll"
+
+var dummy uintptr
+
+func init() {
+ // Ensure that this import is not removed by the linker. This is used to
+ // ensure that shell32.dll is loaded by the system loader, preventing
+ // go#15286 from triggering on Nano Server TP5.
+ atomic.LoadUintptr(&dummy)
+}
diff --git a/components/engine/cmd/dockerd/hack/malformed_host_override.go b/components/engine/cmd/dockerd/hack/malformed_host_override.go
new file mode 100644
index 0000000000..e7175caa0b
--- /dev/null
+++ b/components/engine/cmd/dockerd/hack/malformed_host_override.go
@@ -0,0 +1,121 @@
+// +build !windows
+
+package hack
+
+import "net"
+
+// MalformedHostHeaderOverride is a listener wrapper that works around the
+// 400 Bad Request errors triggered by old docker clients that send an
+// invalid Host header.
+type MalformedHostHeaderOverride struct {
+ net.Listener
+}
+
+// MalformedHostHeaderOverrideConn wraps the underlying unix
+// connection and keeps track of the first read from http.Server,
+// which just reads the headers.
+type MalformedHostHeaderOverrideConn struct {
+ net.Conn
+ first bool
+}
+
+var closeConnHeader = []byte("\r\nConnection: close\r")
+
+// Read intercepts the first read done by http.Server in order to inspect
+// the Host header. If the Host starts with / then we're talking to an old
+// docker client that sends an invalid Host header. To avoid erroring out
+// in http.Server we rewrite the first bytes of the request to sanitize the
+// Host header itself.
+// If we're not dealing with an old docker client, the data is passed to the
+// server without modification.
+func (l *MalformedHostHeaderOverrideConn) Read(b []byte) (n int, err error) {
+ // http.Server uses a 4k buffer
+ if l.first && len(b) == 4096 {
+ // This keeps track of the first read from http.Server, which just reads
+ // the headers
+ l.first = false
+ // The first read of the connection by http.Server is limited to
+ // DefaultMaxHeaderBytes (usually 1 << 20) + 4096.
+ // Here we do the first read, which gets us all the http headers to
+ // be inspected and modified below.
+ c, err := l.Conn.Read(b)
+ if err != nil {
+ return c, err
+ }
+
+ var (
+ start, end int
+ firstLineFeed = -1
+ buf []byte
+ )
+ for i := 0; i <= c-1-7; i++ {
+ if b[i] == '\n' && firstLineFeed == -1 {
+ firstLineFeed = i
+ }
+ if b[i] != '\n' {
+ continue
+ }
+
+ if b[i+1] == '\r' && b[i+2] == '\n' {
+ return c, nil
+ }
+
+ if b[i+1] != 'H' {
+ continue
+ }
+ if b[i+2] != 'o' {
+ continue
+ }
+ if b[i+3] != 's' {
+ continue
+ }
+ if b[i+4] != 't' {
+ continue
+ }
+ if b[i+5] != ':' {
+ continue
+ }
+ if b[i+6] != ' ' {
+ continue
+ }
+ if b[i+7] != '/' {
+ continue
+ }
+ // ensure clients other than the docker clients do not get this hack
+ if i != firstLineFeed {
+ return c, nil
+ }
+ start = i + 7
+ // now find where the value ends
+ for ii, bbb := range b[start:c] {
+ if bbb == '\n' {
+ end = start + ii
+ break
+ }
+ }
+ buf = make([]byte, 0, c+len(closeConnHeader)-(end-start))
+ // strip the value of the host header and
+ // inject `Connection: close` to ensure we don't reuse this connection
+ buf = append(buf, b[:start]...)
+ buf = append(buf, closeConnHeader...)
+ buf = append(buf, b[end:c]...)
+ copy(b, buf)
+ break
+ }
+ if len(buf) == 0 {
+ return c, nil
+ }
+ return len(buf), nil
+ }
+ return l.Conn.Read(b)
+}
+
+// Accept accepts connections and wraps each connection in a
+// MalformedHostHeaderOverrideConn, initializing first to true.
+func (l *MalformedHostHeaderOverride) Accept() (net.Conn, error) {
+ c, err := l.Listener.Accept()
+ if err != nil {
+ return c, err
+ }
+ return &MalformedHostHeaderOverrideConn{c, true}, nil
+}
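Before the tests, a quick demonstration of the rewrite that `Read` performs. This sketch assumes it sits next to the types above in package `hack`; the request and expected output bytes are taken verbatim from the first case of the test table below:

```go
// a sketch, not part of the commit: feed one malformed request through the wrapper
package hack

import (
	"bytes"
	"fmt"
	"net"
)

// demoConn is an in-memory net.Conn stand-in, like bufConn in the tests below.
type demoConn struct {
	net.Conn
	buf *bytes.Buffer
}

func (c *demoConn) Read(b []byte) (int, error) { return c.buf.Read(b) }

func demo() {
	req := []byte("GET /foo\nHost: /var/run/docker.sock\nUser-Agent: Docker\r\n\r\n")
	conn := &MalformedHostHeaderOverrideConn{&demoConn{buf: bytes.NewBuffer(req)}, true}

	out := make([]byte, 4096) // http.Server reads with a 4k buffer; the hack keys off this size
	n, _ := conn.Read(out)
	fmt.Printf("%q\n", out[:n])
	// Prints: "GET /foo\nHost: \r\nConnection: close\r\nUser-Agent: Docker\r\n\r\n"
	// The Host value is stripped and "Connection: close" is injected.
}
```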
diff --git a/components/engine/cmd/dockerd/hack/malformed_host_override_test.go b/components/engine/cmd/dockerd/hack/malformed_host_override_test.go
new file mode 100644
index 0000000000..1a0a60baf3
--- /dev/null
+++ b/components/engine/cmd/dockerd/hack/malformed_host_override_test.go
@@ -0,0 +1,124 @@
+// +build !windows
+
+package hack
+
+import (
+ "bytes"
+ "io"
+ "net"
+ "strings"
+ "testing"
+)
+
+type bufConn struct {
+ net.Conn
+ buf *bytes.Buffer
+}
+
+func (bc *bufConn) Read(b []byte) (int, error) {
+ return bc.buf.Read(b)
+}
+
+func TestHeaderOverrideHack(t *testing.T) {
+ tests := [][2][]byte{
+ {
+ []byte("GET /foo\nHost: /var/run/docker.sock\nUser-Agent: Docker\r\n\r\n"),
+ []byte("GET /foo\nHost: \r\nConnection: close\r\nUser-Agent: Docker\r\n\r\n"),
+ },
+ {
+ []byte("GET /foo\nHost: /var/run/docker.sock\nUser-Agent: Docker\nFoo: Bar\r\n"),
+ []byte("GET /foo\nHost: \r\nConnection: close\r\nUser-Agent: Docker\nFoo: Bar\r\n"),
+ },
+ {
+ []byte("GET /foo\nHost: /var/run/docker.sock\nUser-Agent: Docker\r\n\r\ntest something!"),
+ []byte("GET /foo\nHost: \r\nConnection: close\r\nUser-Agent: Docker\r\n\r\ntest something!"),
+ },
+ {
+ []byte("GET /foo\nHost: /var/run/docker.sock\nUser-Agent: Docker\r\n\r\ntest something! " + strings.Repeat("test", 15000)),
+ []byte("GET /foo\nHost: \r\nConnection: close\r\nUser-Agent: Docker\r\n\r\ntest something! 
" + strings.Repeat("test", 15000)), + }, + { + []byte("GET /foo\nFoo: Bar\nHost: /var/run/docker.sock\nUser-Agent: Docker\r\n\r\n"), + []byte("GET /foo\nFoo: Bar\nHost: /var/run/docker.sock\nUser-Agent: Docker\r\n\r\n"), + }, + } + + // Test for https://github.com/docker/docker/issues/23045 + h0 := "GET /foo\nUser-Agent: Docker\r\n\r\n" + h0 = h0 + strings.Repeat("a", 4096-len(h0)-1) + "\n" + tests = append(tests, [2][]byte{[]byte(h0), []byte(h0)}) + + for _, pair := range tests { + read := make([]byte, 4096) + client := &bufConn{ + buf: bytes.NewBuffer(pair[0]), + } + l := MalformedHostHeaderOverrideConn{client, true} + + n, err := l.Read(read) + if err != nil && err != io.EOF { + t.Fatalf("read: %d - %d, err: %v\n%s", n, len(pair[0]), err, string(read[:n])) + } + if !bytes.Equal(read[:n], pair[1][:n]) { + t.Fatalf("\n%s\n%s\n", read[:n], pair[1][:n]) + } + } +} + +func BenchmarkWithHack(b *testing.B) { + client, srv := net.Pipe() + done := make(chan struct{}) + req := []byte("GET /foo\nHost: /var/run/docker.sock\nUser-Agent: Docker\n") + read := make([]byte, 4096) + b.SetBytes(int64(len(req) * 30)) + + l := MalformedHostHeaderOverrideConn{client, true} + go func() { + for { + if _, err := srv.Write(req); err != nil { + srv.Close() + break + } + l.first = true // make sure each subsequent run uses the hack parsing + } + close(done) + }() + + for i := 0; i < b.N; i++ { + for i := 0; i < 30; i++ { + if n, err := l.Read(read); err != nil && err != io.EOF { + b.Fatalf("read: %d - %d, err: %v\n%s", n, len(req), err, string(read[:n])) + } + } + } + l.Close() + <-done +} + +func BenchmarkNoHack(b *testing.B) { + client, srv := net.Pipe() + done := make(chan struct{}) + req := []byte("GET /foo\nHost: /var/run/docker.sock\nUser-Agent: Docker\n") + read := make([]byte, 4096) + b.SetBytes(int64(len(req) * 30)) + + go func() { + for { + if _, err := srv.Write(req); err != nil { + srv.Close() + break + } + } + close(done) + }() + + for i := 0; i < b.N; i++ { + for i := 0; i < 30; i++ { + if _, err := client.Read(read); err != nil && err != io.EOF { + b.Fatal(err) + } + } + } + client.Close() + <-done +} diff --git a/components/engine/cmd/dockerd/metrics.go b/components/engine/cmd/dockerd/metrics.go new file mode 100644 index 0000000000..0c8860408b --- /dev/null +++ b/components/engine/cmd/dockerd/metrics.go @@ -0,0 +1,27 @@ +package main + +import ( + "net" + "net/http" + + "github.com/Sirupsen/logrus" + metrics "github.com/docker/go-metrics" +) + +func startMetricsServer(addr string) error { + if err := allocateDaemonPort(addr); err != nil { + return err + } + l, err := net.Listen("tcp", addr) + if err != nil { + return err + } + mux := http.NewServeMux() + mux.Handle("/metrics", metrics.Handler()) + go func() { + if err := http.Serve(l, mux); err != nil { + logrus.Errorf("serve metrics api: %s", err) + } + }() + return nil +} diff --git a/components/engine/cmd/dockerd/service_unsupported.go b/components/engine/cmd/dockerd/service_unsupported.go new file mode 100644 index 0000000000..e67ad474b1 --- /dev/null +++ b/components/engine/cmd/dockerd/service_unsupported.go @@ -0,0 +1,14 @@ +// +build !windows + +package main + +import ( + "github.com/spf13/pflag" +) + +func initService(daemonCli *DaemonCli) (bool, bool, error) { + return false, false, nil +} + +func installServiceFlags(flags *pflag.FlagSet) { +} diff --git a/components/engine/cmd/dockerd/service_windows.go b/components/engine/cmd/dockerd/service_windows.go new file mode 100644 index 0000000000..749cc9009e --- /dev/null +++ 
b/components/engine/cmd/dockerd/service_windows.go @@ -0,0 +1,431 @@ +package main + +import ( + "bytes" + "errors" + "fmt" + "io/ioutil" + "log" + "os" + "os/exec" + "path/filepath" + "syscall" + "time" + "unsafe" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/system" + "github.com/spf13/pflag" + "golang.org/x/sys/windows" + "golang.org/x/sys/windows/svc" + "golang.org/x/sys/windows/svc/debug" + "golang.org/x/sys/windows/svc/eventlog" + "golang.org/x/sys/windows/svc/mgr" +) + +var ( + flServiceName *string + flRegisterService *bool + flUnregisterService *bool + flRunService *bool + + setStdHandle = windows.NewLazySystemDLL("kernel32.dll").NewProc("SetStdHandle") + oldStderr syscall.Handle + panicFile *os.File + + service *handler +) + +const ( + // These should match the values in event_messages.mc. + eventInfo = 1 + eventWarn = 1 + eventError = 1 + eventDebug = 2 + eventPanic = 3 + eventFatal = 4 + + eventExtraOffset = 10 // Add this to any event to get a string that supports extended data +) + +func installServiceFlags(flags *pflag.FlagSet) { + flServiceName = flags.String("service-name", "docker", "Set the Windows service name") + flRegisterService = flags.Bool("register-service", false, "Register the service and exit") + flUnregisterService = flags.Bool("unregister-service", false, "Unregister the service and exit") + flRunService = flags.Bool("run-service", false, "") + flags.MarkHidden("run-service") +} + +type handler struct { + tosvc chan bool + fromsvc chan error + daemonCli *DaemonCli +} + +type etwHook struct { + log *eventlog.Log +} + +func (h *etwHook) Levels() []logrus.Level { + return []logrus.Level{ + logrus.PanicLevel, + logrus.FatalLevel, + logrus.ErrorLevel, + logrus.WarnLevel, + logrus.InfoLevel, + logrus.DebugLevel, + } +} + +func (h *etwHook) Fire(e *logrus.Entry) error { + var ( + etype uint16 + eid uint32 + ) + + switch e.Level { + case logrus.PanicLevel: + etype = windows.EVENTLOG_ERROR_TYPE + eid = eventPanic + case logrus.FatalLevel: + etype = windows.EVENTLOG_ERROR_TYPE + eid = eventFatal + case logrus.ErrorLevel: + etype = windows.EVENTLOG_ERROR_TYPE + eid = eventError + case logrus.WarnLevel: + etype = windows.EVENTLOG_WARNING_TYPE + eid = eventWarn + case logrus.InfoLevel: + etype = windows.EVENTLOG_INFORMATION_TYPE + eid = eventInfo + case logrus.DebugLevel: + etype = windows.EVENTLOG_INFORMATION_TYPE + eid = eventDebug + default: + return errors.New("unknown level") + } + + // If there is additional data, include it as a second string. 
+ exts := "" + if len(e.Data) > 0 { + fs := bytes.Buffer{} + for k, v := range e.Data { + fs.WriteString(k) + fs.WriteByte('=') + fmt.Fprint(&fs, v) + fs.WriteByte(' ') + } + + exts = fs.String()[:fs.Len()-1] + eid += eventExtraOffset + } + + if h.log == nil { + fmt.Fprintf(os.Stderr, "%s [%s]\n", e.Message, exts) + return nil + } + + var ( + ss [2]*uint16 + err error + ) + + ss[0], err = syscall.UTF16PtrFromString(e.Message) + if err != nil { + return err + } + + count := uint16(1) + if exts != "" { + ss[1], err = syscall.UTF16PtrFromString(exts) + if err != nil { + return err + } + + count++ + } + + return windows.ReportEvent(h.log.Handle, etype, 0, eid, 0, count, 0, &ss[0], nil) +} + +func getServicePath() (string, error) { + p, err := exec.LookPath(os.Args[0]) + if err != nil { + return "", err + } + return filepath.Abs(p) +} + +func registerService() error { + p, err := getServicePath() + if err != nil { + return err + } + m, err := mgr.Connect() + if err != nil { + return err + } + defer m.Disconnect() + + depends := []string{} + + // This dependency is required on build 14393 (RS1) + // it is added to the platform in newer builds + if system.GetOSVersion().Build == 14393 { + depends = append(depends, "ConDrv") + } + + c := mgr.Config{ + ServiceType: windows.SERVICE_WIN32_OWN_PROCESS, + StartType: mgr.StartAutomatic, + ErrorControl: mgr.ErrorNormal, + Dependencies: depends, + DisplayName: "Docker Engine", + } + + // Configure the service to launch with the arguments that were just passed. + args := []string{"--run-service"} + for _, a := range os.Args[1:] { + if a != "--register-service" && a != "--unregister-service" { + args = append(args, a) + } + } + + s, err := m.CreateService(*flServiceName, p, c, args...) + if err != nil { + return err + } + defer s.Close() + + // See http://stackoverflow.com/questions/35151052/how-do-i-configure-failure-actions-of-a-windows-service-written-in-go + const ( + scActionNone = 0 + scActionRestart = 1 + scActionReboot = 2 + scActionRunCommand = 3 + + serviceConfigFailureActions = 2 + ) + + type serviceFailureActions struct { + ResetPeriod uint32 + RebootMsg *uint16 + Command *uint16 + ActionsCount uint32 + Actions uintptr + } + + type scAction struct { + Type uint32 + Delay uint32 + } + t := []scAction{ + {Type: scActionRestart, Delay: uint32(60 * time.Second / time.Millisecond)}, + {Type: scActionRestart, Delay: uint32(60 * time.Second / time.Millisecond)}, + {Type: scActionNone}, + } + lpInfo := serviceFailureActions{ResetPeriod: uint32(24 * time.Hour / time.Second), ActionsCount: uint32(3), Actions: uintptr(unsafe.Pointer(&t[0]))} + err = windows.ChangeServiceConfig2(s.Handle, serviceConfigFailureActions, (*byte)(unsafe.Pointer(&lpInfo))) + if err != nil { + return err + } + + return eventlog.Install(*flServiceName, p, false, eventlog.Info|eventlog.Warning|eventlog.Error) +} + +func unregisterService() error { + m, err := mgr.Connect() + if err != nil { + return err + } + defer m.Disconnect() + + s, err := m.OpenService(*flServiceName) + if err != nil { + return err + } + defer s.Close() + + eventlog.Remove(*flServiceName) + err = s.Delete() + if err != nil { + return err + } + return nil +} + +// initService is the entry point for running the daemon as a Windows +// service. It returns an indication to stop (if registering/un-registering); +// an indication of whether it is running as a service; and an error. 
+func initService(daemonCli *DaemonCli) (bool, bool, error) { + if *flUnregisterService { + if *flRegisterService { + return true, false, errors.New("--register-service and --unregister-service cannot be used together") + } + return true, false, unregisterService() + } + + if *flRegisterService { + return true, false, registerService() + } + + if !*flRunService { + return false, false, nil + } + + interactive, err := svc.IsAnInteractiveSession() + if err != nil { + return false, false, err + } + + h := &handler{ + tosvc: make(chan bool), + fromsvc: make(chan error), + daemonCli: daemonCli, + } + + var log *eventlog.Log + if !interactive { + log, err = eventlog.Open(*flServiceName) + if err != nil { + return false, false, err + } + } + + logrus.AddHook(&etwHook{log}) + logrus.SetOutput(ioutil.Discard) + + service = h + go func() { + if interactive { + err = debug.Run(*flServiceName, h) + } else { + err = svc.Run(*flServiceName, h) + } + + h.fromsvc <- err + }() + + // Wait for the first signal from the service handler. + err = <-h.fromsvc + if err != nil { + return false, false, err + } + return false, true, nil +} + +func (h *handler) started() error { + // This must be delayed until daemonCli initializes Config.Root + err := initPanicFile(filepath.Join(h.daemonCli.Config.Root, "panic.log")) + if err != nil { + return err + } + + h.tosvc <- false + return nil +} + +func (h *handler) stopped(err error) { + logrus.Debugf("Stopping service: %v", err) + h.tosvc <- err != nil + <-h.fromsvc +} + +func (h *handler) Execute(_ []string, r <-chan svc.ChangeRequest, s chan<- svc.Status) (bool, uint32) { + s <- svc.Status{State: svc.StartPending, Accepts: 0} + // Unblock initService() + h.fromsvc <- nil + + // Wait for initialization to complete. + failed := <-h.tosvc + if failed { + logrus.Debug("Aborting service start due to failure during initialization") + return true, 1 + } + + s <- svc.Status{State: svc.Running, Accepts: svc.AcceptStop | svc.AcceptShutdown | svc.Accepted(windows.SERVICE_ACCEPT_PARAMCHANGE)} + logrus.Debug("Service running") +Loop: + for { + select { + case failed = <-h.tosvc: + break Loop + case c := <-r: + switch c.Cmd { + case svc.Cmd(windows.SERVICE_CONTROL_PARAMCHANGE): + h.daemonCli.reloadConfig() + case svc.Interrogate: + s <- c.CurrentStatus + case svc.Stop, svc.Shutdown: + s <- svc.Status{State: svc.StopPending, Accepts: 0} + h.daemonCli.stop() + } + } + } + + removePanicFile() + if failed { + return true, 1 + } + return false, 0 +} + +func initPanicFile(path string) error { + var err error + panicFile, err = os.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0) + if err != nil { + return err + } + + st, err := panicFile.Stat() + if err != nil { + return err + } + + // If there are contents in the file already, move the file out of the way + // and replace it. + if st.Size() > 0 { + panicFile.Close() + os.Rename(path, path+".old") + panicFile, err = os.Create(path) + if err != nil { + return err + } + } + + // Update STD_ERROR_HANDLE to point to the panic file so that Go writes to + // it when it panics. Remember the old stderr to restore it before removing + // the panic file. + sh := syscall.STD_ERROR_HANDLE + h, err := syscall.GetStdHandle(sh) + if err != nil { + return err + } + + oldStderr = h + + r, _, err := setStdHandle.Call(uintptr(sh), uintptr(panicFile.Fd())) + if r == 0 && err != nil { + return err + } + + // Reset os.Stderr to the panic file (so fmt.Fprintf(os.Stderr,...) 
actually gets redirected)
+ os.Stderr = os.NewFile(uintptr(panicFile.Fd()), "/dev/stderr")
+
+ // Force threads that panic to write to stderr (now the panicFile handle), otherwise the output goes into the ether
+ log.SetOutput(os.Stderr)
+
+ return nil
+}
+
+func removePanicFile() {
+ if st, err := panicFile.Stat(); err == nil {
+ if st.Size() == 0 {
+ sh := syscall.STD_ERROR_HANDLE
+ setStdHandle.Call(uintptr(sh), uintptr(oldStderr))
+ panicFile.Close()
+ os.Remove(panicFile.Name())
+ }
+ }
+}
diff --git a/components/engine/container/archive.go b/components/engine/container/archive.go
new file mode 100644
index 0000000000..56e6598b9c
--- /dev/null
+++ b/components/engine/container/archive.go
@@ -0,0 +1,76 @@
+package container
+
+import (
+ "os"
+ "path/filepath"
+
+ "github.com/docker/docker/api/types"
+ "github.com/docker/docker/pkg/archive"
+ "github.com/docker/docker/pkg/system"
+)
+
+// ResolvePath resolves the given path in the container to a resource on the
+// host. Returns a resolved path (absolute path to the resource on the host),
+// the absolute path to the resource relative to the container's rootfs, and
+// an error if the path points to outside the container's rootfs.
+func (container *Container) ResolvePath(path string) (resolvedPath, absPath string, err error) {
+ // If a drive letter is supplied, it must be the system drive. This is a no-op except on Windows.
+ path, err = system.CheckSystemDriveAndRemoveDriveLetter(path)
+ if err != nil {
+ return "", "", err
+ }
+
+ // Consider the given path as an absolute path in the container.
+ absPath = archive.PreserveTrailingDotOrSeparator(filepath.Join(string(filepath.Separator), path), path)
+
+ // Split the absPath into its Directory and Base components. We will
+ // resolve the dir in the scope of the container then append the base.
+ dirPath, basePath := filepath.Split(absPath)
+
+ resolvedDirPath, err := container.GetResourcePath(dirPath)
+ if err != nil {
+ return "", "", err
+ }
+
+ // resolvedDirPath will have been cleaned (no trailing path separators) so
+ // we can manually join it with the base path element.
+ resolvedPath = resolvedDirPath + string(filepath.Separator) + basePath
+
+ return resolvedPath, absPath, nil
+}
+
+// StatPath stats the given, fully resolved path on the host. Locks and mounts
+// should be acquired before calling this method, and the given path should be
+// fully resolved to a path on the host corresponding to the given absolute path
+// inside the container.
+func (container *Container) StatPath(resolvedPath, absPath string) (stat *types.ContainerPathStat, err error) {
+ lstat, err := os.Lstat(resolvedPath)
+ if err != nil {
+ return nil, err
+ }
+
+ var linkTarget string
+ if lstat.Mode()&os.ModeSymlink != 0 {
+ // Fully evaluate the symlink in the scope of the container rootfs.
+ hostPath, err := container.GetResourcePath(absPath)
+ if err != nil {
+ return nil, err
+ }
+
+ linkTarget, err = filepath.Rel(container.BaseFS, hostPath)
+ if err != nil {
+ return nil, err
+ }
+
+ // Make it an absolute path.
+ linkTarget = filepath.Join(string(filepath.Separator), linkTarget) + } + + return &types.ContainerPathStat{ + Name: filepath.Base(absPath), + Size: lstat.Size(), + Mode: lstat.Mode(), + Mtime: lstat.ModTime(), + LinkTarget: linkTarget, + }, nil +} diff --git a/components/engine/container/container.go b/components/engine/container/container.go new file mode 100644 index 0000000000..e89340610c --- /dev/null +++ b/components/engine/container/container.go @@ -0,0 +1,988 @@ +package container + +import ( + "encoding/json" + "fmt" + "io" + "net" + "os" + "path/filepath" + "strconv" + "strings" + "sync" + "syscall" + "time" + + "golang.org/x/net/context" + + "github.com/Sirupsen/logrus" + containertypes "github.com/docker/docker/api/types/container" + mounttypes "github.com/docker/docker/api/types/mount" + networktypes "github.com/docker/docker/api/types/network" + swarmtypes "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/container/stream" + "github.com/docker/docker/daemon/exec" + "github.com/docker/docker/daemon/logger" + "github.com/docker/docker/daemon/logger/jsonfilelog" + "github.com/docker/docker/daemon/network" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + "github.com/docker/docker/libcontainerd" + "github.com/docker/docker/opts" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/signal" + "github.com/docker/docker/pkg/symlink" + "github.com/docker/docker/restartmanager" + "github.com/docker/docker/runconfig" + "github.com/docker/docker/volume" + "github.com/docker/go-connections/nat" + "github.com/docker/go-units" + "github.com/docker/libnetwork" + "github.com/docker/libnetwork/netlabel" + "github.com/docker/libnetwork/options" + "github.com/docker/libnetwork/types" + agentexec "github.com/docker/swarmkit/agent/exec" + "github.com/opencontainers/selinux/go-selinux/label" +) + +const configFileName = "config.v2.json" + +const ( + // DefaultStopTimeout is the timeout (in seconds) for the syscall signal used to stop a container. + DefaultStopTimeout = 10 +) + +var ( + errInvalidEndpoint = fmt.Errorf("invalid endpoint while building port map info") + errInvalidNetwork = fmt.Errorf("invalid network settings while building port map info") +) + +// CommonContainer holds the fields for a container which are +// applicable across all platforms supported by the daemon. +type CommonContainer struct { + StreamConfig *stream.Config + // embed for Container to support states directly. + *State `json:"State"` // Needed for Engine API version <= 1.11 + Root string `json:"-"` // Path to the "home" of the container, including metadata. 
+ BaseFS string `json:"-"` // Path to the graphdriver mountpoint + RWLayer layer.RWLayer `json:"-"` + ID string + Created time.Time + Managed bool + Path string + Args []string + Config *containertypes.Config + ImageID image.ID `json:"Image"` + NetworkSettings *network.Settings + LogPath string + Name string + Driver string + // MountLabel contains the options for the 'mount' command + MountLabel string + ProcessLabel string + RestartCount int + HasBeenStartedBefore bool + HasBeenManuallyStopped bool // used for unless-stopped restart policy + MountPoints map[string]*volume.MountPoint + HostConfig *containertypes.HostConfig `json:"-"` // do not serialize the host config in the json, otherwise we'll make the container unportable + ExecCommands *exec.Store `json:"-"` + DependencyStore agentexec.DependencyGetter `json:"-"` + SecretReferences []*swarmtypes.SecretReference + ConfigReferences []*swarmtypes.ConfigReference + // logDriver for closing + LogDriver logger.Logger `json:"-"` + LogCopier *logger.Copier `json:"-"` + restartManager restartmanager.RestartManager + attachContext *attachContext +} + +// NewBaseContainer creates a new container with its +// basic configuration. +func NewBaseContainer(id, root string) *Container { + return &Container{ + CommonContainer: CommonContainer{ + ID: id, + State: NewState(), + ExecCommands: exec.NewStore(), + Root: root, + MountPoints: make(map[string]*volume.MountPoint), + StreamConfig: stream.NewConfig(), + attachContext: &attachContext{}, + }, + } +} + +// FromDisk loads the container configuration stored in the host. +func (container *Container) FromDisk() error { + pth, err := container.ConfigPath() + if err != nil { + return err + } + + jsonSource, err := os.Open(pth) + if err != nil { + return err + } + defer jsonSource.Close() + + dec := json.NewDecoder(jsonSource) + + // Load container settings + if err := dec.Decode(container); err != nil { + return err + } + + if err := label.ReserveLabel(container.ProcessLabel); err != nil { + return err + } + return container.readHostConfig() +} + +// ToDisk saves the container configuration on disk. +func (container *Container) ToDisk() error { + pth, err := container.ConfigPath() + if err != nil { + return err + } + + jsonSource, err := ioutils.NewAtomicFileWriter(pth, 0644) + if err != nil { + return err + } + defer jsonSource.Close() + + enc := json.NewEncoder(jsonSource) + + // Save container settings + if err := enc.Encode(container); err != nil { + return err + } + + return container.WriteHostConfig() +} + +// ToDiskLocking saves the container configuration on disk in a thread safe way. +func (container *Container) ToDiskLocking() error { + container.Lock() + err := container.ToDisk() + container.Unlock() + return err +} + +// readHostConfig reads the host configuration from disk for the container. +func (container *Container) readHostConfig() error { + container.HostConfig = &containertypes.HostConfig{} + // If the hostconfig file does not exist, do not read it. + // (We still have to initialize container.HostConfig, + // but that's OK, since we just did that above.) + pth, err := container.HostConfigPath() + if err != nil { + return err + } + + f, err := os.Open(pth) + if err != nil { + if os.IsNotExist(err) { + return nil + } + return err + } + defer f.Close() + + if err := json.NewDecoder(f).Decode(&container.HostConfig); err != nil { + return err + } + + container.InitDNSHostConfig() + + return nil +} + +// WriteHostConfig saves the host configuration on disk for the container. 
+func (container *Container) WriteHostConfig() error { + pth, err := container.HostConfigPath() + if err != nil { + return err + } + + f, err := ioutils.NewAtomicFileWriter(pth, 0644) + if err != nil { + return err + } + defer f.Close() + + return json.NewEncoder(f).Encode(&container.HostConfig) +} + +// SetupWorkingDirectory sets up the container's working directory as set in container.Config.WorkingDir +func (container *Container) SetupWorkingDirectory(rootUID, rootGID int) error { + if container.Config.WorkingDir == "" { + return nil + } + + container.Config.WorkingDir = filepath.Clean(container.Config.WorkingDir) + + pth, err := container.GetResourcePath(container.Config.WorkingDir) + if err != nil { + return err + } + + if err := idtools.MkdirAllNewAs(pth, 0755, rootUID, rootGID); err != nil { + pthInfo, err2 := os.Stat(pth) + if err2 == nil && pthInfo != nil && !pthInfo.IsDir() { + return fmt.Errorf("Cannot mkdir: %s is not a directory", container.Config.WorkingDir) + } + + return err + } + + return nil +} + +// GetResourcePath evaluates `path` in the scope of the container's BaseFS, with proper path +// sanitisation. Symlinks are all scoped to the BaseFS of the container, as +// though the container's BaseFS was `/`. +// +// The BaseFS of a container is the host-facing path which is bind-mounted as +// `/` inside the container. This method is essentially used to access a +// particular path inside the container as though you were a process in that +// container. +// +// NOTE: The returned path is *only* safely scoped inside the container's BaseFS +// if no component of the returned path changes (such as a component +// symlinking to a different path) between using this method and using the +// path. See symlink.FollowSymlinkInScope for more details. +func (container *Container) GetResourcePath(path string) (string, error) { + // IMPORTANT - These are paths on the OS where the daemon is running, hence + // any filepath operations must be done in an OS agnostic way. + + cleanPath := cleanResourcePath(path) + r, e := symlink.FollowSymlinkInScope(filepath.Join(container.BaseFS, cleanPath), container.BaseFS) + + // Log this here on the daemon side as there's otherwise no indication apart + // from the error being propagated all the way back to the client. This makes + // debugging significantly easier and clearly indicates the error comes from the daemon. + if e != nil { + logrus.Errorf("Failed to FollowSymlinkInScope BaseFS %s cleanPath %s path %s %s\n", container.BaseFS, cleanPath, path, e) + } + return r, e +} + +// GetRootResourcePath evaluates `path` in the scope of the container's root, with proper path +// sanitisation. Symlinks are all scoped to the root of the container, as +// though the container's root was `/`. +// +// The root of a container is the host-facing configuration metadata directory. +// Only use this method to safely access the container's `container.json` or +// other metadata files. If in doubt, use container.GetResourcePath. +// +// NOTE: The returned path is *only* safely scoped inside the container's root +// if no component of the returned path changes (such as a component +// symlinking to a different path) between using this method and using the +// path. See symlink.FollowSymlinkInScope for more details. +func (container *Container) GetRootResourcePath(path string) (string, error) { + // IMPORTANT - These are paths on the OS where the daemon is running, hence + // any filepath operations must be done in an OS agnostic way. 
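+	// Rooting the path at the OS separator first neutralises ".." and other
+	// relative components, so the scoped symlink evaluation below cannot
+	// escape container.Root.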
+ cleanPath := filepath.Join(string(os.PathSeparator), path) + return symlink.FollowSymlinkInScope(filepath.Join(container.Root, cleanPath), container.Root) +} + +// ExitOnNext signals to the monitor that it should not restart the container +// after we send the kill signal. +func (container *Container) ExitOnNext() { + container.RestartManager().Cancel() +} + +// HostConfigPath returns the path to the container's JSON hostconfig +func (container *Container) HostConfigPath() (string, error) { + return container.GetRootResourcePath("hostconfig.json") +} + +// ConfigPath returns the path to the container's JSON config +func (container *Container) ConfigPath() (string, error) { + return container.GetRootResourcePath(configFileName) +} + +// CheckpointDir returns the directory checkpoints are stored in +func (container *Container) CheckpointDir() string { + return filepath.Join(container.Root, "checkpoints") +} + +// StartLogger starts a new logger driver for the container. +func (container *Container) StartLogger() (logger.Logger, error) { + cfg := container.HostConfig.LogConfig + initDriver, err := logger.GetLogDriver(cfg.Type) + if err != nil { + return nil, fmt.Errorf("failed to get logging factory: %v", err) + } + info := logger.Info{ + Config: cfg.Config, + ContainerID: container.ID, + ContainerName: container.Name, + ContainerEntrypoint: container.Path, + ContainerArgs: container.Args, + ContainerImageID: container.ImageID.String(), + ContainerImageName: container.Config.Image, + ContainerCreated: container.Created, + ContainerEnv: container.Config.Env, + ContainerLabels: container.Config.Labels, + DaemonName: "docker", + } + + // Set logging file for "json-logger" + if cfg.Type == jsonfilelog.Name { + info.LogPath, err = container.GetRootResourcePath(fmt.Sprintf("%s-json.log", container.ID)) + if err != nil { + return nil, err + } + } + + l, err := initDriver(info) + if err != nil { + return nil, err + } + + if containertypes.LogMode(cfg.Config["mode"]) == containertypes.LogModeNonBlock { + bufferSize := int64(-1) + if s, exists := cfg.Config["max-buffer-size"]; exists { + bufferSize, err = units.RAMInBytes(s) + if err != nil { + return nil, err + } + } + l = logger.NewRingLogger(l, info, bufferSize) + } + return l, nil +} + +// GetProcessLabel returns the process label for the container. +func (container *Container) GetProcessLabel() string { + // even if we have a process label return "" if we are running + // in privileged mode + if container.HostConfig.Privileged { + return "" + } + return container.ProcessLabel +} + +// GetMountLabel returns the mounting label for the container. +// This label is empty if the container is privileged. +func (container *Container) GetMountLabel() string { + return container.MountLabel +} + +// GetExecIDs returns the list of exec commands running on the container. +func (container *Container) GetExecIDs() []string { + return container.ExecCommands.List() +} + +// ShouldRestart decides whether the daemon should restart the container or not. +// This is based on the container's restart policy. +func (container *Container) ShouldRestart() bool { + shouldRestart, _, _ := container.RestartManager().ShouldRestart(uint32(container.ExitCode()), container.HasBeenManuallyStopped, container.FinishedAt.Sub(container.StartedAt)) + return shouldRestart +} + +// AddMountPointWithVolume adds a new mount point configured with a volume to the container. 
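+// The mount point is keyed by its destination path and records the volume's
+// name and driver; CopyData is set to the default copy mode, which allows
+// existing image content at the destination to be copied into an empty volume.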
+func (container *Container) AddMountPointWithVolume(destination string, vol volume.Volume, rw bool) { + container.MountPoints[destination] = &volume.MountPoint{ + Type: mounttypes.TypeVolume, + Name: vol.Name(), + Driver: vol.DriverName(), + Destination: destination, + RW: rw, + Volume: vol, + CopyData: volume.DefaultCopyMode, + } +} + +// UnmountVolumes unmounts all volumes +func (container *Container) UnmountVolumes(volumeEventLog func(name, action string, attributes map[string]string)) error { + var errors []string + for _, volumeMount := range container.MountPoints { + if volumeMount.Volume == nil { + continue + } + + if err := volumeMount.Cleanup(); err != nil { + errors = append(errors, err.Error()) + continue + } + + attributes := map[string]string{ + "driver": volumeMount.Volume.DriverName(), + "container": container.ID, + } + volumeEventLog(volumeMount.Volume.Name(), "unmount", attributes) + } + if len(errors) > 0 { + return fmt.Errorf("error while unmounting volumes for container %s: %s", container.ID, strings.Join(errors, "; ")) + } + return nil +} + +// IsDestinationMounted checks whether a path is mounted on the container or not. +func (container *Container) IsDestinationMounted(destination string) bool { + return container.MountPoints[destination] != nil +} + +// StopSignal returns the signal used to stop the container. +func (container *Container) StopSignal() int { + var stopSignal syscall.Signal + if container.Config.StopSignal != "" { + stopSignal, _ = signal.ParseSignal(container.Config.StopSignal) + } + + if int(stopSignal) == 0 { + stopSignal, _ = signal.ParseSignal(signal.DefaultStopSignal) + } + return int(stopSignal) +} + +// StopTimeout returns the timeout (in seconds) used to stop the container. +func (container *Container) StopTimeout() int { + if container.Config.StopTimeout != nil { + return *container.Config.StopTimeout + } + return DefaultStopTimeout +} + +// InitDNSHostConfig ensures that the dns fields are never nil. +// New containers don't ever have those fields nil, +// but pre created containers can still have those nil values. +// The non-recommended host configuration in the start api can +// make these fields nil again, this corrects that issue until +// we remove that behavior for good. +// See https://github.com/docker/docker/pull/17779 +// for a more detailed explanation on why we don't want that. +func (container *Container) InitDNSHostConfig() { + container.Lock() + defer container.Unlock() + if container.HostConfig.DNS == nil { + container.HostConfig.DNS = make([]string, 0) + } + + if container.HostConfig.DNSSearch == nil { + container.HostConfig.DNSSearch = make([]string, 0) + } + + if container.HostConfig.DNSOptions == nil { + container.HostConfig.DNSOptions = make([]string, 0) + } +} + +// GetEndpointInNetwork returns the container's endpoint to the provided network. 
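+// Endpoints are named after their container, so the lookup strips the leading
+// "/" from container.Name and queries the network by that name.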
+func (container *Container) GetEndpointInNetwork(n libnetwork.Network) (libnetwork.Endpoint, error) { + endpointName := strings.TrimPrefix(container.Name, "/") + return n.EndpointByName(endpointName) +} + +func (container *Container) buildPortMapInfo(ep libnetwork.Endpoint) error { + if ep == nil { + return errInvalidEndpoint + } + + networkSettings := container.NetworkSettings + if networkSettings == nil { + return errInvalidNetwork + } + + if len(networkSettings.Ports) == 0 { + pm, err := getEndpointPortMapInfo(ep) + if err != nil { + return err + } + networkSettings.Ports = pm + } + return nil +} + +func getEndpointPortMapInfo(ep libnetwork.Endpoint) (nat.PortMap, error) { + pm := nat.PortMap{} + driverInfo, err := ep.DriverInfo() + if err != nil { + return pm, err + } + + if driverInfo == nil { + // It is not an error for epInfo to be nil + return pm, nil + } + + if expData, ok := driverInfo[netlabel.ExposedPorts]; ok { + if exposedPorts, ok := expData.([]types.TransportPort); ok { + for _, tp := range exposedPorts { + natPort, err := nat.NewPort(tp.Proto.String(), strconv.Itoa(int(tp.Port))) + if err != nil { + return pm, fmt.Errorf("Error parsing Port value(%v):%v", tp.Port, err) + } + pm[natPort] = nil + } + } + } + + mapData, ok := driverInfo[netlabel.PortMap] + if !ok { + return pm, nil + } + + if portMapping, ok := mapData.([]types.PortBinding); ok { + for _, pp := range portMapping { + natPort, err := nat.NewPort(pp.Proto.String(), strconv.Itoa(int(pp.Port))) + if err != nil { + return pm, err + } + natBndg := nat.PortBinding{HostIP: pp.HostIP.String(), HostPort: strconv.Itoa(int(pp.HostPort))} + pm[natPort] = append(pm[natPort], natBndg) + } + } + + return pm, nil +} + +// GetSandboxPortMapInfo retrieves the current port-mapping programmed for the given sandbox +func GetSandboxPortMapInfo(sb libnetwork.Sandbox) nat.PortMap { + pm := nat.PortMap{} + if sb == nil { + return pm + } + + for _, ep := range sb.Endpoints() { + pm, _ = getEndpointPortMapInfo(ep) + if len(pm) > 0 { + break + } + } + return pm +} + +// BuildEndpointInfo sets endpoint-related fields on container.NetworkSettings based on the provided network and endpoint. 
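+// It records the network and endpoint IDs and, when interface information is
+// available, the MAC address as well as the IPv4/IPv6 addresses and prefix
+// lengths.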
+func (container *Container) BuildEndpointInfo(n libnetwork.Network, ep libnetwork.Endpoint) error { + if ep == nil { + return errInvalidEndpoint + } + + networkSettings := container.NetworkSettings + if networkSettings == nil { + return errInvalidNetwork + } + + epInfo := ep.Info() + if epInfo == nil { + // It is not an error to get an empty endpoint info + return nil + } + + if _, ok := networkSettings.Networks[n.Name()]; !ok { + networkSettings.Networks[n.Name()] = &network.EndpointSettings{ + EndpointSettings: &networktypes.EndpointSettings{}, + } + } + networkSettings.Networks[n.Name()].NetworkID = n.ID() + networkSettings.Networks[n.Name()].EndpointID = ep.ID() + + iface := epInfo.Iface() + if iface == nil { + return nil + } + + if iface.MacAddress() != nil { + networkSettings.Networks[n.Name()].MacAddress = iface.MacAddress().String() + } + + if iface.Address() != nil { + ones, _ := iface.Address().Mask.Size() + networkSettings.Networks[n.Name()].IPAddress = iface.Address().IP.String() + networkSettings.Networks[n.Name()].IPPrefixLen = ones + } + + if iface.AddressIPv6() != nil && iface.AddressIPv6().IP.To16() != nil { + onesv6, _ := iface.AddressIPv6().Mask.Size() + networkSettings.Networks[n.Name()].GlobalIPv6Address = iface.AddressIPv6().IP.String() + networkSettings.Networks[n.Name()].GlobalIPv6PrefixLen = onesv6 + } + + return nil +} + +// UpdateJoinInfo updates network settings when container joins network n with endpoint ep. +func (container *Container) UpdateJoinInfo(n libnetwork.Network, ep libnetwork.Endpoint) error { + if err := container.buildPortMapInfo(ep); err != nil { + return err + } + + epInfo := ep.Info() + if epInfo == nil { + // It is not an error to get an empty endpoint info + return nil + } + if epInfo.Gateway() != nil { + container.NetworkSettings.Networks[n.Name()].Gateway = epInfo.Gateway().String() + } + if epInfo.GatewayIPv6().To16() != nil { + container.NetworkSettings.Networks[n.Name()].IPv6Gateway = epInfo.GatewayIPv6().String() + } + + return nil +} + +// UpdateSandboxNetworkSettings updates the sandbox ID and Key. +func (container *Container) UpdateSandboxNetworkSettings(sb libnetwork.Sandbox) error { + container.NetworkSettings.SandboxID = sb.ID() + container.NetworkSettings.SandboxKey = sb.Key() + return nil +} + +// BuildJoinOptions builds endpoint Join options from a given network. +func (container *Container) BuildJoinOptions(n libnetwork.Network) ([]libnetwork.EndpointOption, error) { + var joinOptions []libnetwork.EndpointOption + if epConfig, ok := container.NetworkSettings.Networks[n.Name()]; ok { + for _, str := range epConfig.Links { + name, alias, err := opts.ParseLink(str) + if err != nil { + return nil, err + } + joinOptions = append(joinOptions, libnetwork.CreateOptionAlias(name, alias)) + } + for k, v := range epConfig.DriverOpts { + joinOptions = append(joinOptions, libnetwork.EndpointOptionGeneric(options.Generic{k: v})) + } + } + + return joinOptions, nil +} + +// BuildCreateEndpointOptions builds endpoint options from a given network. 
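+// Depending on the container and network configuration, the returned slice
+// may combine anonymous-endpoint, IPAM, alias, service, resolution, MAC
+// address, DNS, port-mapping and exposed-port options.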
+func (container *Container) BuildCreateEndpointOptions(n libnetwork.Network, epConfig *networktypes.EndpointSettings, sb libnetwork.Sandbox, daemonDNS []string) ([]libnetwork.EndpointOption, error) {
+	var (
+		bindings      = make(nat.PortMap)
+		pbList        []types.PortBinding
+		exposeList    []types.TransportPort
+		createOptions []libnetwork.EndpointOption
+	)
+
+	defaultNetName := runconfig.DefaultDaemonNetworkMode().NetworkName()
+
+	if (!container.EnableServiceDiscoveryOnDefaultNetwork() && n.Name() == defaultNetName) ||
+		container.NetworkSettings.IsAnonymousEndpoint {
+		createOptions = append(createOptions, libnetwork.CreateOptionAnonymous())
+	}
+
+	if epConfig != nil {
+		ipam := epConfig.IPAMConfig
+
+		if ipam != nil {
+			var (
+				ipList          []net.IP
+				ip, ip6, linkip net.IP
+			)
+
+			for _, ips := range ipam.LinkLocalIPs {
+				if linkip = net.ParseIP(ips); linkip == nil && ips != "" {
+					return nil, fmt.Errorf("Invalid link-local IP address:%s", ips)
+				}
+				ipList = append(ipList, linkip)
+
+			}
+
+			if ip = net.ParseIP(ipam.IPv4Address); ip == nil && ipam.IPv4Address != "" {
+				return nil, fmt.Errorf("Invalid IPv4 address:%s", ipam.IPv4Address)
+			}
+
+			if ip6 = net.ParseIP(ipam.IPv6Address); ip6 == nil && ipam.IPv6Address != "" {
+				return nil, fmt.Errorf("Invalid IPv6 address:%s", ipam.IPv6Address)
+			}
+
+			createOptions = append(createOptions,
+				libnetwork.CreateOptionIpam(ip, ip6, ipList, nil))
+
+		}
+
+		for _, alias := range epConfig.Aliases {
+			createOptions = append(createOptions, libnetwork.CreateOptionMyAlias(alias))
+		}
+	}
+
+	if container.NetworkSettings.Service != nil {
+		svcCfg := container.NetworkSettings.Service
+
+		var vip string
+		if svcCfg.VirtualAddresses[n.ID()] != nil {
+			vip = svcCfg.VirtualAddresses[n.ID()].IPv4
+		}
+
+		var portConfigs []*libnetwork.PortConfig
+		for _, portConfig := range svcCfg.ExposedPorts {
+			portConfigs = append(portConfigs, &libnetwork.PortConfig{
+				Name:          portConfig.Name,
+				Protocol:      libnetwork.PortConfig_Protocol(portConfig.Protocol),
+				TargetPort:    portConfig.TargetPort,
+				PublishedPort: portConfig.PublishedPort,
+			})
+		}
+
+		createOptions = append(createOptions, libnetwork.CreateOptionService(svcCfg.Name, svcCfg.ID, net.ParseIP(vip), portConfigs, svcCfg.Aliases[n.ID()]))
+	}
+
+	if !containertypes.NetworkMode(n.Name()).IsUserDefined() {
+		createOptions = append(createOptions, libnetwork.CreateOptionDisableResolution())
+	}
+
+	// configs that are applicable only for the endpoint in the network
+	// to which the container was connected on docker run.
+ // Ideally all these network-specific endpoint configurations must be moved under + // container.NetworkSettings.Networks[n.Name()] + if n.Name() == container.HostConfig.NetworkMode.NetworkName() || + (n.Name() == defaultNetName && container.HostConfig.NetworkMode.IsDefault()) { + if container.Config.MacAddress != "" { + mac, err := net.ParseMAC(container.Config.MacAddress) + if err != nil { + return nil, err + } + + genericOption := options.Generic{ + netlabel.MacAddress: mac, + } + + createOptions = append(createOptions, libnetwork.EndpointOptionGeneric(genericOption)) + } + for k, v := range epConfig.DriverOpts { + createOptions = append(createOptions, libnetwork.EndpointOptionGeneric(options.Generic{k: v})) + } + + } + + // Port-mapping rules belong to the container & applicable only to non-internal networks + portmaps := GetSandboxPortMapInfo(sb) + if n.Info().Internal() || len(portmaps) > 0 { + return createOptions, nil + } + + if container.HostConfig.PortBindings != nil { + for p, b := range container.HostConfig.PortBindings { + bindings[p] = []nat.PortBinding{} + for _, bb := range b { + bindings[p] = append(bindings[p], nat.PortBinding{ + HostIP: bb.HostIP, + HostPort: bb.HostPort, + }) + } + } + } + + portSpecs := container.Config.ExposedPorts + ports := make([]nat.Port, len(portSpecs)) + var i int + for p := range portSpecs { + ports[i] = p + i++ + } + nat.SortPortMap(ports, bindings) + for _, port := range ports { + expose := types.TransportPort{} + expose.Proto = types.ParseProtocol(port.Proto()) + expose.Port = uint16(port.Int()) + exposeList = append(exposeList, expose) + + pb := types.PortBinding{Port: expose.Port, Proto: expose.Proto} + binding := bindings[port] + for i := 0; i < len(binding); i++ { + pbCopy := pb.GetCopy() + newP, err := nat.NewPort(nat.SplitProtoPort(binding[i].HostPort)) + var portStart, portEnd int + if err == nil { + portStart, portEnd, err = newP.Range() + } + if err != nil { + return nil, fmt.Errorf("Error parsing HostPort value(%s):%v", binding[i].HostPort, err) + } + pbCopy.HostPort = uint16(portStart) + pbCopy.HostPortEnd = uint16(portEnd) + pbCopy.HostIP = net.ParseIP(binding[i].HostIP) + pbList = append(pbList, pbCopy) + } + + if container.HostConfig.PublishAllPorts && len(binding) == 0 { + pbList = append(pbList, pb) + } + } + + var dns []string + + if len(container.HostConfig.DNS) > 0 { + dns = container.HostConfig.DNS + } else if len(daemonDNS) > 0 { + dns = daemonDNS + } + + if len(dns) > 0 { + createOptions = append(createOptions, + libnetwork.CreateOptionDNS(dns)) + } + + createOptions = append(createOptions, + libnetwork.CreateOptionPortMapping(pbList), + libnetwork.CreateOptionExposedPorts(exposeList)) + + return createOptions, nil +} + +// UpdateMonitor updates monitor configure for running container +func (container *Container) UpdateMonitor(restartPolicy containertypes.RestartPolicy) { + type policySetter interface { + SetPolicy(containertypes.RestartPolicy) + } + + if rm, ok := container.RestartManager().(policySetter); ok { + rm.SetPolicy(restartPolicy) + } +} + +// FullHostname returns hostname and optional domain appended to it. +func (container *Container) FullHostname() string { + fullHostname := container.Config.Hostname + if container.Config.Domainname != "" { + fullHostname = fmt.Sprintf("%s.%s", fullHostname, container.Config.Domainname) + } + return fullHostname +} + +// RestartManager returns the current restartmanager instance connected to container. 
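+// The manager is created lazily on first use from the container's restart
+// policy and current restart count.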
+func (container *Container) RestartManager() restartmanager.RestartManager { + if container.restartManager == nil { + container.restartManager = restartmanager.New(container.HostConfig.RestartPolicy, container.RestartCount) + } + return container.restartManager +} + +// ResetRestartManager initializes new restartmanager based on container config +func (container *Container) ResetRestartManager(resetCount bool) { + if container.restartManager != nil { + container.restartManager.Cancel() + } + if resetCount { + container.RestartCount = 0 + } + container.restartManager = nil +} + +type attachContext struct { + ctx context.Context + cancel context.CancelFunc + mu sync.Mutex +} + +// InitAttachContext initializes or returns existing context for attach calls to +// track container liveness. +func (container *Container) InitAttachContext() context.Context { + container.attachContext.mu.Lock() + defer container.attachContext.mu.Unlock() + if container.attachContext.ctx == nil { + container.attachContext.ctx, container.attachContext.cancel = context.WithCancel(context.Background()) + } + return container.attachContext.ctx +} + +// CancelAttachContext cancels attach context. All attach calls should detach +// after this call. +func (container *Container) CancelAttachContext() { + container.attachContext.mu.Lock() + if container.attachContext.ctx != nil { + container.attachContext.cancel() + container.attachContext.ctx = nil + } + container.attachContext.mu.Unlock() +} + +func (container *Container) startLogging() error { + if container.HostConfig.LogConfig.Type == "none" { + return nil // do not start logging routines + } + + l, err := container.StartLogger() + if err != nil { + return fmt.Errorf("failed to initialize logging driver: %v", err) + } + + copier := logger.NewCopier(map[string]io.Reader{"stdout": container.StdoutPipe(), "stderr": container.StderrPipe()}, l) + container.LogCopier = copier + copier.Run() + container.LogDriver = l + + // set LogPath field only for json-file logdriver + if jl, ok := l.(*jsonfilelog.JSONFileLogger); ok { + container.LogPath = jl.LogPath() + } + + return nil +} + +// StdinPipe gets the stdin stream of the container +func (container *Container) StdinPipe() io.WriteCloser { + return container.StreamConfig.StdinPipe() +} + +// StdoutPipe gets the stdout stream of the container +func (container *Container) StdoutPipe() io.ReadCloser { + return container.StreamConfig.StdoutPipe() +} + +// StderrPipe gets the stderr stream of the container +func (container *Container) StderrPipe() io.ReadCloser { + return container.StreamConfig.StderrPipe() +} + +// CloseStreams closes the container's stdio streams +func (container *Container) CloseStreams() error { + return container.StreamConfig.CloseStreams() +} + +// InitializeStdio is called by libcontainerd to connect the stdio. +func (container *Container) InitializeStdio(iop libcontainerd.IOPipe) error { + if err := container.startLogging(); err != nil { + container.Reset(false) + return err + } + + container.StreamConfig.CopyToPipe(iop) + + if container.StreamConfig.Stdin() == nil && !container.Config.Tty { + if iop.Stdin != nil { + if err := iop.Stdin.Close(); err != nil { + logrus.Warnf("error closing stdin: %+v", err) + } + } + } + + return nil +} + +// SecretMountPath returns the path of the secret mount for the container +func (container *Container) SecretMountPath() string { + return filepath.Join(container.Root, "secrets") +} + +// SecretFilePath returns the path to the location of a secret on the host. 
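+// The returned path is <container root>/secrets/<secret ID>.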
+func (container *Container) SecretFilePath(secretRef swarmtypes.SecretReference) string {
+	return filepath.Join(container.SecretMountPath(), secretRef.SecretID)
+}
+
+func getSecretTargetPath(r *swarmtypes.SecretReference) string {
+	if filepath.IsAbs(r.File.Name) {
+		return r.File.Name
+	}
+
+	return filepath.Join(containerSecretMountPath, r.File.Name)
+}
+
+// ConfigsDirPath returns the path to the directory where configs are stored on
+// disk.
+func (container *Container) ConfigsDirPath() string {
+	return filepath.Join(container.Root, "configs")
+}
+
+// ConfigFilePath returns the path to the on-disk location of a config.
+func (container *Container) ConfigFilePath(configRef swarmtypes.ConfigReference) string {
+	return filepath.Join(container.ConfigsDirPath(), configRef.ConfigID)
+}
diff --git a/components/engine/container/container_linux.go b/components/engine/container/container_linux.go
new file mode 100644
index 0000000000..4d4c16b563
--- /dev/null
+++ b/components/engine/container/container_linux.go
@@ -0,0 +1,9 @@
+package container
+
+import (
+	"golang.org/x/sys/unix"
+)
+
+func detachMounted(path string) error {
+	return unix.Unmount(path, unix.MNT_DETACH)
+}
diff --git a/components/engine/container/container_notlinux.go b/components/engine/container/container_notlinux.go
new file mode 100644
index 0000000000..768c762d2f
--- /dev/null
+++ b/components/engine/container/container_notlinux.go
@@ -0,0 +1,23 @@
+// +build solaris freebsd
+
+package container
+
+import (
+	"golang.org/x/sys/unix"
+)
+
+func detachMounted(path string) error {
+	// Solaris and FreeBSD do not support the lazy unmount or MNT_DETACH feature.
+	// Therefore there are separate definitions for this.
+	return unix.Unmount(path, 0)
+}
+
+// SecretMounts returns the mounts for the secret path
+func (container *Container) SecretMounts() []Mount {
+	return nil
+}
+
+// UnmountSecrets unmounts the fs for secrets
+func (container *Container) UnmountSecrets() error {
+	return nil
+}
diff --git a/components/engine/container/container_unit_test.go b/components/engine/container/container_unit_test.go
new file mode 100644
index 0000000000..01d06e4eb8
--- /dev/null
+++ b/components/engine/container/container_unit_test.go
@@ -0,0 +1,76 @@
+package container
+
+import (
+	"path/filepath"
+	"testing"
+
+	"github.com/docker/docker/api/types/container"
+	swarmtypes "github.com/docker/docker/api/types/swarm"
+	"github.com/docker/docker/pkg/signal"
+)
+
+func TestContainerStopSignal(t *testing.T) {
+	c := &Container{
+		CommonContainer: CommonContainer{
+			Config: &container.Config{},
+		},
+	}
+
+	def, err := signal.ParseSignal(signal.DefaultStopSignal)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	s := c.StopSignal()
+	if s != int(def) {
+		t.Fatalf("Expected %v, got %v", def, s)
+	}
+
+	c = &Container{
+		CommonContainer: CommonContainer{
+			Config: &container.Config{StopSignal: "SIGKILL"},
+		},
+	}
+	s = c.StopSignal()
+	if s != 9 {
+		t.Fatalf("Expected 9, got %v", s)
+	}
+}
+
+func TestContainerStopTimeout(t *testing.T) {
+	c := &Container{
+		CommonContainer: CommonContainer{
+			Config: &container.Config{},
+		},
+	}
+
+	s := c.StopTimeout()
+	if s != DefaultStopTimeout {
+		t.Fatalf("Expected %v, got %v", DefaultStopTimeout, s)
+	}
+
+	stopTimeout := 15
+	c = &Container{
+		CommonContainer: CommonContainer{
+			Config: &container.Config{StopTimeout: &stopTimeout},
+		},
+	}
+	s = c.StopTimeout()
+	if s != 15 {
+		t.Fatalf("Expected 15, got %v", s)
+	}
+}
+
+func TestContainerSecretReferenceDestTarget(t *testing.T) {
+	ref :=
&swarmtypes.SecretReference{ + File: &swarmtypes.SecretReferenceFileTarget{ + Name: "app", + }, + } + + d := getSecretTargetPath(ref) + expected := filepath.Join(containerSecretMountPath, "app") + if d != expected { + t.Fatalf("expected secret dest %q; received %q", expected, d) + } +} diff --git a/components/engine/container/container_unix.go b/components/engine/container/container_unix.go new file mode 100644 index 0000000000..b0a32e6d5b --- /dev/null +++ b/components/engine/container/container_unix.go @@ -0,0 +1,482 @@ +// +build linux freebsd solaris + +package container + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strings" + + "github.com/Sirupsen/logrus" + containertypes "github.com/docker/docker/api/types/container" + mounttypes "github.com/docker/docker/api/types/mount" + "github.com/docker/docker/pkg/chrootarchive" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/pkg/symlink" + "github.com/docker/docker/pkg/system" + "github.com/docker/docker/volume" + "github.com/opencontainers/selinux/go-selinux/label" + "golang.org/x/sys/unix" +) + +const ( + containerSecretMountPath = "/run/secrets" +) + +// Container holds the fields specific to unixen implementations. +// See CommonContainer for standard fields common to all containers. +type Container struct { + CommonContainer + + // Fields below here are platform specific. + AppArmorProfile string + HostnamePath string + HostsPath string + ShmPath string + ResolvConfPath string + SeccompProfile string + NoNewPrivileges bool +} + +// ExitStatus provides exit reasons for a container. +type ExitStatus struct { + // The exit code with which the container exited. + ExitCode int + + // Whether the container encountered an OOM. + OOMKilled bool +} + +// CreateDaemonEnvironment returns the list of all environment variables given the list of +// environment variables related to links. +// Sets PATH, HOSTNAME and if container.Config.Tty is set: TERM. +// The defaults set here do not override the values in container.Config.Env +func (container *Container) CreateDaemonEnvironment(tty bool, linkedEnv []string) []string { + // Setup environment + env := []string{ + "PATH=" + system.DefaultPathEnv, + "HOSTNAME=" + container.Config.Hostname, + } + if tty { + env = append(env, "TERM=xterm") + } + env = append(env, linkedEnv...) + // because the env on the container can override certain default values + // we need to replace the 'env' keys where they match and append anything + // else. + env = ReplaceOrAppendEnvValues(env, container.Config.Env) + return env +} + +// TrySetNetworkMount attempts to set the network mounts given a provided destination and +// the path to use for it; return true if the given destination was a network mount file +func (container *Container) TrySetNetworkMount(destination string, path string) bool { + if destination == "/etc/resolv.conf" { + container.ResolvConfPath = path + return true + } + if destination == "/etc/hostname" { + container.HostnamePath = path + return true + } + if destination == "/etc/hosts" { + container.HostsPath = path + return true + } + + return false +} + +// BuildHostnameFile writes the container's hostname file. +func (container *Container) BuildHostnameFile() error { + hostnamePath, err := container.GetRootResourcePath("hostname") + if err != nil { + return err + } + container.HostnamePath = hostnamePath + return ioutil.WriteFile(container.HostnamePath, []byte(container.Config.Hostname+"\n"), 0644) +} + +// NetworkMounts returns the list of network mounts. 
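+// These cover the generated /etc/resolv.conf, /etc/hostname and /etc/hosts
+// files; any path that cannot be stat'ed is skipped with a warning, and each
+// mount inherits the writability of a user-supplied mount at the same
+// destination, if one exists.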
+func (container *Container) NetworkMounts() []Mount { + var mounts []Mount + shared := container.HostConfig.NetworkMode.IsContainer() + if container.ResolvConfPath != "" { + if _, err := os.Stat(container.ResolvConfPath); err != nil { + logrus.Warnf("ResolvConfPath set to %q, but can't stat this filename (err = %v); skipping", container.ResolvConfPath, err) + } else { + if !container.HasMountFor("/etc/resolv.conf") { + label.Relabel(container.ResolvConfPath, container.MountLabel, shared) + } + writable := !container.HostConfig.ReadonlyRootfs + if m, exists := container.MountPoints["/etc/resolv.conf"]; exists { + writable = m.RW + } + mounts = append(mounts, Mount{ + Source: container.ResolvConfPath, + Destination: "/etc/resolv.conf", + Writable: writable, + Propagation: string(volume.DefaultPropagationMode), + }) + } + } + if container.HostnamePath != "" { + if _, err := os.Stat(container.HostnamePath); err != nil { + logrus.Warnf("HostnamePath set to %q, but can't stat this filename (err = %v); skipping", container.HostnamePath, err) + } else { + if !container.HasMountFor("/etc/hostname") { + label.Relabel(container.HostnamePath, container.MountLabel, shared) + } + writable := !container.HostConfig.ReadonlyRootfs + if m, exists := container.MountPoints["/etc/hostname"]; exists { + writable = m.RW + } + mounts = append(mounts, Mount{ + Source: container.HostnamePath, + Destination: "/etc/hostname", + Writable: writable, + Propagation: string(volume.DefaultPropagationMode), + }) + } + } + if container.HostsPath != "" { + if _, err := os.Stat(container.HostsPath); err != nil { + logrus.Warnf("HostsPath set to %q, but can't stat this filename (err = %v); skipping", container.HostsPath, err) + } else { + if !container.HasMountFor("/etc/hosts") { + label.Relabel(container.HostsPath, container.MountLabel, shared) + } + writable := !container.HostConfig.ReadonlyRootfs + if m, exists := container.MountPoints["/etc/hosts"]; exists { + writable = m.RW + } + mounts = append(mounts, Mount{ + Source: container.HostsPath, + Destination: "/etc/hosts", + Writable: writable, + Propagation: string(volume.DefaultPropagationMode), + }) + } + } + return mounts +} + +// CopyImagePathContent copies files in destination to the volume. 
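+// The image content already present at `destination` (resolved inside the
+// container's BaseFS) is copied into the volume only when the volume is
+// empty; ownership and permissions of the source are applied to the
+// destination afterwards.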
+func (container *Container) CopyImagePathContent(v volume.Volume, destination string) error { + rootfs, err := symlink.FollowSymlinkInScope(filepath.Join(container.BaseFS, destination), container.BaseFS) + if err != nil { + return err + } + + if _, err = ioutil.ReadDir(rootfs); err != nil { + if os.IsNotExist(err) { + return nil + } + return err + } + + id := stringid.GenerateNonCryptoID() + path, err := v.Mount(id) + if err != nil { + return err + } + + defer func() { + if err := v.Unmount(id); err != nil { + logrus.Warnf("error while unmounting volume %s: %v", v.Name(), err) + } + }() + if err := label.Relabel(path, container.MountLabel, true); err != nil && err != unix.ENOTSUP { + return err + } + return copyExistingContents(rootfs, path) +} + +// ShmResourcePath returns path to shm +func (container *Container) ShmResourcePath() (string, error) { + return container.GetRootResourcePath("shm") +} + +// HasMountFor checks if path is a mountpoint +func (container *Container) HasMountFor(path string) bool { + _, exists := container.MountPoints[path] + return exists +} + +// UnmountIpcMounts uses the provided unmount function to unmount shm and mqueue if they were mounted +func (container *Container) UnmountIpcMounts(unmount func(pth string) error) { + if container.HostConfig.IpcMode.IsContainer() || container.HostConfig.IpcMode.IsHost() { + return + } + + var warnings []string + + if !container.HasMountFor("/dev/shm") { + shmPath, err := container.ShmResourcePath() + if err != nil { + logrus.Error(err) + warnings = append(warnings, err.Error()) + } else if shmPath != "" { + if err := unmount(shmPath); err != nil && !os.IsNotExist(err) { + warnings = append(warnings, fmt.Sprintf("failed to umount %s: %v", shmPath, err)) + } + + } + } + + if len(warnings) > 0 { + logrus.Warnf("failed to cleanup ipc mounts:\n%v", strings.Join(warnings, "\n")) + } +} + +// IpcMounts returns the list of IPC mounts +func (container *Container) IpcMounts() []Mount { + var mounts []Mount + + if !container.HasMountFor("/dev/shm") { + label.SetFileLabel(container.ShmPath, container.MountLabel) + mounts = append(mounts, Mount{ + Source: container.ShmPath, + Destination: "/dev/shm", + Writable: true, + Propagation: string(volume.DefaultPropagationMode), + }) + } + + return mounts +} + +// SecretMounts returns the mounts for the secret path. +func (container *Container) SecretMounts() []Mount { + var mounts []Mount + for _, r := range container.SecretReferences { + if r.File == nil { + continue + } + mounts = append(mounts, Mount{ + Source: container.SecretFilePath(*r), + Destination: getSecretTargetPath(r), + Writable: false, + }) + } + + return mounts +} + +// UnmountSecrets unmounts the local tmpfs for secrets +func (container *Container) UnmountSecrets() error { + if _, err := os.Stat(container.SecretMountPath()); err != nil { + if os.IsNotExist(err) { + return nil + } + return err + } + + return detachMounted(container.SecretMountPath()) +} + +// ConfigMounts returns the mounts for configs. +func (container *Container) ConfigMounts() []Mount { + var mounts []Mount + for _, configRef := range container.ConfigReferences { + if configRef.File == nil { + continue + } + mounts = append(mounts, Mount{ + Source: container.ConfigFilePath(*configRef), + Destination: configRef.File.Name, + Writable: false, + }) + } + + return mounts +} + +// UpdateContainer updates configuration of a container. 
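+// Only non-zero resource fields and a non-empty restart policy are applied,
+// and conflicting CPU settings (NanoCPUs versus CPUPeriod/CPUQuota) are
+// rejected up front.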
+func (container *Container) UpdateContainer(hostConfig *containertypes.HostConfig) error {
+	container.Lock()
+	defer container.Unlock()
+
+	// update resources of container
+	resources := hostConfig.Resources
+	cResources := &container.HostConfig.Resources
+
+	// validate NanoCPUs, CPUPeriod, and CPUQuota
+	// Because NanoCPU effectively updates CPUPeriod/CPUQuota,
+	// once NanoCPU is already set, updating CPUPeriod/CPUQuota will be blocked, and vice versa.
+	// In the following we make sure the intended update (resources) does not conflict with the existing (cResources).
+	if resources.NanoCPUs > 0 && cResources.CPUPeriod > 0 {
+		return fmt.Errorf("Conflicting options: Nano CPUs cannot be updated as CPU Period has already been set")
+	}
+	if resources.NanoCPUs > 0 && cResources.CPUQuota > 0 {
+		return fmt.Errorf("Conflicting options: Nano CPUs cannot be updated as CPU Quota has already been set")
+	}
+	if resources.CPUPeriod > 0 && cResources.NanoCPUs > 0 {
+		return fmt.Errorf("Conflicting options: CPU Period cannot be updated as NanoCPUs has already been set")
+	}
+	if resources.CPUQuota > 0 && cResources.NanoCPUs > 0 {
+		return fmt.Errorf("Conflicting options: CPU Quota cannot be updated as NanoCPUs has already been set")
+	}
+
+	if resources.BlkioWeight != 0 {
+		cResources.BlkioWeight = resources.BlkioWeight
+	}
+	if resources.CPUShares != 0 {
+		cResources.CPUShares = resources.CPUShares
+	}
+	if resources.NanoCPUs != 0 {
+		cResources.NanoCPUs = resources.NanoCPUs
+	}
+	if resources.CPUPeriod != 0 {
+		cResources.CPUPeriod = resources.CPUPeriod
+	}
+	if resources.CPUQuota != 0 {
+		cResources.CPUQuota = resources.CPUQuota
+	}
+	if resources.CpusetCpus != "" {
+		cResources.CpusetCpus = resources.CpusetCpus
+	}
+	if resources.CpusetMems != "" {
+		cResources.CpusetMems = resources.CpusetMems
+	}
+	if resources.Memory != 0 {
+		// If the new memory limit is greater than the already-set memoryswap
+		// limit and the request doesn't update the memoryswap limit as well,
+		// error out.
+		if resources.Memory > cResources.MemorySwap && resources.MemorySwap == 0 {
+			return fmt.Errorf("Memory limit should be smaller than already set memoryswap limit, update the memoryswap at the same time")
+		}
+		cResources.Memory = resources.Memory
+	}
+	if resources.MemorySwap != 0 {
+		cResources.MemorySwap = resources.MemorySwap
+	}
+	if resources.MemoryReservation != 0 {
+		cResources.MemoryReservation = resources.MemoryReservation
+	}
+	if resources.KernelMemory != 0 {
+		cResources.KernelMemory = resources.KernelMemory
+	}
+
+	// update HostConfig of container
+	if hostConfig.RestartPolicy.Name != "" {
+		if container.HostConfig.AutoRemove && !hostConfig.RestartPolicy.IsNone() {
+			return fmt.Errorf("Restart policy cannot be updated because AutoRemove is enabled for the container")
+		}
+		container.HostConfig.RestartPolicy = hostConfig.RestartPolicy
+	}
+
+	if err := container.ToDisk(); err != nil {
+		logrus.Errorf("Error saving updated container: %v", err)
+		return err
+	}
+
+	return nil
+}
+
+// DetachAndUnmount uses a detached mount on all mount destinations, then
+// unmounts each volume normally.
+// This is used from daemon/archive for `docker cp`
+func (container *Container) DetachAndUnmount(volumeEventLog func(name, action string, attributes map[string]string)) error {
+	networkMounts := container.NetworkMounts()
+	mountPaths := make([]string, 0, len(container.MountPoints)+len(networkMounts))
+
+	for _, mntPoint := range container.MountPoints {
+		dest, err := container.GetResourcePath(mntPoint.Destination)
+		if err != nil {
+			logrus.Warnf("Failed to get volume destination path for container '%s' at '%s' while lazily unmounting: %v", container.ID, mntPoint.Destination, err)
+			continue
+		}
+		mountPaths = append(mountPaths, dest)
+	}
+
+	for _, m := range networkMounts {
+		dest, err := container.GetResourcePath(m.Destination)
+		if err != nil {
+			logrus.Warnf("Failed to get volume destination path for container '%s' at '%s' while lazily unmounting: %v", container.ID, m.Destination, err)
+			continue
+		}
+		mountPaths = append(mountPaths, dest)
+	}
+
+	for _, mountPath := range mountPaths {
+		if err := detachMounted(mountPath); err != nil {
+			logrus.Warnf("%s unmountVolumes: Failed to do lazy umount of volume '%s': %v", container.ID, mountPath, err)
+		}
+	}
+	return container.UnmountVolumes(volumeEventLog)
+}
+
+// copyExistingContents copies from the source to the destination and
+// ensures the ownership is appropriately set.
+func copyExistingContents(source, destination string) error {
+	srcList, err := ioutil.ReadDir(source)
+	if err != nil {
+		return err
+	}
+	if len(srcList) > 0 {
+		dstList, err := ioutil.ReadDir(destination)
+		if err != nil {
+			return err
+		}
+		if len(dstList) == 0 {
+			// If the destination volume is empty, copy the source's files into it
+			if err := chrootarchive.CopyWithTar(source, destination); err != nil {
+				return err
+			}
+		}
+	}
+	return copyOwnership(source, destination)
+}
+
+// copyOwnership copies the permissions and uid:gid of the source file
+// to the destination file
+func copyOwnership(source, destination string) error {
+	stat, err := system.Stat(source)
+	if err != nil {
+		return err
+	}
+
+	if err := os.Chown(destination, int(stat.UID()), int(stat.GID())); err != nil {
+		return err
+	}
+
+	return os.Chmod(destination, os.FileMode(stat.Mode()))
+}
+
+// TmpfsMounts returns the list of tmpfs mounts
+func (container *Container) TmpfsMounts() ([]Mount, error) {
+	var mounts []Mount
+	for dest, data := range container.HostConfig.Tmpfs {
+		mounts = append(mounts, Mount{
+			Source:      "tmpfs",
+			Destination: dest,
+			Data:        data,
+		})
+	}
+	for dest, mnt := range container.MountPoints {
+		if mnt.Type == mounttypes.TypeTmpfs {
+			data, err := volume.ConvertTmpfsOptions(mnt.Spec.TmpfsOptions, mnt.Spec.ReadOnly)
+			if err != nil {
+				return nil, err
+			}
+			mounts = append(mounts, Mount{
+				Source:      "tmpfs",
+				Destination: dest,
+				Data:        data,
+			})
+		}
+	}
+	return mounts, nil
+}
+
+// cleanResourcePath cleans a resource path and prepares to combine with mnt path
+func cleanResourcePath(path string) string {
+	return filepath.Join(string(os.PathSeparator), path)
+}
+
+// EnableServiceDiscoveryOnDefaultNetwork Enable service discovery on default network
+func (container *Container) EnableServiceDiscoveryOnDefaultNetwork() bool {
+	return false
+}
diff --git a/components/engine/container/container_windows.go b/components/engine/container/container_windows.go
new file mode 100644
index 0000000000..bffd9520e2
--- /dev/null
+++ b/components/engine/container/container_windows.go
@@ -0,0 +1,213 @@
+// +build windows
+
+package container
+
+import (
+	"fmt"
+	"os"
+
"path/filepath" + + containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/docker/pkg/system" +) + +const ( + containerSecretMountPath = `C:\ProgramData\Docker\secrets` + containerInternalSecretMountPath = `C:\ProgramData\Docker\internal\secrets` + containerInternalConfigsDirPath = `C:\ProgramData\Docker\internal\configs` +) + +// Container holds fields specific to the Windows implementation. See +// CommonContainer for standard fields common to all containers. +type Container struct { + CommonContainer + + // Fields below here are platform specific. + NetworkSharedContainerID string +} + +// ExitStatus provides exit reasons for a container. +type ExitStatus struct { + // The exit code with which the container exited. + ExitCode int +} + +// CreateDaemonEnvironment creates a new environment variable slice for this container. +func (container *Container) CreateDaemonEnvironment(_ bool, linkedEnv []string) []string { + // because the env on the container can override certain default values + // we need to replace the 'env' keys where they match and append anything + // else. + return ReplaceOrAppendEnvValues(linkedEnv, container.Config.Env) +} + +// UnmountIpcMounts unmounts Ipc related mounts. +// This is a NOOP on windows. +func (container *Container) UnmountIpcMounts(unmount func(pth string) error) { +} + +// IpcMounts returns the list of Ipc related mounts. +func (container *Container) IpcMounts() []Mount { + return nil +} + +// CreateSecretSymlinks creates symlinks to files in the secret mount. +func (container *Container) CreateSecretSymlinks() error { + for _, r := range container.SecretReferences { + if r.File == nil { + continue + } + resolvedPath, _, err := container.ResolvePath(getSecretTargetPath(r)) + if err != nil { + return err + } + if err := system.MkdirAll(filepath.Dir(resolvedPath), 0); err != nil { + return err + } + if err := os.Symlink(filepath.Join(containerInternalSecretMountPath, r.SecretID), resolvedPath); err != nil { + return err + } + } + + return nil +} + +// SecretMounts returns the mount for the secret path. +// All secrets are stored in a single mount on Windows. Target symlinks are +// created for each secret, pointing to the files in this mount. +func (container *Container) SecretMounts() []Mount { + var mounts []Mount + if len(container.SecretReferences) > 0 { + mounts = append(mounts, Mount{ + Source: container.SecretMountPath(), + Destination: containerInternalSecretMountPath, + Writable: false, + }) + } + + return mounts +} + +// UnmountSecrets unmounts the fs for secrets +func (container *Container) UnmountSecrets() error { + return os.RemoveAll(container.SecretMountPath()) +} + +// CreateConfigSymlinks creates symlinks to files in the config mount. +func (container *Container) CreateConfigSymlinks() error { + for _, configRef := range container.ConfigReferences { + if configRef.File == nil { + continue + } + resolvedPath, _, err := container.ResolvePath(configRef.File.Name) + if err != nil { + return err + } + if err := system.MkdirAll(filepath.Dir(resolvedPath), 0); err != nil { + return err + } + if err := os.Symlink(filepath.Join(containerInternalConfigsDirPath, configRef.ConfigID), resolvedPath); err != nil { + return err + } + } + + return nil +} + +// ConfigMounts returns the mount for configs. +// All configs are stored in a single mount on Windows. Target symlinks are +// created for each config, pointing to the files in this mount. 
+func (container *Container) ConfigMounts() []Mount { + var mounts []Mount + if len(container.ConfigReferences) > 0 { + mounts = append(mounts, Mount{ + Source: container.ConfigsDirPath(), + Destination: containerInternalConfigsDirPath, + Writable: false, + }) + } + + return mounts +} + +// DetachAndUnmount unmounts all volumes. +// On Windows it only delegates to `UnmountVolumes` since there is nothing to +// force unmount. +func (container *Container) DetachAndUnmount(volumeEventLog func(name, action string, attributes map[string]string)) error { + return container.UnmountVolumes(volumeEventLog) +} + +// TmpfsMounts returns the list of tmpfs mounts +func (container *Container) TmpfsMounts() ([]Mount, error) { + var mounts []Mount + return mounts, nil +} + +// UpdateContainer updates configuration of a container +func (container *Container) UpdateContainer(hostConfig *containertypes.HostConfig) error { + container.Lock() + defer container.Unlock() + + resources := hostConfig.Resources + if resources.CPUShares != 0 || + resources.Memory != 0 || + resources.NanoCPUs != 0 || + resources.CgroupParent != "" || + resources.BlkioWeight != 0 || + len(resources.BlkioWeightDevice) != 0 || + len(resources.BlkioDeviceReadBps) != 0 || + len(resources.BlkioDeviceWriteBps) != 0 || + len(resources.BlkioDeviceReadIOps) != 0 || + len(resources.BlkioDeviceWriteIOps) != 0 || + resources.CPUPeriod != 0 || + resources.CPUQuota != 0 || + resources.CPURealtimePeriod != 0 || + resources.CPURealtimeRuntime != 0 || + resources.CpusetCpus != "" || + resources.CpusetMems != "" || + len(resources.Devices) != 0 || + len(resources.DeviceCgroupRules) != 0 || + resources.DiskQuota != 0 || + resources.KernelMemory != 0 || + resources.MemoryReservation != 0 || + resources.MemorySwap != 0 || + resources.MemorySwappiness != nil || + resources.OomKillDisable != nil || + resources.PidsLimit != 0 || + len(resources.Ulimits) != 0 || + resources.CPUCount != 0 || + resources.CPUPercent != 0 || + resources.IOMaximumIOps != 0 || + resources.IOMaximumBandwidth != 0 { + return fmt.Errorf("resource updating isn't supported on Windows") + } + // update HostConfig of container + if hostConfig.RestartPolicy.Name != "" { + if container.HostConfig.AutoRemove && !hostConfig.RestartPolicy.IsNone() { + return fmt.Errorf("Restart policy cannot be updated because AutoRemove is enabled for the container") + } + container.HostConfig.RestartPolicy = hostConfig.RestartPolicy + } + return nil +} + +// cleanResourcePath cleans a resource path by removing C:\ syntax, and prepares +// to combine with a volume path +func cleanResourcePath(path string) string { + if len(path) >= 2 { + c := path[0] + if path[1] == ':' && ('a' <= c && c <= 'z' || 'A' <= c && c <= 'Z') { + path = path[2:] + } + } + return filepath.Join(string(os.PathSeparator), path) +} + +// BuildHostnameFile writes the container's hostname file. 
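+// On Windows this is a no-op: no hostname file is written for the container.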
+func (container *Container) BuildHostnameFile() error { + return nil +} + +// EnableServiceDiscoveryOnDefaultNetwork Enable service discovery on default network +func (container *Container) EnableServiceDiscoveryOnDefaultNetwork() bool { + return true +} diff --git a/components/engine/container/env.go b/components/engine/container/env.go new file mode 100644 index 0000000000..896a384c4d --- /dev/null +++ b/components/engine/container/env.go @@ -0,0 +1,43 @@ +package container + +import ( + "strings" +) + +// ReplaceOrAppendEnvValues returns the defaults with the overrides either +// replaced by env key or appended to the list +func ReplaceOrAppendEnvValues(defaults, overrides []string) []string { + cache := make(map[string]int, len(defaults)) + for i, e := range defaults { + parts := strings.SplitN(e, "=", 2) + cache[parts[0]] = i + } + + for _, value := range overrides { + // Values w/o = means they want this env to be removed/unset. + if !strings.Contains(value, "=") { + if i, exists := cache[value]; exists { + defaults[i] = "" // Used to indicate it should be removed + } + continue + } + + // Just do a normal set/update + parts := strings.SplitN(value, "=", 2) + if i, exists := cache[parts[0]]; exists { + defaults[i] = value + } else { + defaults = append(defaults, value) + } + } + + // Now remove all entries that we want to "unset" + for i := 0; i < len(defaults); i++ { + if defaults[i] == "" { + defaults = append(defaults[:i], defaults[i+1:]...) + i-- + } + } + + return defaults +} diff --git a/components/engine/container/env_test.go b/components/engine/container/env_test.go new file mode 100644 index 0000000000..4ebf2640a8 --- /dev/null +++ b/components/engine/container/env_test.go @@ -0,0 +1,24 @@ +package container + +import "testing" + +func TestReplaceAndAppendEnvVars(t *testing.T) { + var ( + d = []string{"HOME=/", "FOO=foo_default"} + // remove FOO from env + // remove BAR from env (nop) + o = []string{"HOME=/root", "TERM=xterm", "FOO", "BAR"} + ) + + env := ReplaceOrAppendEnvValues(d, o) + t.Logf("default=%v, override=%v, result=%v", d, o, env) + if len(env) != 2 { + t.Fatalf("expected len of 2 got %d", len(env)) + } + if env[0] != "HOME=/root" { + t.Fatalf("expected HOME=/root got '%s'", env[0]) + } + if env[1] != "TERM=xterm" { + t.Fatalf("expected TERM=xterm got '%s'", env[1]) + } +} diff --git a/components/engine/container/health.go b/components/engine/container/health.go new file mode 100644 index 0000000000..6e3cd12f3b --- /dev/null +++ b/components/engine/container/health.go @@ -0,0 +1,49 @@ +package container + +import ( + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types" +) + +// Health holds the current container health-check state +type Health struct { + types.Health + stop chan struct{} // Write struct{} to stop the monitor +} + +// String returns a human-readable description of the health-check state +func (s *Health) String() string { + // This happens when the container is being shutdown and the monitor has stopped + // or the monitor has yet to be setup. + if s.stop == nil { + return types.Unhealthy + } + + switch s.Status { + case types.Starting: + return "health: starting" + default: // Healthy and Unhealthy are clear on their own + return s.Status + } +} + +// OpenMonitorChannel creates and returns a new monitor channel. If there already is one, +// it returns nil. 
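+// A hypothetical monitor loop would select on the returned channel to know
+// when to stop probing (probeInterval and runProbe are illustrative names,
+// not part of this package):
+//
+//	stop := health.OpenMonitorChannel()
+//	for {
+//		select {
+//		case <-stop:
+//			return
+//		case <-time.After(probeInterval):
+//			runProbe()
+//		}
+//	}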
+func (s *Health) OpenMonitorChannel() chan struct{} { + if s.stop == nil { + logrus.Debug("OpenMonitorChannel") + s.stop = make(chan struct{}) + return s.stop + } + return nil +} + +// CloseMonitorChannel closes any existing monitor channel. +func (s *Health) CloseMonitorChannel() { + if s.stop != nil { + logrus.Debug("CloseMonitorChannel: waiting for probe to stop") + close(s.stop) + s.stop = nil + logrus.Debug("CloseMonitorChannel done") + } +} diff --git a/components/engine/container/history.go b/components/engine/container/history.go new file mode 100644 index 0000000000..c80c2aa0cc --- /dev/null +++ b/components/engine/container/history.go @@ -0,0 +1,30 @@ +package container + +import "sort" + +// History is a convenience type for storing a list of containers, +// sorted by creation date in descendant order. +type History []*Container + +// Len returns the number of containers in the history. +func (history *History) Len() int { + return len(*history) +} + +// Less compares two containers and returns true if the second one +// was created before the first one. +func (history *History) Less(i, j int) bool { + containers := *history + return containers[j].Created.Before(containers[i].Created) +} + +// Swap switches containers i and j positions in the history. +func (history *History) Swap(i, j int) { + containers := *history + containers[i], containers[j] = containers[j], containers[i] +} + +// sort orders the history by creation date in descendant order. +func (history *History) sort() { + sort.Sort(history) +} diff --git a/components/engine/container/memory_store.go b/components/engine/container/memory_store.go new file mode 100644 index 0000000000..706407a71c --- /dev/null +++ b/components/engine/container/memory_store.go @@ -0,0 +1,95 @@ +package container + +import ( + "sync" +) + +// memoryStore implements a Store in memory. +type memoryStore struct { + s map[string]*Container + sync.RWMutex +} + +// NewMemoryStore initializes a new memory store. +func NewMemoryStore() Store { + return &memoryStore{ + s: make(map[string]*Container), + } +} + +// Add appends a new container to the memory store. +// It overrides the id if it existed before. +func (c *memoryStore) Add(id string, cont *Container) { + c.Lock() + c.s[id] = cont + c.Unlock() +} + +// Get returns a container from the store by id. +func (c *memoryStore) Get(id string) *Container { + var res *Container + c.RLock() + res = c.s[id] + c.RUnlock() + return res +} + +// Delete removes a container from the store by id. +func (c *memoryStore) Delete(id string) { + c.Lock() + delete(c.s, id) + c.Unlock() +} + +// List returns a sorted list of containers from the store. +// The containers are ordered by creation date. +func (c *memoryStore) List() []*Container { + containers := History(c.all()) + containers.sort() + return containers +} + +// Size returns the number of containers in the store. +func (c *memoryStore) Size() int { + c.RLock() + defer c.RUnlock() + return len(c.s) +} + +// First returns the first container found in the store by a given filter. +func (c *memoryStore) First(filter StoreFilter) *Container { + for _, cont := range c.all() { + if filter(cont) { + return cont + } + } + return nil +} + +// ApplyAll calls the reducer function with every container in the store. +// This operation is asynchronous in the memory store. +// NOTE: Modifications to the store MUST NOT be done by the StoreReducer. 
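+// Each container is handed to the reducer in its own goroutine, and ApplyAll
+// returns only after every goroutine has finished, so the reducer must be
+// safe for concurrent use. A sketch of a concurrency-safe reducer (mu and
+// count are illustrative):
+//
+//	var mu sync.Mutex
+//	count := 0
+//	store.ApplyAll(func(c *Container) {
+//		mu.Lock()
+//		count++
+//		mu.Unlock()
+//	})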
+func (c *memoryStore) ApplyAll(apply StoreReducer) { + wg := new(sync.WaitGroup) + for _, cont := range c.all() { + wg.Add(1) + go func(container *Container) { + apply(container) + wg.Done() + }(cont) + } + + wg.Wait() +} + +func (c *memoryStore) all() []*Container { + c.RLock() + containers := make([]*Container, 0, len(c.s)) + for _, cont := range c.s { + containers = append(containers, cont) + } + c.RUnlock() + return containers +} + +var _ Store = &memoryStore{} diff --git a/components/engine/container/memory_store_test.go b/components/engine/container/memory_store_test.go new file mode 100644 index 0000000000..8d26d1a962 --- /dev/null +++ b/components/engine/container/memory_store_test.go @@ -0,0 +1,106 @@ +package container + +import ( + "testing" + "time" +) + +func TestNewMemoryStore(t *testing.T) { + s := NewMemoryStore() + m, ok := s.(*memoryStore) + if !ok { + t.Fatalf("store is not a memory store %v", s) + } + if m.s == nil { + t.Fatal("expected store map to not be nil") + } +} + +func TestAddContainers(t *testing.T) { + s := NewMemoryStore() + s.Add("id", NewBaseContainer("id", "root")) + if s.Size() != 1 { + t.Fatalf("expected store size 1, got %v", s.Size()) + } +} + +func TestGetContainer(t *testing.T) { + s := NewMemoryStore() + s.Add("id", NewBaseContainer("id", "root")) + c := s.Get("id") + if c == nil { + t.Fatal("expected container to not be nil") + } +} + +func TestDeleteContainer(t *testing.T) { + s := NewMemoryStore() + s.Add("id", NewBaseContainer("id", "root")) + s.Delete("id") + if c := s.Get("id"); c != nil { + t.Fatalf("expected container to be nil after removal, got %v", c) + } + + if s.Size() != 0 { + t.Fatalf("expected store size to be 0, got %v", s.Size()) + } +} + +func TestListContainers(t *testing.T) { + s := NewMemoryStore() + + cont := NewBaseContainer("id", "root") + cont.Created = time.Now() + cont2 := NewBaseContainer("id2", "root") + cont2.Created = time.Now().Add(24 * time.Hour) + + s.Add("id", cont) + s.Add("id2", cont2) + + list := s.List() + if len(list) != 2 { + t.Fatalf("expected list size 2, got %v", len(list)) + } + if list[0].ID != "id2" { + t.Fatalf("expected id2, got %v", list[0].ID) + } +} + +func TestFirstContainer(t *testing.T) { + s := NewMemoryStore() + + s.Add("id", NewBaseContainer("id", "root")) + s.Add("id2", NewBaseContainer("id2", "root")) + + first := s.First(func(cont *Container) bool { + return cont.ID == "id2" + }) + + if first == nil { + t.Fatal("expected container to not be nil") + } + if first.ID != "id2" { + t.Fatalf("expected id2, got %v", first) + } +} + +func TestApplyAllContainer(t *testing.T) { + s := NewMemoryStore() + + s.Add("id", NewBaseContainer("id", "root")) + s.Add("id2", NewBaseContainer("id2", "root")) + + s.ApplyAll(func(cont *Container) { + if cont.ID == "id2" { + cont.ID = "newID" + } + }) + + cont := s.Get("id2") + if cont == nil { + t.Fatal("expected container to not be nil") + } + if cont.ID != "newID" { + t.Fatalf("expected newID, got %v", cont.ID) + } +} diff --git a/components/engine/container/monitor.go b/components/engine/container/monitor.go new file mode 100644 index 0000000000..f05e72b25f --- /dev/null +++ b/components/engine/container/monitor.go @@ -0,0 +1,46 @@ +package container + +import ( + "time" + + "github.com/Sirupsen/logrus" +) + +const ( + loggerCloseTimeout = 10 * time.Second +) + +// Reset puts a container into a state where it can be restarted again. 
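+//
+// When lock is true, Reset acquires the container lock itself; callers
+// that already hold the lock must pass false, e.g. (a sketch):
+//
+//	ctr.Lock()
+//	ctr.Reset(false)
+//	ctr.Unlock()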
+func (container *Container) Reset(lock bool) {
+	if lock {
+		container.Lock()
+		defer container.Unlock()
+	}
+
+	if err := container.CloseStreams(); err != nil {
+		logrus.Errorf("%s: %s", container.ID, err)
+	}
+
+	// Re-create a brand new stdin pipe once the container has exited
+	if container.Config.OpenStdin {
+		container.StreamConfig.NewInputPipes()
+	}
+
+	if container.LogDriver != nil {
+		if container.LogCopier != nil {
+			exit := make(chan struct{})
+			go func() {
+				container.LogCopier.Wait()
+				close(exit)
+			}()
+			select {
+			case <-time.After(loggerCloseTimeout):
+				logrus.Warn("Logger didn't exit in time: logs may be truncated")
+			case <-exit:
+			}
+		}
+		container.LogDriver.Close()
+		container.LogCopier = nil
+		container.LogDriver = nil
+	}
+}
diff --git a/components/engine/container/mounts_unix.go b/components/engine/container/mounts_unix.go
new file mode 100644
index 0000000000..c52abed2dc
--- /dev/null
+++ b/components/engine/container/mounts_unix.go
@@ -0,0 +1,12 @@
+// +build !windows
+
+package container
+
+// Mount contains information for a mount operation.
+type Mount struct {
+	Source      string `json:"source"`
+	Destination string `json:"destination"`
+	Writable    bool   `json:"writable"`
+	Data        string `json:"data"`
+	Propagation string `json:"mountpropagation"`
+}
diff --git a/components/engine/container/mounts_windows.go b/components/engine/container/mounts_windows.go
new file mode 100644
index 0000000000..01b327f788
--- /dev/null
+++ b/components/engine/container/mounts_windows.go
@@ -0,0 +1,8 @@
+package container
+
+// Mount contains information for a mount operation.
+type Mount struct {
+	Source      string `json:"source"`
+	Destination string `json:"destination"`
+	Writable    bool   `json:"writable"`
+}
diff --git a/components/engine/container/state.go b/components/engine/container/state.go
new file mode 100644
index 0000000000..e99fe008bb
--- /dev/null
+++ b/components/engine/container/state.go
@@ -0,0 +1,378 @@
+package container
+
+import (
+	"errors"
+	"fmt"
+	"sync"
+	"time"
+
+	"golang.org/x/net/context"
+
+	"github.com/docker/docker/api/types"
+	"github.com/docker/go-units"
+)
+
+// State holds the current container state, and has methods to get and
+// set the state. Container embeds State, which allows all of the
+// methods defined on State to be called on Container.
+type State struct {
+	sync.Mutex
+	// Note that `Running` and `Paused` are not mutually exclusive:
+	// When pausing a container (on Linux), the cgroups freezer is used to suspend
+	// all processes in the container. Freezing the process requires the process to
+	// be running. As a result, paused containers are both `Running` _and_ `Paused`.
+	Running           bool
+	Paused            bool
+	Restarting        bool
+	OOMKilled         bool
+	RemovalInProgress bool // No need for this to be persisted on disk.
+	Dead              bool
+	Pid               int
+	ExitCodeValue     int    `json:"ExitCode"`
+	ErrorMsg          string `json:"Error"` // contains last known error when starting the container
+	StartedAt         time.Time
+	FinishedAt        time.Time
+	Health            *Health
+
+	waitStop   chan struct{}
+	waitRemove chan struct{}
+}
+
+// StateStatus is used to return container wait results.
+// It implements the exec.ExitCode interface.
+// This type is needed as State includes a sync.Mutex field, which makes
+// copying it unsafe.
+type StateStatus struct {
+	exitCode int
+	err      error
+}
+
+// ExitCode returns current exitcode for the state.
+func (s StateStatus) ExitCode() int {
+	return s.exitCode
+}
+
+// Err returns current error for the state. Returns nil if the container had
+// exited on its own.
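+//
+// A typical consumer checks the error before the exit code (a sketch;
+// Wait is defined further down in this file):
+//
+//	status := <-ctr.Wait(ctx, WaitConditionNotRunning)
+//	if err := status.Err(); err != nil {
+//		return err
+//	}
+//	exitCode := status.ExitCode()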
+func (s StateStatus) Err() error {
+	return s.err
+}
+
+// NewState creates a default state object with fresh channels for state changes.
+func NewState() *State {
+	return &State{
+		waitStop:   make(chan struct{}),
+		waitRemove: make(chan struct{}),
+	}
+}
+
+// String returns a human-readable description of the state.
+func (s *State) String() string {
+	if s.Running {
+		if s.Paused {
+			return fmt.Sprintf("Up %s (Paused)", units.HumanDuration(time.Now().UTC().Sub(s.StartedAt)))
+		}
+		if s.Restarting {
+			return fmt.Sprintf("Restarting (%d) %s ago", s.ExitCodeValue, units.HumanDuration(time.Now().UTC().Sub(s.FinishedAt)))
+		}
+
+		if h := s.Health; h != nil {
+			return fmt.Sprintf("Up %s (%s)", units.HumanDuration(time.Now().UTC().Sub(s.StartedAt)), h.String())
+		}
+
+		return fmt.Sprintf("Up %s", units.HumanDuration(time.Now().UTC().Sub(s.StartedAt)))
+	}
+
+	if s.RemovalInProgress {
+		return "Removal In Progress"
+	}
+
+	if s.Dead {
+		return "Dead"
+	}
+
+	if s.StartedAt.IsZero() {
+		return "Created"
+	}
+
+	if s.FinishedAt.IsZero() {
+		return ""
+	}
+
+	return fmt.Sprintf("Exited (%d) %s ago", s.ExitCodeValue, units.HumanDuration(time.Now().UTC().Sub(s.FinishedAt)))
+}
+
+// HealthString returns a single string to describe health status.
+func (s *State) HealthString() string {
+	if s.Health == nil {
+		return types.NoHealthcheck
+	}
+
+	return s.Health.String()
+}
+
+// IsValidHealthString checks if the provided string is a valid container health status or not.
+func IsValidHealthString(s string) bool {
+	return s == types.Starting ||
+		s == types.Healthy ||
+		s == types.Unhealthy ||
+		s == types.NoHealthcheck
+}
+
+// StateString returns a single string to describe the state.
+func (s *State) StateString() string {
+	if s.Running {
+		if s.Paused {
+			return "paused"
+		}
+		if s.Restarting {
+			return "restarting"
+		}
+		return "running"
+	}
+
+	if s.RemovalInProgress {
+		return "removing"
+	}
+
+	if s.Dead {
+		return "dead"
+	}
+
+	if s.StartedAt.IsZero() {
+		return "created"
+	}
+
+	return "exited"
+}
+
+// IsValidStateString checks if the provided string is a valid container state or not.
+func IsValidStateString(s string) bool {
+	if s != "paused" &&
+		s != "restarting" &&
+		s != "removing" &&
+		s != "running" &&
+		s != "dead" &&
+		s != "created" &&
+		s != "exited" {
+		return false
+	}
+	return true
+}
+
+// WaitCondition is an enum type for different states to wait for.
+type WaitCondition int
+
+// Possible WaitCondition values.
+//
+// WaitConditionNotRunning (default) is used to wait for any of the non-running
+// states: "created", "exited", "dead", "removing", or "removed".
+//
+// WaitConditionNextExit is used to wait for the next time the state changes
+// to a non-running state. If the state is currently "created" or "exited",
+// this would cause Wait() to block until either the container runs and exits
+// or is removed.
+//
+// WaitConditionRemoved is used to wait for the container to be removed.
+const (
+	WaitConditionNotRunning WaitCondition = iota
+	WaitConditionNextExit
+	WaitConditionRemoved
+)
+
+// Wait waits until the container is in a certain state indicated by the given
+// condition. A context must be used for cancelling the request, controlling
+// timeouts, and avoiding goroutine leaks. Wait must be called without holding
+// the state lock. Returns a channel from which the caller will receive the
+// result.
If the container exited on its own, the result's Err() method will
+// be nil and its ExitCode() method will return the container's exit code;
+// otherwise, the result's Err() method will return an error indicating why the
+// wait operation failed.
+func (s *State) Wait(ctx context.Context, condition WaitCondition) <-chan StateStatus {
+	s.Lock()
+	defer s.Unlock()
+
+	if condition == WaitConditionNotRunning && !s.Running {
+		// Buffer so we can put it in the channel now.
+		resultC := make(chan StateStatus, 1)
+
+		// Send the current status.
+		resultC <- StateStatus{
+			exitCode: s.ExitCode(),
+			err:      s.Err(),
+		}
+
+		return resultC
+	}
+
+	// If we are waiting only for removal, the waitStop channel should
+	// remain nil and block forever.
+	var waitStop chan struct{}
+	if condition < WaitConditionRemoved {
+		waitStop = s.waitStop
+	}
+
+	// Always wait for removal, just in case the container gets removed
+	// while it is still in a "created" state, in which case it is never
+	// actually stopped.
+	waitRemove := s.waitRemove
+
+	resultC := make(chan StateStatus)
+
+	go func() {
+		select {
+		case <-ctx.Done():
+			// Context timeout or cancellation.
+			resultC <- StateStatus{
+				exitCode: -1,
+				err:      ctx.Err(),
+			}
+			return
+		case <-waitStop:
+		case <-waitRemove:
+		}
+
+		s.Lock()
+		result := StateStatus{
+			exitCode: s.ExitCode(),
+			err:      s.Err(),
+		}
+		s.Unlock()
+
+		resultC <- result
+	}()
+
+	return resultC
+}
+
+// IsRunning returns whether the running flag is set. Used by Container to check whether a container is running.
+func (s *State) IsRunning() bool {
+	s.Lock()
+	res := s.Running
+	s.Unlock()
+	return res
+}
+
+// GetPID returns the process ID of the container.
+func (s *State) GetPID() int {
+	s.Lock()
+	res := s.Pid
+	s.Unlock()
+	return res
+}
+
+// ExitCode returns current exitcode for the state. Take lock before if state
+// may be shared.
+func (s *State) ExitCode() int {
+	return s.ExitCodeValue
+}
+
+// SetExitCode sets current exitcode for the state. Take lock before if state
+// may be shared.
+func (s *State) SetExitCode(ec int) {
+	s.ExitCodeValue = ec
+}
+
+// SetRunning sets the state of the container to "running".
+func (s *State) SetRunning(pid int, initial bool) {
+	s.ErrorMsg = ""
+	s.Running = true
+	s.Restarting = false
+	s.ExitCodeValue = 0
+	s.Pid = pid
+	if initial {
+		s.StartedAt = time.Now().UTC()
+	}
+}
+
+// SetStopped sets the container state to "stopped" without locking.
+func (s *State) SetStopped(exitStatus *ExitStatus) {
+	s.Running = false
+	s.Paused = false
+	s.Restarting = false
+	s.Pid = 0
+	s.FinishedAt = time.Now().UTC()
+	s.setFromExitStatus(exitStatus)
+	close(s.waitStop) // Fire waiters for stop
+	s.waitStop = make(chan struct{})
+}
+
+// SetRestarting sets the container state to "restarting" without locking.
+// It also sets the container PID to 0.
+func (s *State) SetRestarting(exitStatus *ExitStatus) {
+	// we should consider the container running when it is restarting because of
+	// all the checks in docker around rm/stop/etc
+	s.Running = true
+	s.Restarting = true
+	s.Pid = 0
+	s.FinishedAt = time.Now().UTC()
+	s.setFromExitStatus(exitStatus)
+	close(s.waitStop) // Fire waiters for stop
+	s.waitStop = make(chan struct{})
+}
+
+// SetError sets the container's error state. This is useful when we want to
+// know the error that occurred when the container transitions to another
+// state, e.g. when inspecting it.
+func (s *State) SetError(err error) {
+	s.ErrorMsg = err.Error()
+}
+
+// IsPaused returns whether the container is paused or not.
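+// Like the other State getters, it acquires the state lock, so the
+// caller must not already hold it.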
+func (s *State) IsPaused() bool {
+	s.Lock()
+	res := s.Paused
+	s.Unlock()
+	return res
+}
+
+// IsRestarting returns whether the container is restarting or not.
+func (s *State) IsRestarting() bool {
+	s.Lock()
+	res := s.Restarting
+	s.Unlock()
+	return res
+}
+
+// SetRemovalInProgress sets the container state as being removed.
+// It returns true if the container was already in that state.
+func (s *State) SetRemovalInProgress() bool {
+	s.Lock()
+	defer s.Unlock()
+	if s.RemovalInProgress {
+		return true
+	}
+	s.RemovalInProgress = true
+	return false
+}
+
+// ResetRemovalInProgress resets the RemovalInProgress flag to false.
+func (s *State) ResetRemovalInProgress() {
+	s.Lock()
+	s.RemovalInProgress = false
+	s.Unlock()
+}
+
+// SetDead sets the container state to "dead".
+func (s *State) SetDead() {
+	s.Lock()
+	s.Dead = true
+	s.Unlock()
+}
+
+// SetRemoved assumes this container is already in the "dead" state and
+// closes the internal waitRemove channel to unblock callers waiting for a
+// container to be removed.
+func (s *State) SetRemoved() {
+	s.Lock()
+	close(s.waitRemove) // Unblock those waiting on remove.
+	s.Unlock()
+}
+
+// Err returns an error if there is one.
+func (s *State) Err() error {
+	if s.ErrorMsg != "" {
+		return errors.New(s.ErrorMsg)
+	}
+	return nil
+}
diff --git a/components/engine/container/state_solaris.go b/components/engine/container/state_solaris.go
new file mode 100644
index 0000000000..1229650efa
--- /dev/null
+++ b/components/engine/container/state_solaris.go
@@ -0,0 +1,7 @@
+package container
+
+// setFromExitStatus is a platform specific helper function to set the state
+// based on the ExitStatus structure.
+func (s *State) setFromExitStatus(exitStatus *ExitStatus) {
+	s.ExitCodeValue = exitStatus.ExitCode
+}
diff --git a/components/engine/container/state_test.go b/components/engine/container/state_test.go
new file mode 100644
index 0000000000..2a90e5541d
--- /dev/null
+++ b/components/engine/container/state_test.go
@@ -0,0 +1,168 @@
+package container
+
+import (
+	"context"
+	"testing"
+	"time"
+
+	"github.com/docker/docker/api/types"
+)
+
+func TestIsValidHealthString(t *testing.T) {
+	contexts := []struct {
+		Health   string
+		Expected bool
+	}{
+		{types.Healthy, true},
+		{types.Unhealthy, true},
+		{types.Starting, true},
+		{types.NoHealthcheck, true},
+		{"fail", false},
+	}
+
+	for _, c := range contexts {
+		v := IsValidHealthString(c.Health)
+		if v != c.Expected {
+			t.Fatalf("Expected %t, but got %t", c.Expected, v)
+		}
+	}
+}
+
+func TestStateRunStop(t *testing.T) {
+	s := NewState()
+
+	// Begin a wait with WaitConditionRemoved. It should complete
+	// within 200 milliseconds.
+	ctx, cancel := context.WithTimeout(context.Background(), 200*time.Millisecond)
+	defer cancel()
+	removalWait := s.Wait(ctx, WaitConditionRemoved)
+
+	// Full lifecycle two times.
+	for i := 1; i <= 2; i++ {
+		// A wait with WaitConditionNotRunning should return
+		// immediately since the state is now either "created" (on the
+		// first iteration) or "exited" (on the second iteration). It
+		// shouldn't take more than 50 milliseconds.
+		ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
+		defer cancel()
+		// Expect the exit code to be i-1 since it should be the exit
+		// code from the previous loop or 0 for the created state.
+ if status := <-s.Wait(ctx, WaitConditionNotRunning); status.ExitCode() != i-1 { + t.Fatalf("ExitCode %v, expected %v, err %q", status.ExitCode(), i-1, status.Err()) + } + + // A wait with WaitConditionNextExit should block until the + // container has started and exited. It shouldn't take more + // than 100 milliseconds. + ctx, cancel = context.WithTimeout(context.Background(), 100*time.Millisecond) + defer cancel() + initialWait := s.Wait(ctx, WaitConditionNextExit) + + // Set the state to "Running". + s.Lock() + s.SetRunning(i, true) + s.Unlock() + + // Assert desired state. + if !s.IsRunning() { + t.Fatal("State not running") + } + if s.Pid != i { + t.Fatalf("Pid %v, expected %v", s.Pid, i) + } + if s.ExitCode() != 0 { + t.Fatalf("ExitCode %v, expected 0", s.ExitCode()) + } + + // Now that it's running, a wait with WaitConditionNotRunning + // should block until we stop the container. It shouldn't take + // more than 100 milliseconds. + ctx, cancel = context.WithTimeout(context.Background(), 100*time.Millisecond) + defer cancel() + exitWait := s.Wait(ctx, WaitConditionNotRunning) + + // Set the state to "Exited". + s.Lock() + s.SetStopped(&ExitStatus{ExitCode: i}) + s.Unlock() + + // Assert desired state. + if s.IsRunning() { + t.Fatal("State is running") + } + if s.ExitCode() != i { + t.Fatalf("ExitCode %v, expected %v", s.ExitCode(), i) + } + if s.Pid != 0 { + t.Fatalf("Pid %v, expected 0", s.Pid) + } + + // Receive the initialWait result. + if status := <-initialWait; status.ExitCode() != i { + t.Fatalf("ExitCode %v, expected %v, err %q", status.ExitCode(), i, status.Err()) + } + + // Receive the exitWait result. + if status := <-exitWait; status.ExitCode() != i { + t.Fatalf("ExitCode %v, expected %v, err %q", status.ExitCode(), i, status.Err()) + } + } + + // Set the state to dead and removed. + s.SetDead() + s.SetRemoved() + + // Wait for removed status or timeout. + if status := <-removalWait; status.ExitCode() != 2 { + // Should have the final exit code from the loop. + t.Fatalf("Removal wait exitCode %v, expected %v, err %q", status.ExitCode(), 2, status.Err()) + } +} + +func TestStateTimeoutWait(t *testing.T) { + s := NewState() + + s.Lock() + s.SetRunning(0, true) + s.Unlock() + + // Start a wait with a timeout. + ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond) + defer cancel() + waitC := s.Wait(ctx, WaitConditionNotRunning) + + // It should timeout *before* this 200ms timer does. + select { + case <-time.After(200 * time.Millisecond): + t.Fatal("Stop callback doesn't fire in 200 milliseconds") + case status := <-waitC: + t.Log("Stop callback fired") + // Should be a timeout error. + if status.Err() == nil { + t.Fatal("expected timeout error, got nil") + } + if status.ExitCode() != -1 { + t.Fatalf("expected exit code %v, got %v", -1, status.ExitCode()) + } + } + + s.Lock() + s.SetStopped(&ExitStatus{ExitCode: 0}) + s.Unlock() + + // Start another wait with a timeout. This one should return + // immediately. 
+	ctx, cancel = context.WithTimeout(context.Background(), 100*time.Millisecond)
+	defer cancel()
+	waitC = s.Wait(ctx, WaitConditionNotRunning)
+
+	select {
+	case <-time.After(200 * time.Millisecond):
+		t.Fatal("Stop callback doesn't fire in 200 milliseconds")
+	case status := <-waitC:
+		t.Log("Stop callback fired")
+		if status.ExitCode() != 0 {
+			t.Fatalf("expected exit code %v, got %v, err %q", 0, status.ExitCode(), status.Err())
+		}
+	}
+}
diff --git a/components/engine/container/state_unix.go b/components/engine/container/state_unix.go
new file mode 100644
index 0000000000..a2fa5afc28
--- /dev/null
+++ b/components/engine/container/state_unix.go
@@ -0,0 +1,10 @@
+// +build linux freebsd
+
+package container
+
+// setFromExitStatus is a platform specific helper function to set the state
+// based on the ExitStatus structure.
+func (s *State) setFromExitStatus(exitStatus *ExitStatus) {
+	s.ExitCodeValue = exitStatus.ExitCode
+	s.OOMKilled = exitStatus.OOMKilled
+}
diff --git a/components/engine/container/state_windows.go b/components/engine/container/state_windows.go
new file mode 100644
index 0000000000..1229650efa
--- /dev/null
+++ b/components/engine/container/state_windows.go
@@ -0,0 +1,7 @@
+package container
+
+// setFromExitStatus is a platform specific helper function to set the state
+// based on the ExitStatus structure.
+func (s *State) setFromExitStatus(exitStatus *ExitStatus) {
+	s.ExitCodeValue = exitStatus.ExitCode
+}
diff --git a/components/engine/container/store.go b/components/engine/container/store.go
new file mode 100644
index 0000000000..042fb1a349
--- /dev/null
+++ b/components/engine/container/store.go
@@ -0,0 +1,28 @@
+package container
+
+// StoreFilter defines a function to filter
+// containers in the store.
+type StoreFilter func(*Container) bool
+
+// StoreReducer defines a function to
+// manipulate containers in the store.
+type StoreReducer func(*Container)
+
+// Store defines an interface that
+// any container store must implement.
+type Store interface {
+	// Add appends a new container to the store.
+	Add(string, *Container)
+	// Get returns a container from the store by the identifier it was stored with.
+	Get(string) *Container
+	// Delete removes a container from the store by the identifier it was stored with.
+	Delete(string)
+	// List returns a list of containers from the store.
+	List() []*Container
+	// Size returns the number of containers in the store.
+	Size() int
+	// First returns the first container found in the store by a given filter.
+	First(StoreFilter) *Container
+	// ApplyAll calls the reducer function with every container in the store.
+	ApplyAll(StoreReducer)
+}
diff --git a/components/engine/container/stream/attach.go b/components/engine/container/stream/attach.go
new file mode 100644
index 0000000000..5f63f88f39
--- /dev/null
+++ b/components/engine/container/stream/attach.go
@@ -0,0 +1,178 @@
+package stream
+
+import (
+	"io"
+	"sync"
+
+	"golang.org/x/net/context"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/docker/pkg/promise"
+	"github.com/docker/docker/pkg/term"
+)
+
+var defaultEscapeSequence = []byte{16, 17} // ctrl-p, ctrl-q
+
+// AttachConfig is the config struct used to attach a client to a stream's stdio.
+type AttachConfig struct {
+	// Tells the attach copier that the stream's stdin is a TTY and to look for
+	// escape sequences in stdin to detach from the stream.
+ // When true the escape sequence is not passed to the underlying stream + TTY bool + // Specifies the detach keys the client will be using + // Only useful when `TTY` is true + DetachKeys []byte + + // CloseStdin signals that once done, stdin for the attached stream should be closed + // For example, this would close the attached container's stdin. + CloseStdin bool + + // UseStd* indicate whether the client has requested to be connected to the + // given stream or not. These flags are used instead of checking Std* != nil + // at points before the client streams Std* are wired up. + UseStdin, UseStdout, UseStderr bool + + // CStd* are the streams directly connected to the container + CStdin io.WriteCloser + CStdout, CStderr io.ReadCloser + + // Provide client streams to wire up to + Stdin io.ReadCloser + Stdout, Stderr io.Writer +} + +// AttachStreams attaches the container's streams to the AttachConfig +func (c *Config) AttachStreams(cfg *AttachConfig) { + if cfg.UseStdin { + cfg.CStdin = c.StdinPipe() + } + + if cfg.UseStdout { + cfg.CStdout = c.StdoutPipe() + } + + if cfg.UseStderr { + cfg.CStderr = c.StderrPipe() + } +} + +// CopyStreams starts goroutines to copy data in and out to/from the container +func (c *Config) CopyStreams(ctx context.Context, cfg *AttachConfig) chan error { + var ( + wg sync.WaitGroup + errors = make(chan error, 3) + ) + + if cfg.Stdin != nil { + wg.Add(1) + } + + if cfg.Stdout != nil { + wg.Add(1) + } + + if cfg.Stderr != nil { + wg.Add(1) + } + + // Connect stdin of container to the attach stdin stream. + go func() { + if cfg.Stdin == nil { + return + } + logrus.Debug("attach: stdin: begin") + + var err error + if cfg.TTY { + _, err = copyEscapable(cfg.CStdin, cfg.Stdin, cfg.DetachKeys) + } else { + _, err = io.Copy(cfg.CStdin, cfg.Stdin) + } + if err == io.ErrClosedPipe { + err = nil + } + if err != nil { + logrus.Errorf("attach: stdin: %s", err) + errors <- err + } + if cfg.CloseStdin && !cfg.TTY { + cfg.CStdin.Close() + } else { + // No matter what, when stdin is closed (io.Copy unblock), close stdout and stderr + if cfg.CStdout != nil { + cfg.CStdout.Close() + } + if cfg.CStderr != nil { + cfg.CStderr.Close() + } + } + logrus.Debug("attach: stdin: end") + wg.Done() + }() + + attachStream := func(name string, stream io.Writer, streamPipe io.ReadCloser) { + if stream == nil { + return + } + + logrus.Debugf("attach: %s: begin", name) + _, err := io.Copy(stream, streamPipe) + if err == io.ErrClosedPipe { + err = nil + } + if err != nil { + logrus.Errorf("attach: %s: %v", name, err) + errors <- err + } + // Make sure stdin gets closed + if cfg.Stdin != nil { + cfg.Stdin.Close() + } + streamPipe.Close() + logrus.Debugf("attach: %s: end", name) + wg.Done() + } + + go attachStream("stdout", cfg.Stdout, cfg.CStdout) + go attachStream("stderr", cfg.Stderr, cfg.CStderr) + + return promise.Go(func() error { + done := make(chan struct{}) + go func() { + wg.Wait() + close(done) + }() + select { + case <-done: + case <-ctx.Done(): + // close all pipes + if cfg.CStdin != nil { + cfg.CStdin.Close() + } + if cfg.CStdout != nil { + cfg.CStdout.Close() + } + if cfg.CStderr != nil { + cfg.CStderr.Close() + } + <-done + } + close(errors) + for err := range errors { + if err != nil { + return err + } + } + return nil + }) +} + +func copyEscapable(dst io.Writer, src io.ReadCloser, keys []byte) (written int64, err error) { + if len(keys) == 0 { + keys = defaultEscapeSequence + } + pr := term.NewEscapeProxy(src, keys) + defer src.Close() + + return io.Copy(dst, pr) +} diff --git 
a/components/engine/container/stream/streams.go b/components/engine/container/stream/streams.go
new file mode 100644
index 0000000000..735bab5107
--- /dev/null
+++ b/components/engine/container/stream/streams.go
@@ -0,0 +1,146 @@
+package stream
+
+import (
+	"fmt"
+	"io"
+	"io/ioutil"
+	"strings"
+	"sync"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/docker/libcontainerd"
+	"github.com/docker/docker/pkg/broadcaster"
+	"github.com/docker/docker/pkg/ioutils"
+	"github.com/docker/docker/pkg/pools"
+)
+
+// Config holds information about I/O streams managed together.
+//
+// Config.StdinPipe returns a WriteCloser which can be used to feed data
+// to the standard input of the Config's active process.
+// Config.StdoutPipe and Config.StderrPipe each return a ReadCloser
+// which can be used to retrieve the standard output (and error) generated
+// by the container's active process. The output (and error) are actually
+// copied and delivered to all StdoutPipe and StderrPipe consumers, using
+// a kind of "broadcaster".
+type Config struct {
+	sync.WaitGroup
+	stdout    *broadcaster.Unbuffered
+	stderr    *broadcaster.Unbuffered
+	stdin     io.ReadCloser
+	stdinPipe io.WriteCloser
+}
+
+// NewConfig creates a stream config and initializes
+// the standard err and standard out to new unbuffered broadcasters.
+func NewConfig() *Config {
+	return &Config{
+		stderr: new(broadcaster.Unbuffered),
+		stdout: new(broadcaster.Unbuffered),
+	}
+}
+
+// Stdout returns the standard output in the configuration.
+func (c *Config) Stdout() *broadcaster.Unbuffered {
+	return c.stdout
+}
+
+// Stderr returns the standard error in the configuration.
+func (c *Config) Stderr() *broadcaster.Unbuffered {
+	return c.stderr
+}
+
+// Stdin returns the standard input in the configuration.
+func (c *Config) Stdin() io.ReadCloser {
+	return c.stdin
+}
+
+// StdinPipe returns an input writer pipe as an io.WriteCloser.
+func (c *Config) StdinPipe() io.WriteCloser {
+	return c.stdinPipe
+}
+
+// StdoutPipe creates a new io.ReadCloser with an empty bytes pipe.
+// It adds this new out pipe to the Stdout broadcaster.
+// This will block stdout if unconsumed.
+func (c *Config) StdoutPipe() io.ReadCloser {
+	bytesPipe := ioutils.NewBytesPipe()
+	c.stdout.Add(bytesPipe)
+	return bytesPipe
+}
+
+// StderrPipe creates a new io.ReadCloser with an empty bytes pipe.
+// It adds this new err pipe to the Stderr broadcaster.
+// This will block stderr if unconsumed.
+func (c *Config) StderrPipe() io.ReadCloser {
+	bytesPipe := ioutils.NewBytesPipe()
+	c.stderr.Add(bytesPipe)
+	return bytesPipe
+}
+
+// NewInputPipes creates new pipes for both standard inputs, Stdin and StdinPipe.
+func (c *Config) NewInputPipes() {
+	c.stdin, c.stdinPipe = io.Pipe()
+}
+
+// NewNopInputPipe creates a new input pipe that will silently drop all messages in the input.
+func (c *Config) NewNopInputPipe() {
+	c.stdinPipe = ioutils.NopWriteCloser(ioutil.Discard)
+}
+
+// CloseStreams ensures that the configured streams are properly closed.
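+//
+// Failures from the individual streams are collected and returned as a
+// single joined error, so a caller can simply log it (a sketch):
+//
+//	if err := cfg.CloseStreams(); err != nil {
+//		logrus.Errorf("failure closing streams: %s", err)
+//	}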
+func (c *Config) CloseStreams() error {
+	var errors []string
+
+	if c.stdin != nil {
+		if err := c.stdin.Close(); err != nil {
+			errors = append(errors, fmt.Sprintf("error closing stdin: %s", err))
+		}
+	}
+
+	if err := c.stdout.Clean(); err != nil {
+		errors = append(errors, fmt.Sprintf("error closing stdout: %s", err))
+	}
+
+	if err := c.stderr.Clean(); err != nil {
+		errors = append(errors, fmt.Sprintf("error closing stderr: %s", err))
+	}
+
+	if len(errors) > 0 {
+		return fmt.Errorf(strings.Join(errors, "\n"))
+	}
+
+	return nil
+}
+
+// CopyToPipe connects the stream config with a libcontainerd.IOPipe.
+func (c *Config) CopyToPipe(iop libcontainerd.IOPipe) {
+	copyFunc := func(w io.Writer, r io.ReadCloser) {
+		c.Add(1)
+		go func() {
+			if _, err := pools.Copy(w, r); err != nil {
+				logrus.Errorf("stream copy error: %+v", err)
+			}
+			r.Close()
+			c.Done()
+		}()
+	}
+
+	if iop.Stdout != nil {
+		copyFunc(c.Stdout(), iop.Stdout)
+	}
+	if iop.Stderr != nil {
+		copyFunc(c.Stderr(), iop.Stderr)
+	}
+
+	if stdin := c.Stdin(); stdin != nil {
+		if iop.Stdin != nil {
+			go func() {
+				pools.Copy(iop.Stdin, stdin)
+				if err := iop.Stdin.Close(); err != nil {
+					logrus.Warnf("failed to close stdin: %+v", err)
+				}
+			}()
+		}
+	}
+}
diff --git a/components/engine/contrib/README.md b/components/engine/contrib/README.md
new file mode 100644
index 0000000000..92b1d94433
--- /dev/null
+++ b/components/engine/contrib/README.md
@@ -0,0 +1,4 @@
+The `contrib` directory contains scripts, images, and other helpful things
+which are not part of the core docker distribution. Please note that they
+could be out of date, since they do not receive the same attention as the
+rest of the repository.
diff --git a/components/engine/contrib/REVIEWERS b/components/engine/contrib/REVIEWERS
new file mode 100644
index 0000000000..18e05a3070
--- /dev/null
+++ b/components/engine/contrib/REVIEWERS
@@ -0,0 +1 @@
+Tianon Gravi (@tianon)
diff --git a/components/engine/contrib/apparmor/main.go b/components/engine/contrib/apparmor/main.go
new file mode 100644
index 0000000000..f4a2978b86
--- /dev/null
+++ b/components/engine/contrib/apparmor/main.go
@@ -0,0 +1,56 @@
+package main
+
+import (
+	"fmt"
+	"log"
+	"os"
+	"path"
+	"text/template"
+
+	"github.com/docker/docker/pkg/aaparser"
+)
+
+type profileData struct {
+	Version int
+}
+
+func main() {
+	if len(os.Args) < 2 {
+		log.Fatal("pass a filename to save the profile in.")
+	}
+
+	// parse the arg
+	apparmorProfilePath := os.Args[1]
+
+	version, err := aaparser.GetVersion()
+	if err != nil {
+		log.Fatal(err)
+	}
+	data := profileData{
+		Version: version,
+	}
+	fmt.Printf("apparmor_parser is of version %+v\n", data)
+
+	// parse the template
+	compiled, err := template.New("apparmor_profile").Parse(dockerProfileTemplate)
+	if err != nil {
+		log.Fatalf("parsing template failed: %v", err)
+	}
+
+	// make sure /etc/apparmor.d exists
+	if err := os.MkdirAll(path.Dir(apparmorProfilePath), 0755); err != nil {
+		log.Fatal(err)
+	}
+
+	f, err := os.OpenFile(apparmorProfilePath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer f.Close()
+
+	if err := compiled.Execute(f, data); err != nil {
+		log.Fatalf("executing template failed: %v", err)
+	}
+
+	fmt.Printf("created apparmor profile for version %+v at %q\n", data, apparmorProfilePath)
+}
diff --git a/components/engine/contrib/apparmor/template.go b/components/engine/contrib/apparmor/template.go
new file mode 100644
index 0000000000..e5e1c8bed6
--- /dev/null
+++ b/components/engine/contrib/apparmor/template.go
@@
-0,0 +1,268 @@ +package main + +const dockerProfileTemplate = `@{DOCKER_GRAPH_PATH}=/var/lib/docker + +profile /usr/bin/docker (attach_disconnected, complain) { + # Prevent following links to these files during container setup. + deny /etc/** mkl, + deny /dev/** kl, + deny /sys/** mkl, + deny /proc/** mkl, + + mount -> @{DOCKER_GRAPH_PATH}/**, + mount -> /, + mount -> /proc/**, + mount -> /sys/**, + mount -> /run/docker/netns/**, + mount -> /.pivot_root[0-9]*/, + + / r, + + umount, + pivot_root, +{{if ge .Version 209000}} + signal (receive) peer=@{profile_name}, + signal (receive) peer=unconfined, + signal (send), +{{end}} + network, + capability, + owner /** rw, + @{DOCKER_GRAPH_PATH}/** rwl, + @{DOCKER_GRAPH_PATH}/linkgraph.db k, + @{DOCKER_GRAPH_PATH}/network/files/boltdb.db k, + @{DOCKER_GRAPH_PATH}/network/files/local-kv.db k, + @{DOCKER_GRAPH_PATH}/[0-9]*.[0-9]*/linkgraph.db k, + + # For non-root client use: + /dev/urandom r, + /dev/null rw, + /dev/pts/[0-9]* rw, + /run/docker.sock rw, + /proc/** r, + /proc/[0-9]*/attr/exec w, + /sys/kernel/mm/hugepages/ r, + /etc/localtime r, + /etc/ld.so.cache r, + /etc/passwd r, + +{{if ge .Version 209000}} + ptrace peer=@{profile_name}, + ptrace (read) peer=docker-default, + deny ptrace (trace) peer=docker-default, + deny ptrace peer=/usr/bin/docker///bin/ps, +{{end}} + + /usr/lib/** rm, + /lib/** rm, + + /usr/bin/docker pix, + /sbin/xtables-multi rCx, + /sbin/iptables rCx, + /sbin/modprobe rCx, + /sbin/auplink rCx, + /sbin/mke2fs rCx, + /sbin/tune2fs rCx, + /sbin/blkid rCx, + /bin/kmod rCx, + /usr/bin/xz rCx, + /bin/ps rCx, + /bin/tar rCx, + /bin/cat rCx, + /sbin/zfs rCx, + /sbin/apparmor_parser rCx, + +{{if ge .Version 209000}} + # Transitions + change_profile -> docker-*, + change_profile -> unconfined, +{{end}} + + profile /bin/cat (complain) { + /etc/ld.so.cache r, + /lib/** rm, + /dev/null rw, + /proc r, + /bin/cat mr, + + # For reading in 'docker stats': + /proc/[0-9]*/net/dev r, + } + profile /bin/ps (complain) { + /etc/ld.so.cache r, + /etc/localtime r, + /etc/passwd r, + /etc/nsswitch.conf r, + /lib/** rm, + /proc/[0-9]*/** r, + /dev/null rw, + /bin/ps mr, + +{{if ge .Version 209000}} + # We don't need ptrace so we'll deny and ignore the error. 
+ deny ptrace (read, trace), +{{end}} + + # Quiet dac_override denials + deny capability dac_override, + deny capability dac_read_search, + deny capability sys_ptrace, + + /dev/tty r, + /proc/stat r, + /proc/cpuinfo r, + /proc/meminfo r, + /proc/uptime r, + /sys/devices/system/cpu/online r, + /proc/sys/kernel/pid_max r, + /proc/ r, + /proc/tty/drivers r, + } + profile /sbin/iptables (complain) { +{{if ge .Version 209000}} + signal (receive) peer=/usr/bin/docker, +{{end}} + capability net_admin, + } + profile /sbin/auplink flags=(attach_disconnected, complain) { +{{if ge .Version 209000}} + signal (receive) peer=/usr/bin/docker, +{{end}} + capability sys_admin, + capability dac_override, + + @{DOCKER_GRAPH_PATH}/aufs/** rw, + @{DOCKER_GRAPH_PATH}/tmp/** rw, + # For user namespaces: + @{DOCKER_GRAPH_PATH}/[0-9]*.[0-9]*/** rw, + + /sys/fs/aufs/** r, + /lib/** rm, + /apparmor/.null r, + /dev/null rw, + /etc/ld.so.cache r, + /sbin/auplink rm, + /proc/fs/aufs/** rw, + /proc/[0-9]*/mounts rw, + } + profile /sbin/modprobe /bin/kmod (complain) { +{{if ge .Version 209000}} + signal (receive) peer=/usr/bin/docker, +{{end}} + capability sys_module, + /etc/ld.so.cache r, + /lib/** rm, + /dev/null rw, + /apparmor/.null rw, + /sbin/modprobe rm, + /bin/kmod rm, + /proc/cmdline r, + /sys/module/** r, + /etc/modprobe.d{/,/**} r, + } + # xz works via pipes, so we do not need access to the filesystem. + profile /usr/bin/xz (complain) { +{{if ge .Version 209000}} + signal (receive) peer=/usr/bin/docker, +{{end}} + /etc/ld.so.cache r, + /lib/** rm, + /usr/bin/xz rm, + deny /proc/** rw, + deny /sys/** rw, + } + profile /sbin/xtables-multi (attach_disconnected, complain) { + /etc/ld.so.cache r, + /lib/** rm, + /sbin/xtables-multi rm, + /apparmor/.null w, + /dev/null rw, + + /proc r, + + capability net_raw, + capability net_admin, + network raw, + } + profile /sbin/zfs (attach_disconnected, complain) { + file, + capability, + } + profile /sbin/mke2fs (complain) { + /sbin/mke2fs rm, + + /lib/** rm, + + /apparmor/.null w, + + /etc/ld.so.cache r, + /etc/mke2fs.conf r, + /etc/mtab r, + + /dev/dm-* rw, + /dev/urandom r, + /dev/null rw, + + /proc/swaps r, + /proc/[0-9]*/mounts r, + } + profile /sbin/tune2fs (complain) { + /sbin/tune2fs rm, + + /lib/** rm, + + /apparmor/.null w, + + /etc/blkid.conf r, + /etc/mtab r, + /etc/ld.so.cache r, + + /dev/null rw, + /dev/.blkid.tab r, + /dev/dm-* rw, + + /proc/swaps r, + /proc/[0-9]*/mounts r, + } + profile /sbin/blkid (complain) { + /sbin/blkid rm, + + /lib/** rm, + /apparmor/.null w, + + /etc/ld.so.cache r, + /etc/blkid.conf r, + + /dev/null rw, + /dev/.blkid.tab rl, + /dev/.blkid.tab* rwl, + /dev/dm-* r, + + /sys/devices/virtual/block/** r, + + capability mknod, + + mount -> @{DOCKER_GRAPH_PATH}/**, + } + profile /sbin/apparmor_parser (complain) { + /sbin/apparmor_parser rm, + + /lib/** rm, + + /etc/ld.so.cache r, + /etc/apparmor/** r, + /etc/apparmor.d/** r, + /etc/apparmor.d/cache/** w, + + /dev/null rw, + + /sys/kernel/security/apparmor/** r, + /sys/kernel/security/apparmor/.replace w, + + /proc/[0-9]*/mounts r, + /proc/sys/kernel/osrelease r, + /proc r, + + capability mac_admin, + } +}` diff --git a/components/engine/contrib/builder/deb/aarch64/build.sh b/components/engine/contrib/builder/deb/aarch64/build.sh new file mode 100755 index 0000000000..f8b25067bd --- /dev/null +++ b/components/engine/contrib/builder/deb/aarch64/build.sh @@ -0,0 +1,10 @@ +#!/usr/bin/env bash +set -e + +cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" + +set -x +./generate.sh +for d in */; do + 
docker build -t "dockercore/builder-deb:$(basename "$d")" "$d" +done diff --git a/components/engine/contrib/builder/deb/aarch64/debian-jessie/Dockerfile b/components/engine/contrib/builder/deb/aarch64/debian-jessie/Dockerfile new file mode 100644 index 0000000000..e165da4978 --- /dev/null +++ b/components/engine/contrib/builder/deb/aarch64/debian-jessie/Dockerfile @@ -0,0 +1,25 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/aarch64/generate.sh"! +# + +FROM aarch64/debian:jessie + +RUN echo deb http://ftp.debian.org/debian jessie-backports main > /etc/apt/sources.list.d/backports.list +RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libltdl-dev pkg-config vim-common libsystemd-journal-dev golang-1.6-go libseccomp-dev --no-install-recommends && rm -rf /var/lib/apt/lists/* + +RUN update-alternatives --install /usr/bin/go go /usr/lib/go-1.6/bin/go 100 + +# Install Go +# aarch64 doesn't have official go binaries, so use the version of go installed from +# the image to build go from source. +ENV GO_VERSION 1.7.5 +RUN mkdir /usr/src/go && curl -fsSL https://golang.org/dl/go${GO_VERSION}.src.tar.gz | tar -v -C /usr/src/go -xz --strip-components=1 \ + && cd /usr/src/go/src \ + && GOOS=linux GOARCH=arm64 GOROOT_BOOTSTRAP="$(go env GOROOT)" ./make.bash + +ENV PATH /usr/src/go/bin:$PATH + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS apparmor pkcs11 selinux seccomp +ENV RUNC_BUILDTAGS apparmor selinux seccomp diff --git a/components/engine/contrib/builder/deb/aarch64/debian-stretch/Dockerfile b/components/engine/contrib/builder/deb/aarch64/debian-stretch/Dockerfile new file mode 100644 index 0000000000..00546e256e --- /dev/null +++ b/components/engine/contrib/builder/deb/aarch64/debian-stretch/Dockerfile @@ -0,0 +1,22 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/aarch64/generate.sh"! +# + +FROM aarch64/debian:stretch + +RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libltdl-dev pkg-config vim-common libsystemd-dev golang-go libseccomp-dev --no-install-recommends && rm -rf /var/lib/apt/lists/* + +# Install Go +# aarch64 doesn't have official go binaries, so use the version of go installed from +# the image to build go from source. 
+ENV GO_VERSION 1.7.5
+RUN mkdir /usr/src/go && curl -fsSL https://golang.org/dl/go${GO_VERSION}.src.tar.gz | tar -v -C /usr/src/go -xz --strip-components=1 \
+	&& cd /usr/src/go/src \
+	&& GOOS=linux GOARCH=arm64 GOROOT_BOOTSTRAP="$(go env GOROOT)" ./make.bash
+
+ENV PATH /usr/src/go/bin:$PATH
+
+ENV AUTO_GOPATH 1
+
+ENV DOCKER_BUILDTAGS apparmor pkcs11 selinux seccomp
+ENV RUNC_BUILDTAGS apparmor selinux seccomp
diff --git a/components/engine/contrib/builder/deb/aarch64/generate.sh b/components/engine/contrib/builder/deb/aarch64/generate.sh
new file mode 100755
index 0000000000..647bf8fc28
--- /dev/null
+++ b/components/engine/contrib/builder/deb/aarch64/generate.sh
@@ -0,0 +1,136 @@
+#!/usr/bin/env bash
+set -e
+
+# This file is used to auto-generate Dockerfiles for making debs via 'make deb'
+#
+# usage: ./generate.sh [versions]
+# ie: ./generate.sh
+# to update all Dockerfiles in this directory
+# or: ./generate.sh ubuntu-trusty
+# to only update ubuntu-trusty/Dockerfile
+# or: ./generate.sh ubuntu-newversion
+# to create a new folder and a Dockerfile within it
+#
+# Note: non-LTS versions are not guaranteed to work.
+
+cd "$(dirname "$(readlink -f "$BASH_SOURCE")")"
+
+versions=( "$@" )
+if [ ${#versions[@]} -eq 0 ]; then
+	versions=( */ )
+fi
+versions=( "${versions[@]%/}" )
+
+for version in "${versions[@]}"; do
+	echo "${versions[@]}"
+	distro="${version%-*}"
+	suite="${version##*-}"
+	from="aarch64/${distro}:${suite}"
+
+	mkdir -p "$version"
+	echo "$version -> FROM $from"
+	cat > "$version/Dockerfile" <<-EOF
+		#
+		# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/aarch64/generate.sh"!
+		#
+
+		FROM $from
+
+	EOF
+
+	dockerBuildTags='apparmor pkcs11 selinux'
+	runcBuildTags='apparmor selinux'
+
+	# this list is sorted alphabetically; please keep it that way
+	packages=(
+		apparmor # for apparmor_parser for testing the profile
+		bash-completion # for bash-completion debhelper integration
+		btrfs-tools # for "btrfs/ioctl.h" (and "version.h" if possible)
+		build-essential # "essential for building Debian packages"
+		cmake # tini dep
+		curl ca-certificates # for downloading Go
+		debhelper # for easy ".deb" building
+		dh-apparmor # for apparmor debhelper
+		dh-systemd # for systemd debhelper integration
+		git # for "git commit" info in "docker -v"
+		libapparmor-dev # for "sys/apparmor.h"
+		libdevmapper-dev # for "libdevmapper.h"
+		libltdl-dev # for pkcs11 "ltdl.h"
+		pkg-config # for detecting things like libsystemd-journal dynamically
+		vim-common # tini dep
+	)
+
+	case "$suite" in
+		trusty)
+			packages+=( libsystemd-journal-dev )
+			# aarch64 doesn't have an official downloadable binary for go.
+			# And gccgo for trusty only includes the Go 1.2 implementation, which
+			# is too old to build the current go source; fortunately trusty has a
+			# golang-1.6-go package that can be used as a bootstrap.
+			packages+=( golang-1.6-go )
+			;;
+		jessie)
+			packages+=( libsystemd-journal-dev )
+			# aarch64 doesn't have an official downloadable binary for go.
+			# And gccgo for jessie only includes the Go 1.2 implementation, which
+			# is too old to build the current go source; fortunately jessie backports
+			# has a golang-1.6-go package that can be used as a bootstrap.
+ packages+=( golang-1.6-go libseccomp-dev ) + + dockerBuildTags="$dockerBuildTags seccomp" + runcBuildTags="$runcBuildTags seccomp" + ;; + stretch|xenial) + packages+=( libsystemd-dev ) + packages+=( golang-go libseccomp-dev ) + + dockerBuildTags="$dockerBuildTags seccomp" + runcBuildTags="$runcBuildTags seccomp" + ;; + *) + echo "Unsupported distro:" $distro:$suite + rm -fr "$version" + exit 1 + ;; + esac + + case "$suite" in + jessie) + echo 'RUN echo deb http://ftp.debian.org/debian jessie-backports main > /etc/apt/sources.list.d/backports.list' >> "$version/Dockerfile" + ;; + *) + ;; + esac + + # update and install packages + echo "RUN apt-get update && apt-get install -y ${packages[*]} --no-install-recommends && rm -rf /var/lib/apt/lists/*" >> "$version/Dockerfile" + echo >> "$version/Dockerfile" + + case "$suite" in + jessie|trusty) + echo 'RUN update-alternatives --install /usr/bin/go go /usr/lib/go-1.6/bin/go 100' >> "$version/Dockerfile" + echo >> "$version/Dockerfile" + ;; + *) + ;; + esac + + echo "# Install Go" >> "$version/Dockerfile" + echo "# aarch64 doesn't have official go binaries, so use the version of go installed from" >> "$version/Dockerfile" + echo "# the image to build go from source." >> "$version/Dockerfile" + + awk '$1 == "ENV" && $2 == "GO_VERSION" { print; exit }' ../../../../Dockerfile.aarch64 >> "$version/Dockerfile" + echo 'RUN mkdir /usr/src/go && curl -fsSL https://golang.org/dl/go${GO_VERSION}.src.tar.gz | tar -v -C /usr/src/go -xz --strip-components=1 \' >> "$version/Dockerfile" + echo ' && cd /usr/src/go/src \' >> "$version/Dockerfile" + echo ' && GOOS=linux GOARCH=arm64 GOROOT_BOOTSTRAP="$(go env GOROOT)" ./make.bash' >> "$version/Dockerfile" + echo >> "$version/Dockerfile" + + echo 'ENV PATH /usr/src/go/bin:$PATH' >> "$version/Dockerfile" + echo >> "$version/Dockerfile" + + echo "ENV AUTO_GOPATH 1" >> "$version/Dockerfile" + echo >> "$version/Dockerfile" + + echo "ENV DOCKER_BUILDTAGS $dockerBuildTags" >> "$version/Dockerfile" + echo "ENV RUNC_BUILDTAGS $runcBuildTags" >> "$version/Dockerfile" +done diff --git a/components/engine/contrib/builder/deb/aarch64/ubuntu-trusty/Dockerfile b/components/engine/contrib/builder/deb/aarch64/ubuntu-trusty/Dockerfile new file mode 100644 index 0000000000..f0fc346e2d --- /dev/null +++ b/components/engine/contrib/builder/deb/aarch64/ubuntu-trusty/Dockerfile @@ -0,0 +1,24 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/aarch64/generate.sh"! +# + +FROM aarch64/ubuntu:trusty + +RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libltdl-dev pkg-config vim-common libsystemd-journal-dev golang-1.6-go --no-install-recommends && rm -rf /var/lib/apt/lists/* + +RUN update-alternatives --install /usr/bin/go go /usr/lib/go-1.6/bin/go 100 + +# Install Go +# aarch64 doesn't have official go binaries, so use the version of go installed from +# the image to build go from source. 
+ENV GO_VERSION 1.8.1 +RUN mkdir /usr/src/go && curl -fsSL https://golang.org/dl/go${GO_VERSION}.src.tar.gz | tar -v -C /usr/src/go -xz --strip-components=1 \ + && cd /usr/src/go/src \ + && GOOS=linux GOARCH=arm64 GOROOT_BOOTSTRAP="$(go env GOROOT)" ./make.bash + +ENV PATH /usr/src/go/bin:$PATH + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS apparmor pkcs11 selinux +ENV RUNC_BUILDTAGS apparmor selinux diff --git a/components/engine/contrib/builder/deb/aarch64/ubuntu-xenial/Dockerfile b/components/engine/contrib/builder/deb/aarch64/ubuntu-xenial/Dockerfile new file mode 100644 index 0000000000..c379e651cd --- /dev/null +++ b/components/engine/contrib/builder/deb/aarch64/ubuntu-xenial/Dockerfile @@ -0,0 +1,22 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/aarch64/generate.sh"! +# + +FROM aarch64/ubuntu:xenial + +RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libltdl-dev pkg-config vim-common libsystemd-dev golang-go libseccomp-dev --no-install-recommends && rm -rf /var/lib/apt/lists/* + +# Install Go +# aarch64 doesn't have official go binaries, so use the version of go installed from +# the image to build go from source. +ENV GO_VERSION 1.8.1 +RUN mkdir /usr/src/go && curl -fsSL https://golang.org/dl/go${GO_VERSION}.src.tar.gz | tar -v -C /usr/src/go -xz --strip-components=1 \ + && cd /usr/src/go/src \ + && GOOS=linux GOARCH=arm64 GOROOT_BOOTSTRAP="$(go env GOROOT)" ./make.bash + +ENV PATH /usr/src/go/bin:$PATH + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS apparmor pkcs11 selinux seccomp +ENV RUNC_BUILDTAGS apparmor selinux seccomp diff --git a/components/engine/contrib/builder/deb/amd64/README.md b/components/engine/contrib/builder/deb/amd64/README.md new file mode 100644 index 0000000000..20a0ff1006 --- /dev/null +++ b/components/engine/contrib/builder/deb/amd64/README.md @@ -0,0 +1,5 @@ +# `dockercore/builder-deb` + +This image's tags contain the dependencies for building Docker `.deb`s for each of the Debian-based platforms Docker targets. + +To add new tags, see [`contrib/builder/deb/amd64` in https://github.com/docker/docker](https://github.com/docker/docker/tree/master/contrib/builder/deb/amd64), specifically the `generate.sh` script, whose usage is described in a comment at the top of the file. diff --git a/components/engine/contrib/builder/deb/amd64/build.sh b/components/engine/contrib/builder/deb/amd64/build.sh new file mode 100755 index 0000000000..f8b25067bd --- /dev/null +++ b/components/engine/contrib/builder/deb/amd64/build.sh @@ -0,0 +1,10 @@ +#!/usr/bin/env bash +set -e + +cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" + +set -x +./generate.sh +for d in */; do + docker build -t "dockercore/builder-deb:$(basename "$d")" "$d" +done diff --git a/components/engine/contrib/builder/deb/amd64/debian-jessie/Dockerfile b/components/engine/contrib/builder/deb/amd64/debian-jessie/Dockerfile new file mode 100644 index 0000000000..0ade89d15d --- /dev/null +++ b/components/engine/contrib/builder/deb/amd64/debian-jessie/Dockerfile @@ -0,0 +1,20 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/amd64/generate.sh"! 
+# + +FROM debian:jessie + +# allow replacing httpredir or deb mirror +ARG APT_MIRROR=deb.debian.org +RUN sed -ri "s/(httpredir|deb).debian.org/$APT_MIRROR/g" /etc/apt/sources.list + +RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libltdl-dev pkg-config vim-common libsystemd-journal-dev --no-install-recommends && rm -rf /var/lib/apt/lists/* + +ENV GO_VERSION 1.8.1 +RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS apparmor pkcs11 selinux +ENV RUNC_BUILDTAGS apparmor selinux diff --git a/components/engine/contrib/builder/deb/amd64/debian-stretch/Dockerfile b/components/engine/contrib/builder/deb/amd64/debian-stretch/Dockerfile new file mode 100644 index 0000000000..be3c19f036 --- /dev/null +++ b/components/engine/contrib/builder/deb/amd64/debian-stretch/Dockerfile @@ -0,0 +1,20 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/amd64/generate.sh"! +# + +FROM debian:stretch + +# allow replacing httpredir or deb mirror +ARG APT_MIRROR=deb.debian.org +RUN sed -ri "s/(httpredir|deb).debian.org/$APT_MIRROR/g" /etc/apt/sources.list + +RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libltdl-dev libseccomp-dev pkg-config vim-common libsystemd-dev --no-install-recommends && rm -rf /var/lib/apt/lists/* + +ENV GO_VERSION 1.8.1 +RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS apparmor pkcs11 seccomp selinux +ENV RUNC_BUILDTAGS apparmor seccomp selinux diff --git a/components/engine/contrib/builder/deb/amd64/debian-wheezy/Dockerfile b/components/engine/contrib/builder/deb/amd64/debian-wheezy/Dockerfile new file mode 100644 index 0000000000..9b8cade500 --- /dev/null +++ b/components/engine/contrib/builder/deb/amd64/debian-wheezy/Dockerfile @@ -0,0 +1,22 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/amd64/generate.sh"! 
+# + +FROM debian:wheezy-backports + +# allow replacing httpredir or deb mirror +ARG APT_MIRROR=deb.debian.org +RUN sed -ri "s/(httpredir|deb).debian.org/$APT_MIRROR/g" /etc/apt/sources.list +RUN sed -ri "s/(httpredir|deb).debian.org/$APT_MIRROR/g" /etc/apt/sources.list.d/backports.list + +RUN apt-get update && apt-get install -y -t wheezy-backports btrfs-tools --no-install-recommends && rm -rf /var/lib/apt/lists/* +RUN apt-get update && apt-get install -y apparmor bash-completion build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libltdl-dev pkg-config vim-common --no-install-recommends && rm -rf /var/lib/apt/lists/* + +ENV GO_VERSION 1.8.1 +RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS apparmor pkcs11 selinux +ENV RUNC_BUILDTAGS apparmor selinux diff --git a/components/engine/contrib/builder/deb/amd64/generate.sh b/components/engine/contrib/builder/deb/amd64/generate.sh new file mode 100755 index 0000000000..38972baadd --- /dev/null +++ b/components/engine/contrib/builder/deb/amd64/generate.sh @@ -0,0 +1,131 @@ +#!/usr/bin/env bash +set -e + +# usage: ./generate.sh [versions] +# ie: ./generate.sh +# to update all Dockerfiles in this directory +# or: ./generate.sh debian-jessie +# to only update debian-jessie/Dockerfile +# or: ./generate.sh debian-newversion +# to create a new folder and a Dockerfile within it + +cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" + +versions=( "$@" ) +if [ ${#versions[@]} -eq 0 ]; then + versions=( */ ) +fi +versions=( "${versions[@]%/}" ) + +for version in "${versions[@]}"; do + distro="${version%-*}" + suite="${version##*-}" + from="${distro}:${suite}" + + case "$from" in + debian:wheezy) + # add -backports, like our users have to + from+='-backports' + ;; + esac + + mkdir -p "$version" + echo "$version -> FROM $from" + cat > "$version/Dockerfile" <<-EOF + # + # THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/amd64/generate.sh"! 
+ # + + FROM $from + EOF + + echo >> "$version/Dockerfile" + + if [ "$distro" = "debian" ]; then + cat >> "$version/Dockerfile" <<-'EOF' + # allow replacing httpredir or deb mirror + ARG APT_MIRROR=deb.debian.org + RUN sed -ri "s/(httpredir|deb).debian.org/$APT_MIRROR/g" /etc/apt/sources.list + EOF + + if [ "$suite" = "wheezy" ]; then + cat >> "$version/Dockerfile" <<-'EOF' + RUN sed -ri "s/(httpredir|deb).debian.org/$APT_MIRROR/g" /etc/apt/sources.list.d/backports.list + EOF + fi + + echo "" >> "$version/Dockerfile" + fi + + extraBuildTags='pkcs11' + runcBuildTags= + + # this list is sorted alphabetically; please keep it that way + packages=( + apparmor # for apparmor_parser for testing the profile + bash-completion # for bash-completion debhelper integration + btrfs-tools # for "btrfs/ioctl.h" (and "version.h" if possible) + build-essential # "essential for building Debian packages" + cmake # tini dep + curl ca-certificates # for downloading Go + debhelper # for easy ".deb" building + dh-apparmor # for apparmor debhelper + dh-systemd # for systemd debhelper integration + git # for "git commit" info in "docker -v" + libapparmor-dev # for "sys/apparmor.h" + libdevmapper-dev # for "libdevmapper.h" + libltdl-dev # for pkcs11 "ltdl.h" + libseccomp-dev # for "seccomp.h" & "libseccomp.so" + pkg-config # for detecting things like libsystemd-journal dynamically + vim-common # tini dep + ) + # packaging for "sd-journal.h" and libraries varies + case "$suite" in + wheezy) ;; + jessie|trusty) packages+=( libsystemd-journal-dev ) ;; + *) packages+=( libsystemd-dev ) ;; + esac + + # debian wheezy does not have the right libseccomp libs + # debian jessie & ubuntu trusty have a libseccomp < 2.2.1 :( + case "$suite" in + wheezy|jessie|trusty) + packages=( "${packages[@]/libseccomp-dev}" ) + runcBuildTags="apparmor selinux" + ;; + *) + extraBuildTags+=' seccomp' + runcBuildTags="apparmor seccomp selinux" + ;; + esac + + if [ "$suite" = 'wheezy' ]; then + # pull a couple packages from backports explicitly + # (build failures otherwise) + backportsPackages=( btrfs-tools ) + for pkg in "${backportsPackages[@]}"; do + packages=( "${packages[@]/$pkg}" ) + done + echo "RUN apt-get update && apt-get install -y -t $suite-backports ${backportsPackages[*]} --no-install-recommends && rm -rf /var/lib/apt/lists/*" >> "$version/Dockerfile" + fi + + echo "RUN apt-get update && apt-get install -y ${packages[*]} --no-install-recommends && rm -rf /var/lib/apt/lists/*" >> "$version/Dockerfile" + + echo >> "$version/Dockerfile" + + awk '$1 == "ENV" && $2 == "GO_VERSION" { print; exit }' ../../../../Dockerfile >> "$version/Dockerfile" + echo 'RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local' >> "$version/Dockerfile" + echo 'ENV PATH $PATH:/usr/local/go/bin' >> "$version/Dockerfile" + + echo >> "$version/Dockerfile" + + echo 'ENV AUTO_GOPATH 1' >> "$version/Dockerfile" + + echo >> "$version/Dockerfile" + + # print build tags in alphabetical order + buildTags=$( echo "apparmor selinux $extraBuildTags" | xargs -n1 | sort -n | tr '\n' ' ' | sed -e 's/[[:space:]]*$//' ) + + echo "ENV DOCKER_BUILDTAGS $buildTags" >> "$version/Dockerfile" + echo "ENV RUNC_BUILDTAGS $runcBuildTags" >> "$version/Dockerfile" +done diff --git a/components/engine/contrib/builder/deb/amd64/ubuntu-trusty/Dockerfile b/components/engine/contrib/builder/deb/amd64/ubuntu-trusty/Dockerfile new file mode 100644 index 0000000000..2d8b196065 --- /dev/null +++ 
b/components/engine/contrib/builder/deb/amd64/ubuntu-trusty/Dockerfile @@ -0,0 +1,16 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/amd64/generate.sh"! +# + +FROM ubuntu:trusty + +RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libltdl-dev pkg-config vim-common libsystemd-journal-dev --no-install-recommends && rm -rf /var/lib/apt/lists/* + +ENV GO_VERSION 1.8.1 +RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS apparmor pkcs11 selinux +ENV RUNC_BUILDTAGS apparmor selinux diff --git a/components/engine/contrib/builder/deb/amd64/ubuntu-xenial/Dockerfile b/components/engine/contrib/builder/deb/amd64/ubuntu-xenial/Dockerfile new file mode 100644 index 0000000000..f533a13907 --- /dev/null +++ b/components/engine/contrib/builder/deb/amd64/ubuntu-xenial/Dockerfile @@ -0,0 +1,16 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/amd64/generate.sh"! +# + +FROM ubuntu:xenial + +RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libltdl-dev libseccomp-dev pkg-config vim-common libsystemd-dev --no-install-recommends && rm -rf /var/lib/apt/lists/* + +ENV GO_VERSION 1.8.1 +RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS apparmor pkcs11 seccomp selinux +ENV RUNC_BUILDTAGS apparmor seccomp selinux diff --git a/components/engine/contrib/builder/deb/amd64/ubuntu-yakkety/Dockerfile b/components/engine/contrib/builder/deb/amd64/ubuntu-yakkety/Dockerfile new file mode 100644 index 0000000000..8b75cb8766 --- /dev/null +++ b/components/engine/contrib/builder/deb/amd64/ubuntu-yakkety/Dockerfile @@ -0,0 +1,16 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/amd64/generate.sh"! +# + +FROM ubuntu:yakkety + +RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libltdl-dev libseccomp-dev pkg-config vim-common libsystemd-dev --no-install-recommends && rm -rf /var/lib/apt/lists/* + +ENV GO_VERSION 1.8.1 +RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS apparmor pkcs11 seccomp selinux +ENV RUNC_BUILDTAGS apparmor seccomp selinux diff --git a/components/engine/contrib/builder/deb/amd64/ubuntu-zesty/Dockerfile b/components/engine/contrib/builder/deb/amd64/ubuntu-zesty/Dockerfile new file mode 100644 index 0000000000..9a19e4b132 --- /dev/null +++ b/components/engine/contrib/builder/deb/amd64/ubuntu-zesty/Dockerfile @@ -0,0 +1,16 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/amd64/generate.sh"! 
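+# As a rough sketch of how this image is produced: the sibling build.sh runs
+# generate.sh and then "docker build" on every subdirectory, so rebuilding just
+# this one by hand would look something like
+#   docker build -t dockercore/builder-deb:ubuntu-zesty ubuntu-zesty/
+# (run from contrib/builder/deb/amd64; the tag naming is assumed from build.sh).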
+# + +FROM ubuntu:zesty + +RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libltdl-dev libseccomp-dev pkg-config vim-common libsystemd-dev --no-install-recommends && rm -rf /var/lib/apt/lists/* + +ENV GO_VERSION 1.7.5 +RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS apparmor pkcs11 seccomp selinux +ENV RUNC_BUILDTAGS apparmor seccomp selinux diff --git a/components/engine/contrib/builder/deb/armhf/debian-jessie/Dockerfile b/components/engine/contrib/builder/deb/armhf/debian-jessie/Dockerfile new file mode 100644 index 0000000000..f72e697a19 --- /dev/null +++ b/components/engine/contrib/builder/deb/armhf/debian-jessie/Dockerfile @@ -0,0 +1,20 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/armhf/generate.sh"! +# + +FROM armhf/debian:jessie + +# allow replacing httpredir or deb mirror +ARG APT_MIRROR=deb.debian.org +RUN sed -ri "s/(httpredir|deb).debian.org/$APT_MIRROR/g" /etc/apt/sources.list + +RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libltdl-dev pkg-config vim-common libsystemd-journal-dev --no-install-recommends && rm -rf /var/lib/apt/lists/* + +ENV GO_VERSION 1.8.1 +RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-armv6l.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS apparmor pkcs11 selinux +ENV RUNC_BUILDTAGS apparmor selinux diff --git a/components/engine/contrib/builder/deb/armhf/generate.sh b/components/engine/contrib/builder/deb/armhf/generate.sh new file mode 100755 index 0000000000..9bb943ed72 --- /dev/null +++ b/components/engine/contrib/builder/deb/armhf/generate.sh @@ -0,0 +1,140 @@ +#!/usr/bin/env bash +set -e + +# usage: ./generate.sh [versions] +# ie: ./generate.sh +# to update all Dockerfiles in this directory +# or: ./generate.sh debian-jessie +# to only update debian-jessie/Dockerfile +# or: ./generate.sh debian-newversion +# to create a new folder and a Dockerfile within it + +cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" + +versions=( "$@" ) +if [ ${#versions[@]} -eq 0 ]; then + versions=( */ ) +fi +versions=( "${versions[@]%/}" ) + +for version in "${versions[@]}"; do + distro="${version%-*}" + suite="${version##*-}" + from="${distro}:${suite}" + + case "$from" in + raspbian:jessie) + from="resin/rpi-raspbian:jessie" + ;; + *) + from="armhf/$from" + ;; + esac + + mkdir -p "$version" + echo "$version -> FROM $from" + cat > "$version/Dockerfile" <<-EOF + # + # THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/armhf/generate.sh"! 
+ #
+
+ FROM $from
+ EOF
+
+ echo >> "$version/Dockerfile"
+
+ if [[ "$distro" = "debian" || "$distro" = "raspbian" ]]; then
+ cat >> "$version/Dockerfile" <<-'EOF'
+ # allow replacing httpredir or deb mirror
+ ARG APT_MIRROR=deb.debian.org
+ RUN sed -ri "s/(httpredir|deb).debian.org/$APT_MIRROR/g" /etc/apt/sources.list
+ EOF
+
+ if [ "$suite" = "wheezy" ]; then
+ cat >> "$version/Dockerfile" <<-'EOF'
+ RUN sed -ri "s/(httpredir|deb).debian.org/$APT_MIRROR/g" /etc/apt/sources.list.d/backports.list
+ EOF
+ fi
+
+ echo "" >> "$version/Dockerfile"
+ fi
+
+ extraBuildTags='pkcs11'
+ runcBuildTags=
+
+ # this list is sorted alphabetically; please keep it that way
+ packages=(
+ apparmor # for apparmor_parser for testing the profile
+ bash-completion # for bash-completion debhelper integration
+ btrfs-tools # for "btrfs/ioctl.h" (and "version.h" if possible)
+ build-essential # "essential for building Debian packages"
+ cmake # tini dep
+ curl ca-certificates # for downloading Go
+ debhelper # for easy ".deb" building
+ dh-apparmor # for apparmor debhelper
+ dh-systemd # for systemd debhelper integration
+ git # for "git commit" info in "docker -v"
+ libapparmor-dev # for "sys/apparmor.h"
+ libdevmapper-dev # for "libdevmapper.h"
+ libltdl-dev # for pkcs11 "ltdl.h"
+ libseccomp-dev # for "seccomp.h" & "libseccomp.so"
+ pkg-config # for detecting things like libsystemd-journal dynamically
+ vim-common # tini dep
+ )
+ # packaging for "sd-journal.h" and libraries varies
+ case "$suite" in
+ wheezy) ;;
+ jessie|trusty) packages+=( libsystemd-journal-dev ) ;;
+ *) packages+=( libsystemd-dev ) ;;
+ esac
+
+ # debian wheezy does not have the right libseccomp libs
+ # debian jessie & ubuntu trusty have a libseccomp < 2.2.1 :(
+ case "$suite" in
+ wheezy|jessie|trusty)
+ packages=( "${packages[@]/libseccomp-dev}" )
+ runcBuildTags="apparmor selinux"
+ ;;
+ *)
+ extraBuildTags+=' seccomp'
+ runcBuildTags="apparmor seccomp selinux"
+ ;;
+ esac
+
+ if [ "$suite" = 'wheezy' ]; then
+ # pull a couple packages from backports explicitly
+ # (build failures otherwise)
+ backportsPackages=( btrfs-tools )
+ for pkg in "${backportsPackages[@]}"; do
+ packages=( "${packages[@]/$pkg}" )
+ done
+ echo "RUN apt-get update && apt-get install -y -t $suite-backports ${backportsPackages[*]} --no-install-recommends && rm -rf /var/lib/apt/lists/*" >> "$version/Dockerfile"
+ fi
+
+ echo "RUN apt-get update && apt-get install -y ${packages[*]} --no-install-recommends && rm -rf /var/lib/apt/lists/*" >> "$version/Dockerfile"
+
+ echo >> "$version/Dockerfile"
+
+ awk '$1 == "ENV" && $2 == "GO_VERSION" { print; exit }' ../../../../Dockerfile.armhf >> "$version/Dockerfile"
+ if [ "$distro" == 'raspbian' ];
+ then
+ cat <<EOF >> "$version/Dockerfile"
+# GOARM is the ARM architecture version which is unrelated to the above Golang version
+ENV GOARM 6
+EOF
+ fi
+ echo 'RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-armv6l.tar.gz" | tar xzC /usr/local' >> "$version/Dockerfile"
+ echo 'ENV PATH $PATH:/usr/local/go/bin' >> "$version/Dockerfile"
+
+ echo >> "$version/Dockerfile"
+
+ echo 'ENV AUTO_GOPATH 1' >> "$version/Dockerfile"
+
+ echo >> "$version/Dockerfile"
+
+ # print build tags in alphabetical order
+ buildTags=$( echo "apparmor selinux $extraBuildTags" | xargs -n1 | sort -n | tr '\n' ' ' | sed -e 's/[[:space:]]*$//' )
+
+ echo "ENV DOCKER_BUILDTAGS $buildTags" >> "$version/Dockerfile"
+ echo "ENV RUNC_BUILDTAGS $runcBuildTags" >> "$version/Dockerfile"
+done diff --git 
a/components/engine/contrib/builder/deb/armhf/raspbian-jessie/Dockerfile b/components/engine/contrib/builder/deb/armhf/raspbian-jessie/Dockerfile new file mode 100644 index 0000000000..3664834d70 --- /dev/null +++ b/components/engine/contrib/builder/deb/armhf/raspbian-jessie/Dockerfile @@ -0,0 +1,22 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/armhf/generate.sh"! +# + +FROM resin/rpi-raspbian:jessie + +# allow replacing httpredir or deb mirror +ARG APT_MIRROR=deb.debian.org +RUN sed -ri "s/(httpredir|deb).debian.org/$APT_MIRROR/g" /etc/apt/sources.list + +RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libltdl-dev pkg-config vim-common libsystemd-journal-dev --no-install-recommends && rm -rf /var/lib/apt/lists/* + +ENV GO_VERSION 1.8.1 +# GOARM is the ARM architecture version which is unrelated to the above Golang version +ENV GOARM 6 +RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-armv6l.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS apparmor pkcs11 selinux +ENV RUNC_BUILDTAGS apparmor selinux diff --git a/components/engine/contrib/builder/deb/armhf/ubuntu-trusty/Dockerfile b/components/engine/contrib/builder/deb/armhf/ubuntu-trusty/Dockerfile new file mode 100644 index 0000000000..f3743890a5 --- /dev/null +++ b/components/engine/contrib/builder/deb/armhf/ubuntu-trusty/Dockerfile @@ -0,0 +1,16 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/armhf/generate.sh"! +# + +FROM armhf/ubuntu:trusty + +RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libltdl-dev pkg-config vim-common libsystemd-journal-dev --no-install-recommends && rm -rf /var/lib/apt/lists/* + +ENV GO_VERSION 1.8.1 +RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-armv6l.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS apparmor pkcs11 selinux +ENV RUNC_BUILDTAGS apparmor selinux diff --git a/components/engine/contrib/builder/deb/armhf/ubuntu-xenial/Dockerfile b/components/engine/contrib/builder/deb/armhf/ubuntu-xenial/Dockerfile new file mode 100644 index 0000000000..6814682563 --- /dev/null +++ b/components/engine/contrib/builder/deb/armhf/ubuntu-xenial/Dockerfile @@ -0,0 +1,16 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/armhf/generate.sh"! 
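+# A hypothetical smoke test for the Go toolchain installed below, assuming the
+# image has been built and tagged locally (tag name is illustrative only):
+#   docker run --rm builder-deb:armhf-ubuntu-xenial go version
+# which, given GO_VERSION 1.8.1 and the armv6l tarball, should print something
+# like: go version go1.8.1 linux/arm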
+# + +FROM armhf/ubuntu:xenial + +RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libltdl-dev libseccomp-dev pkg-config vim-common libsystemd-dev --no-install-recommends && rm -rf /var/lib/apt/lists/* + +ENV GO_VERSION 1.8.1 +RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-armv6l.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS apparmor pkcs11 seccomp selinux +ENV RUNC_BUILDTAGS apparmor seccomp selinux diff --git a/components/engine/contrib/builder/deb/armhf/ubuntu-yakkety/Dockerfile b/components/engine/contrib/builder/deb/armhf/ubuntu-yakkety/Dockerfile new file mode 100644 index 0000000000..3cb2b31967 --- /dev/null +++ b/components/engine/contrib/builder/deb/armhf/ubuntu-yakkety/Dockerfile @@ -0,0 +1,16 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/armhf/generate.sh"! +# + +FROM armhf/ubuntu:yakkety + +RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libltdl-dev libseccomp-dev pkg-config vim-common libsystemd-dev --no-install-recommends && rm -rf /var/lib/apt/lists/* + +ENV GO_VERSION 1.8.1 +RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-armv6l.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS apparmor pkcs11 seccomp selinux +ENV RUNC_BUILDTAGS apparmor seccomp selinux diff --git a/components/engine/contrib/builder/deb/ppc64le/build.sh b/components/engine/contrib/builder/deb/ppc64le/build.sh new file mode 100755 index 0000000000..83dbf943fc --- /dev/null +++ b/components/engine/contrib/builder/deb/ppc64le/build.sh @@ -0,0 +1,10 @@ +#!/usr/bin/env bash +set -e + +cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" + +set -x +./generate.sh +for d in */; do + docker build -t "dockercore/builder-deb:$(basename "$d")" "$d" +done diff --git a/components/engine/contrib/builder/deb/ppc64le/generate.sh b/components/engine/contrib/builder/deb/ppc64le/generate.sh new file mode 100755 index 0000000000..6f6f348b5a --- /dev/null +++ b/components/engine/contrib/builder/deb/ppc64le/generate.sh @@ -0,0 +1,102 @@ +#!/usr/bin/env bash +set -e + +# This file is used to auto-generate Dockerfiles for making debs via 'make deb' +# +# usage: ./generate.sh [versions] +# ie: ./generate.sh +# to update all Dockerfiles in this directory +# or: ./generate.sh ubuntu-xenial +# to only update ubuntu-xenial/Dockerfile +# or: ./generate.sh ubuntu-newversion +# to create a new folder and a Dockerfile within it + +cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" + +versions=( "$@" ) +if [ ${#versions[@]} -eq 0 ]; then + versions=( */ ) +fi +versions=( "${versions[@]%/}" ) + +for version in "${versions[@]}"; do + echo "${versions[@]}" + distro="${version%-*}" + suite="${version##*-}" + from="ppc64le/${distro}:${suite}" + + mkdir -p "$version" + echo "$version -> FROM $from" + cat > "$version/Dockerfile" <<-EOF + # + # THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/ppc64le/generate.sh"! 
+ # + + FROM $from + + EOF + + extraBuildTags='pkcs11' + runcBuildTags= + + # this list is sorted alphabetically; please keep it that way + packages=( + apparmor # for apparmor_parser for testing the profile + bash-completion # for bash-completion debhelper integration + btrfs-tools # for "btrfs/ioctl.h" (and "version.h" if possible) + build-essential # "essential for building Debian packages" + cmake # tini dep + curl ca-certificates # for downloading Go + debhelper # for easy ".deb" building + dh-apparmor # for apparmor debhelper + dh-systemd # for systemd debhelper integration + git # for "git commit" info in "docker -v" + libapparmor-dev # for "sys/apparmor.h" + libdevmapper-dev # for "libdevmapper.h" + libltdl-dev # for pkcs11 "ltdl.h" + pkg-config # for detecting things like libsystemd-journal dynamically + vim-common # tini dep + ) + + case "$suite" in + trusty) + packages+=( libsystemd-journal-dev ) + ;; + *) + # libseccomp isn't available until ubuntu xenial and is required for "seccomp.h" & "libseccomp.so" + packages+=( libseccomp-dev ) + packages+=( libsystemd-dev ) + ;; + esac + + # buildtags + case "$suite" in + # trusty has no seccomp package + trusty) + runcBuildTags="apparmor selinux" + ;; + # ppc64le support was backported into libseccomp 2.2.3-2, + # so enable seccomp by default + *) + extraBuildTags+=' seccomp' + runcBuildTags="apparmor seccomp selinux" + ;; + esac + + # update and install packages + echo "RUN apt-get update && apt-get install -y ${packages[*]} --no-install-recommends && rm -rf /var/lib/apt/lists/*" >> "$version/Dockerfile" + echo >> "$version/Dockerfile" + + awk '$1 == "ENV" && $2 == "GO_VERSION" { print; exit }' ../../../../Dockerfile.ppc64le >> "$version/Dockerfile" + echo 'RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-ppc64le.tar.gz" | tar xzC /usr/local' >> "$version/Dockerfile" + echo 'ENV PATH $PATH:/usr/local/go/bin' >> "$version/Dockerfile" + echo >> "$version/Dockerfile" + + echo 'ENV AUTO_GOPATH 1' >> "$version/Dockerfile" + echo >> "$version/Dockerfile" + + # print build tags in alphabetical order + buildTags=$( echo "apparmor selinux $extraBuildTags" | xargs -n1 | sort -n | tr '\n' ' ' | sed -e 's/[[:space:]]*$//' ) + echo "ENV DOCKER_BUILDTAGS $buildTags" >> "$version/Dockerfile" + echo "ENV RUNC_BUILDTAGS $runcBuildTags" >> "$version/Dockerfile" +done diff --git a/components/engine/contrib/builder/deb/ppc64le/ubuntu-trusty/Dockerfile b/components/engine/contrib/builder/deb/ppc64le/ubuntu-trusty/Dockerfile new file mode 100644 index 0000000000..77bb0f1f2d --- /dev/null +++ b/components/engine/contrib/builder/deb/ppc64le/ubuntu-trusty/Dockerfile @@ -0,0 +1,16 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/ppc64le/generate.sh"! 
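+# Note: trusty has no usable libseccomp package (see the suite cases in
+# generate.sh), so the "seccomp" build tag is deliberately absent from the
+# DOCKER_BUILDTAGS and RUNC_BUILDTAGS lines below.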
+# + +FROM ppc64le/ubuntu:trusty + +RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libltdl-dev pkg-config vim-common libsystemd-journal-dev --no-install-recommends && rm -rf /var/lib/apt/lists/* + +ENV GO_VERSION 1.8.1 +RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-ppc64le.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS apparmor pkcs11 selinux +ENV RUNC_BUILDTAGS apparmor selinux diff --git a/components/engine/contrib/builder/deb/ppc64le/ubuntu-xenial/Dockerfile b/components/engine/contrib/builder/deb/ppc64le/ubuntu-xenial/Dockerfile new file mode 100644 index 0000000000..1bf7e9120a --- /dev/null +++ b/components/engine/contrib/builder/deb/ppc64le/ubuntu-xenial/Dockerfile @@ -0,0 +1,16 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/ppc64le/generate.sh"! +# + +FROM ppc64le/ubuntu:xenial + +RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libltdl-dev pkg-config vim-common libseccomp-dev libsystemd-dev --no-install-recommends && rm -rf /var/lib/apt/lists/* + +ENV GO_VERSION 1.8.1 +RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-ppc64le.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS apparmor pkcs11 seccomp selinux +ENV RUNC_BUILDTAGS apparmor seccomp selinux diff --git a/components/engine/contrib/builder/deb/ppc64le/ubuntu-yakkety/Dockerfile b/components/engine/contrib/builder/deb/ppc64le/ubuntu-yakkety/Dockerfile new file mode 100644 index 0000000000..b94a4de875 --- /dev/null +++ b/components/engine/contrib/builder/deb/ppc64le/ubuntu-yakkety/Dockerfile @@ -0,0 +1,16 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/ppc64le/generate.sh"! 
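+# The GO_VERSION line below is copied verbatim from the repository's
+# Dockerfile.ppc64le by the awk step in generate.sh; change it there, not here.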
+# + +FROM ppc64le/ubuntu:yakkety + +RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libltdl-dev pkg-config vim-common libseccomp-dev libsystemd-dev --no-install-recommends && rm -rf /var/lib/apt/lists/* + +ENV GO_VERSION 1.8.1 +RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-ppc64le.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS apparmor pkcs11 seccomp selinux +ENV RUNC_BUILDTAGS apparmor seccomp selinux diff --git a/components/engine/contrib/builder/deb/s390x/build.sh b/components/engine/contrib/builder/deb/s390x/build.sh new file mode 100755 index 0000000000..f8b25067bd --- /dev/null +++ b/components/engine/contrib/builder/deb/s390x/build.sh @@ -0,0 +1,10 @@ +#!/usr/bin/env bash +set -e + +cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" + +set -x +./generate.sh +for d in */; do + docker build -t "dockercore/builder-deb:$(basename "$d")" "$d" +done diff --git a/components/engine/contrib/builder/deb/s390x/generate.sh b/components/engine/contrib/builder/deb/s390x/generate.sh new file mode 100755 index 0000000000..5cf05f32e0 --- /dev/null +++ b/components/engine/contrib/builder/deb/s390x/generate.sh @@ -0,0 +1,95 @@ +#!/usr/bin/env bash +set -e + +# This file is used to auto-generate Dockerfiles for making debs via 'make deb' +# +# usage: ./generate.sh [versions] +# ie: ./generate.sh +# to update all Dockerfiles in this directory +# or: ./generate.sh ubuntu-xenial +# to only update ubuntu-xenial/Dockerfile +# or: ./generate.sh ubuntu-newversion +# to create a new folder and a Dockerfile within it + +cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" + +versions=( "$@" ) +if [ ${#versions[@]} -eq 0 ]; then + versions=( */ ) +fi +versions=( "${versions[@]%/}" ) + +for version in "${versions[@]}"; do + echo "${versions[@]}" + distro="${version%-*}" + suite="${version##*-}" + from="s390x/${distro}:${suite}" + + mkdir -p "$version" + echo "$version -> FROM $from" + cat > "$version/Dockerfile" <<-EOF + # + # THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/s390x/generate.sh"! 
+ # + + FROM $from + + EOF + + extraBuildTags='pkcs11' + runcBuildTags= + + # this list is sorted alphabetically; please keep it that way + packages=( + apparmor # for apparmor_parser for testing the profile + bash-completion # for bash-completion debhelper integration + btrfs-tools # for "btrfs/ioctl.h" (and "version.h" if possible) + build-essential # "essential for building Debian packages" + cmake # tini dep + curl ca-certificates # for downloading Go + debhelper # for easy ".deb" building + dh-apparmor # for apparmor debhelper + dh-systemd # for systemd debhelper integration + git # for "git commit" info in "docker -v" + libapparmor-dev # for "sys/apparmor.h" + libdevmapper-dev # for "libdevmapper.h" + libltdl-dev # for pkcs11 "ltdl.h" + libseccomp-dev # for "seccomp.h" & "libseccomp.so" + pkg-config # for detecting things like libsystemd-journal dynamically + libsystemd-dev + vim-common # tini dep + ) + + case "$suite" in + # s390x needs libseccomp 2.3.1 + xenial) + # Ubuntu Xenial has libseccomp 2.2.3 + runcBuildTags="apparmor selinux" + ;; + *) + extraBuildTags+=' seccomp' + runcBuildTags="apparmor selinux seccomp" + ;; + esac + + # update and install packages + echo "RUN apt-get update && apt-get install -y ${packages[*]} --no-install-recommends && rm -rf /var/lib/apt/lists/*" >> "$version/Dockerfile" + + echo >> "$version/Dockerfile" + + awk '$1 == "ENV" && $2 == "GO_VERSION" { print; exit }' ../../../../Dockerfile.s390x >> "$version/Dockerfile" + echo 'RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-s390x.tar.gz" | tar xzC /usr/local' >> "$version/Dockerfile" + echo 'ENV PATH $PATH:/usr/local/go/bin' >> "$version/Dockerfile" + + echo >> "$version/Dockerfile" + + echo 'ENV AUTO_GOPATH 1' >> "$version/Dockerfile" + + echo >> "$version/Dockerfile" + + # print build tags in alphabetical order + buildTags=$( echo "apparmor selinux $extraBuildTags" | xargs -n1 | sort -n | tr '\n' ' ' | sed -e 's/[[:space:]]*$//' ) + + echo "ENV DOCKER_BUILDTAGS $buildTags" >> "$version/Dockerfile" + echo "ENV RUNC_BUILDTAGS $runcBuildTags" >> "$version/Dockerfile" +done diff --git a/components/engine/contrib/builder/deb/s390x/ubuntu-xenial/Dockerfile b/components/engine/contrib/builder/deb/s390x/ubuntu-xenial/Dockerfile new file mode 100644 index 0000000000..e96c0cb241 --- /dev/null +++ b/components/engine/contrib/builder/deb/s390x/ubuntu-xenial/Dockerfile @@ -0,0 +1,16 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/s390x/generate.sh"! +# + +FROM s390x/ubuntu:xenial + +RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libltdl-dev libseccomp-dev pkg-config libsystemd-dev vim-common --no-install-recommends && rm -rf /var/lib/apt/lists/* + +ENV GO_VERSION 1.8.1 +RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-s390x.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS apparmor pkcs11 selinux +ENV RUNC_BUILDTAGS apparmor selinux diff --git a/components/engine/contrib/builder/deb/s390x/ubuntu-yakkety/Dockerfile b/components/engine/contrib/builder/deb/s390x/ubuntu-yakkety/Dockerfile new file mode 100644 index 0000000000..0faa6e76f9 --- /dev/null +++ b/components/engine/contrib/builder/deb/s390x/ubuntu-yakkety/Dockerfile @@ -0,0 +1,16 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/s390x/generate.sh"! 
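+# Unlike the xenial variant, this image carries the "seccomp" build tag:
+# per the comments in generate.sh, s390x needs libseccomp >= 2.3.1, which
+# xenial's 2.2.3 does not satisfy but later suites do.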
+# + +FROM s390x/ubuntu:yakkety + +RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libltdl-dev libseccomp-dev pkg-config libsystemd-dev vim-common --no-install-recommends && rm -rf /var/lib/apt/lists/* + +ENV GO_VERSION 1.7.5 +RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-s390x.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS apparmor pkcs11 seccomp selinux +ENV RUNC_BUILDTAGS apparmor selinux seccomp diff --git a/components/engine/contrib/builder/rpm/amd64/README.md b/components/engine/contrib/builder/rpm/amd64/README.md new file mode 100644 index 0000000000..5f2e888c7a --- /dev/null +++ b/components/engine/contrib/builder/rpm/amd64/README.md @@ -0,0 +1,5 @@ +# `dockercore/builder-rpm` + +This image's tags contain the dependencies for building Docker `.rpm`s for each of the RPM-based platforms Docker targets. + +To add new tags, see [`contrib/builder/rpm/amd64` in https://github.com/docker/docker](https://github.com/docker/docker/tree/master/contrib/builder/rpm/amd64), specifically the `generate.sh` script, whose usage is described in a comment at the top of the file. diff --git a/components/engine/contrib/builder/rpm/amd64/amazonlinux-latest/Dockerfile b/components/engine/contrib/builder/rpm/amd64/amazonlinux-latest/Dockerfile new file mode 100644 index 0000000000..348ddbaa58 --- /dev/null +++ b/components/engine/contrib/builder/rpm/amd64/amazonlinux-latest/Dockerfile @@ -0,0 +1,18 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/rpm/amd64/generate.sh"! +# + +FROM amazonlinux:latest + +RUN yum groupinstall -y "Development Tools" +RUN yum install -y btrfs-progs-devel device-mapper-devel glibc-static libseccomp-devel libselinux-devel libtool-ltdl-devel pkgconfig selinux-policy selinux-policy-devel tar git cmake vim-common + +ENV GO_VERSION 1.7.5 +RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS pkcs11 seccomp selinux +ENV RUNC_BUILDTAGS seccomp selinux + diff --git a/components/engine/contrib/builder/rpm/amd64/build.sh b/components/engine/contrib/builder/rpm/amd64/build.sh new file mode 100755 index 0000000000..1e3565a34a --- /dev/null +++ b/components/engine/contrib/builder/rpm/amd64/build.sh @@ -0,0 +1,10 @@ +#!/usr/bin/env bash +set -e + +cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" + +set -x +./generate.sh +for d in */; do + docker build -t "dockercore/builder-rpm:$(basename "$d")" "$d" +done diff --git a/components/engine/contrib/builder/rpm/amd64/centos-7/Dockerfile b/components/engine/contrib/builder/rpm/amd64/centos-7/Dockerfile new file mode 100644 index 0000000000..8a3312c5ec --- /dev/null +++ b/components/engine/contrib/builder/rpm/amd64/centos-7/Dockerfile @@ -0,0 +1,19 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/rpm/amd64/generate.sh"! 
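+# The "yum swap" below replaces the minimal systemd-container packages that the
+# centos:7 base image ships with full systemd/systemd-libs, presumably so that
+# systemd-devel and the packaging tooling below install cleanly.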
+# + +FROM centos:7 + +RUN yum groupinstall -y "Development Tools" +RUN yum -y swap -- remove systemd-container systemd-container-libs -- install systemd systemd-libs +RUN yum install -y btrfs-progs-devel device-mapper-devel glibc-static libseccomp-devel libselinux-devel libtool-ltdl-devel pkgconfig selinux-policy selinux-policy-devel systemd-devel tar git cmake vim-common + +ENV GO_VERSION 1.8.1 +RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS pkcs11 seccomp selinux +ENV RUNC_BUILDTAGS seccomp selinux + diff --git a/components/engine/contrib/builder/rpm/amd64/fedora-24/Dockerfile b/components/engine/contrib/builder/rpm/amd64/fedora-24/Dockerfile new file mode 100644 index 0000000000..8251defa28 --- /dev/null +++ b/components/engine/contrib/builder/rpm/amd64/fedora-24/Dockerfile @@ -0,0 +1,19 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/rpm/amd64/generate.sh"! +# + +FROM fedora:24 + +RUN dnf -y upgrade +RUN dnf install -y @development-tools fedora-packager +RUN dnf install -y btrfs-progs-devel device-mapper-devel glibc-static libseccomp-devel libselinux-devel libtool-ltdl-devel pkgconfig selinux-policy selinux-policy-devel systemd-devel tar git cmake vim-common + +ENV GO_VERSION 1.8.1 +RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS pkcs11 seccomp selinux +ENV RUNC_BUILDTAGS seccomp selinux + diff --git a/components/engine/contrib/builder/rpm/amd64/fedora-25/Dockerfile b/components/engine/contrib/builder/rpm/amd64/fedora-25/Dockerfile new file mode 100644 index 0000000000..aaf5736fb0 --- /dev/null +++ b/components/engine/contrib/builder/rpm/amd64/fedora-25/Dockerfile @@ -0,0 +1,19 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/rpm/amd64/generate.sh"! 
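+# generate.sh prepends a full "dnf -y upgrade" for fedora:* bases, presumably
+# so the devel packages below resolve against current repository metadata.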
+# + +FROM fedora:25 + +RUN dnf -y upgrade +RUN dnf install -y @development-tools fedora-packager +RUN dnf install -y btrfs-progs-devel device-mapper-devel glibc-static libseccomp-devel libselinux-devel libtool-ltdl-devel pkgconfig selinux-policy selinux-policy-devel systemd-devel tar git cmake vim-common + +ENV GO_VERSION 1.8.1 +RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS pkcs11 seccomp selinux +ENV RUNC_BUILDTAGS seccomp selinux + diff --git a/components/engine/contrib/builder/rpm/amd64/generate.sh b/components/engine/contrib/builder/rpm/amd64/generate.sh new file mode 100755 index 0000000000..5239a2bf81 --- /dev/null +++ b/components/engine/contrib/builder/rpm/amd64/generate.sh @@ -0,0 +1,188 @@ +#!/usr/bin/env bash +set -e + +# usage: ./generate.sh [versions] +# ie: ./generate.sh +# to update all Dockerfiles in this directory +# or: ./generate.sh centos-7 +# to only update centos-7/Dockerfile +# or: ./generate.sh fedora-newversion +# to create a new folder and a Dockerfile within it + +cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" + +versions=( "$@" ) +if [ ${#versions[@]} -eq 0 ]; then + versions=( */ ) +fi +versions=( "${versions[@]%/}" ) + +for version in "${versions[@]}"; do + distro="${version%-*}" + suite="${version##*-}" + from="${distro}:${suite}" + installer=yum + + if [[ "$distro" == "fedora" ]]; then + installer=dnf + fi + if [[ "$distro" == "photon" ]]; then + installer=tdnf + fi + + mkdir -p "$version" + echo "$version -> FROM $from" + cat > "$version/Dockerfile" <<-EOF + # + # THIS FILE IS AUTOGENERATED; SEE "contrib/builder/rpm/amd64/generate.sh"! + # + + FROM $from + EOF + + echo >> "$version/Dockerfile" + + extraBuildTags='pkcs11' + runcBuildTags= + + case "$from" in + oraclelinux:6) + # We need a known version of the kernel-uek-devel headers to set CGO_CPPFLAGS, so grab the UEKR4 GA version + # This requires using yum-config-manager from yum-utils to enable the UEKR4 yum repo + echo "RUN yum install -y yum-utils && curl -o /etc/yum.repos.d/public-yum-ol6.repo http://yum.oracle.com/public-yum-ol6.repo && yum-config-manager -q --enable ol6_UEKR4" >> "$version/Dockerfile" + echo "RUN yum install -y kernel-uek-devel-4.1.12-32.el6uek" >> "$version/Dockerfile" + echo >> "$version/Dockerfile" + ;; + fedora:*) + echo "RUN ${installer} -y upgrade" >> "$version/Dockerfile" + ;; + *) ;; + esac + + case "$from" in + centos:*|amazonlinux:latest) + # get "Development Tools" packages dependencies + echo 'RUN yum groupinstall -y "Development Tools"' >> "$version/Dockerfile" + + if [[ "$version" == "centos-7" ]]; then + echo 'RUN yum -y swap -- remove systemd-container systemd-container-libs -- install systemd systemd-libs' >> "$version/Dockerfile" + fi + ;; + oraclelinux:*) + # get "Development Tools" packages and dependencies + # we also need yum-utils for yum-config-manager to pull the latest repo file + echo 'RUN yum groupinstall -y "Development Tools"' >> "$version/Dockerfile" + ;; + opensuse:*) + # get rpm-build and curl packages and dependencies + echo 'RUN zypper --non-interactive install ca-certificates* curl gzip rpm-build' >> "$version/Dockerfile" + ;; + photon:*) + echo "RUN ${installer} install -y wget curl ca-certificates gzip make rpm-build sed gcc linux-api-headers glibc-devel binutils libseccomp libltdl-devel elfutils" >> "$version/Dockerfile" + ;; + *) + echo "RUN ${installer} install -y @development-tools fedora-packager" >> 
"$version/Dockerfile" + ;; + esac + + packages=( + btrfs-progs-devel # for "btrfs/ioctl.h" (and "version.h" if possible) + device-mapper-devel # for "libdevmapper.h" + glibc-static + libseccomp-devel # for "seccomp.h" & "libseccomp.so" + libselinux-devel # for "libselinux.so" + libtool-ltdl-devel # for pkcs11 "ltdl.h" + pkgconfig # for the pkg-config command + selinux-policy + selinux-policy-devel + systemd-devel # for "sd-journal.h" and libraries + tar # older versions of dev-tools do not have tar + git # required for containerd and runc clone + cmake # tini build + vim-common # tini build + ) + + case "$from" in + oraclelinux:7) + # Enable the optional repository + packages=( --enablerepo=ol7_optional_latest "${packages[*]}" ) + ;; + esac + + case "$from" in + oraclelinux:6|amazonlinux:latest) + # doesn't use systemd, doesn't have a devel package for it + packages=( "${packages[@]/systemd-devel}" ) + ;; + esac + + # opensuse & oraclelinx:6 do not have the right libseccomp libs + case "$from" in + opensuse:*|oraclelinux:6) + packages=( "${packages[@]/libseccomp-devel}" ) + runcBuildTags="selinux" + ;; + *) + extraBuildTags+=' seccomp' + runcBuildTags="seccomp selinux" + ;; + esac + + case "$from" in + opensuse:*) + packages=( "${packages[@]/btrfs-progs-devel/libbtrfs-devel}" ) + packages=( "${packages[@]/pkgconfig/pkg-config}" ) + packages=( "${packages[@]/vim-common/vim}" ) + if [[ "$from" == "opensuse:13."* ]]; then + packages+=( systemd-rpm-macros ) + fi + + # use zypper + echo "RUN zypper --non-interactive install ${packages[*]}" >> "$version/Dockerfile" + ;; + photon:*) + packages=( "${packages[@]/pkgconfig/pkg-config}" ) + echo "RUN ${installer} install -y ${packages[*]}" >> "$version/Dockerfile" + ;; + *) + echo "RUN ${installer} install -y ${packages[*]}" >> "$version/Dockerfile" + ;; + esac + + echo >> "$version/Dockerfile" + + + awk '$1 == "ENV" && $2 == "GO_VERSION" { print; exit }' ../../../../Dockerfile >> "$version/Dockerfile" + echo 'RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local' >> "$version/Dockerfile" + echo 'ENV PATH $PATH:/usr/local/go/bin' >> "$version/Dockerfile" + + echo >> "$version/Dockerfile" + + echo 'ENV AUTO_GOPATH 1' >> "$version/Dockerfile" + + echo >> "$version/Dockerfile" + + # print build tags in alphabetical order + buildTags=$( echo "selinux $extraBuildTags" | xargs -n1 | sort -n | tr '\n' ' ' | sed -e 's/[[:space:]]*$//' ) + + echo "ENV DOCKER_BUILDTAGS $buildTags" >> "$version/Dockerfile" + echo "ENV RUNC_BUILDTAGS $runcBuildTags" >> "$version/Dockerfile" + echo >> "$version/Dockerfile" + + case "$from" in + oraclelinux:6) + # We need to set the CGO_CPPFLAGS environment to use the updated UEKR4 headers with all the userns stuff. + # The ordering is very important and should not be changed. 
+ echo 'ENV CGO_CPPFLAGS -D__EXPORTED_HEADERS__ \' >> "$version/Dockerfile" + echo ' -I/usr/src/kernels/4.1.12-32.el6uek.x86_64/arch/x86/include/generated/uapi \' >> "$version/Dockerfile" + echo ' -I/usr/src/kernels/4.1.12-32.el6uek.x86_64/arch/x86/include/uapi \' >> "$version/Dockerfile" + echo ' -I/usr/src/kernels/4.1.12-32.el6uek.x86_64/include/generated/uapi \' >> "$version/Dockerfile" + echo ' -I/usr/src/kernels/4.1.12-32.el6uek.x86_64/include/uapi \' >> "$version/Dockerfile" + echo ' -I/usr/src/kernels/4.1.12-32.el6uek.x86_64/include' >> "$version/Dockerfile" + echo >> "$version/Dockerfile" + ;; + *) ;; + esac + + +done diff --git a/components/engine/contrib/builder/rpm/amd64/opensuse-13.2/Dockerfile b/components/engine/contrib/builder/rpm/amd64/opensuse-13.2/Dockerfile new file mode 100644 index 0000000000..5e408eeffd --- /dev/null +++ b/components/engine/contrib/builder/rpm/amd64/opensuse-13.2/Dockerfile @@ -0,0 +1,18 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/rpm/amd64/generate.sh"! +# + +FROM opensuse:13.2 + +RUN zypper --non-interactive install ca-certificates* curl gzip rpm-build +RUN zypper --non-interactive install libbtrfs-devel device-mapper-devel glibc-static libselinux-devel libtool-ltdl-devel pkg-config selinux-policy selinux-policy-devel systemd-devel tar git cmake vim systemd-rpm-macros + +ENV GO_VERSION 1.8.1 +RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS pkcs11 selinux +ENV RUNC_BUILDTAGS selinux + diff --git a/components/engine/contrib/builder/rpm/amd64/oraclelinux-6/Dockerfile b/components/engine/contrib/builder/rpm/amd64/oraclelinux-6/Dockerfile new file mode 100644 index 0000000000..1e78c9cbd3 --- /dev/null +++ b/components/engine/contrib/builder/rpm/amd64/oraclelinux-6/Dockerfile @@ -0,0 +1,28 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/rpm/amd64/generate.sh"! +# + +FROM oraclelinux:6 + +RUN yum install -y yum-utils && curl -o /etc/yum.repos.d/public-yum-ol6.repo http://yum.oracle.com/public-yum-ol6.repo && yum-config-manager -q --enable ol6_UEKR4 +RUN yum install -y kernel-uek-devel-4.1.12-32.el6uek + +RUN yum groupinstall -y "Development Tools" +RUN yum install -y btrfs-progs-devel device-mapper-devel glibc-static libselinux-devel libtool-ltdl-devel pkgconfig selinux-policy selinux-policy-devel tar git cmake vim-common + +ENV GO_VERSION 1.8.1 +RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS pkcs11 selinux +ENV RUNC_BUILDTAGS selinux + +ENV CGO_CPPFLAGS -D__EXPORTED_HEADERS__ \ + -I/usr/src/kernels/4.1.12-32.el6uek.x86_64/arch/x86/include/generated/uapi \ + -I/usr/src/kernels/4.1.12-32.el6uek.x86_64/arch/x86/include/uapi \ + -I/usr/src/kernels/4.1.12-32.el6uek.x86_64/include/generated/uapi \ + -I/usr/src/kernels/4.1.12-32.el6uek.x86_64/include/uapi \ + -I/usr/src/kernels/4.1.12-32.el6uek.x86_64/include + diff --git a/components/engine/contrib/builder/rpm/amd64/oraclelinux-7/Dockerfile b/components/engine/contrib/builder/rpm/amd64/oraclelinux-7/Dockerfile new file mode 100644 index 0000000000..f4f5a8e36a --- /dev/null +++ b/components/engine/contrib/builder/rpm/amd64/oraclelinux-7/Dockerfile @@ -0,0 +1,18 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/rpm/amd64/generate.sh"! 
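+# generate.sh enables the OL7 optional repository for this image because some
+# of the -devel packages below (presumably libseccomp-devel among them) are
+# not available from the default OL7 repositories.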
+# + +FROM oraclelinux:7 + +RUN yum groupinstall -y "Development Tools" +RUN yum install -y --enablerepo=ol7_optional_latest btrfs-progs-devel device-mapper-devel glibc-static libseccomp-devel libselinux-devel libtool-ltdl-devel pkgconfig selinux-policy selinux-policy-devel systemd-devel tar git cmake vim-common + +ENV GO_VERSION 1.8.1 +RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS pkcs11 seccomp selinux +ENV RUNC_BUILDTAGS seccomp selinux + diff --git a/components/engine/contrib/builder/rpm/amd64/photon-1.0/Dockerfile b/components/engine/contrib/builder/rpm/amd64/photon-1.0/Dockerfile new file mode 100644 index 0000000000..853c73b1a6 --- /dev/null +++ b/components/engine/contrib/builder/rpm/amd64/photon-1.0/Dockerfile @@ -0,0 +1,18 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/rpm/amd64/generate.sh"! +# + +FROM photon:1.0 + +RUN tdnf install -y wget curl ca-certificates gzip make rpm-build sed gcc linux-api-headers glibc-devel binutils libseccomp libltdl-devel elfutils +RUN tdnf install -y btrfs-progs-devel device-mapper-devel glibc-static libseccomp-devel libselinux-devel libtool-ltdl-devel pkg-config selinux-policy selinux-policy-devel systemd-devel tar git cmake vim-common + +ENV GO_VERSION 1.8.1 +RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS pkcs11 seccomp selinux +ENV RUNC_BUILDTAGS seccomp selinux + diff --git a/components/engine/contrib/builder/rpm/armhf/build.sh b/components/engine/contrib/builder/rpm/armhf/build.sh new file mode 100755 index 0000000000..1e3565a34a --- /dev/null +++ b/components/engine/contrib/builder/rpm/armhf/build.sh @@ -0,0 +1,10 @@ +#!/usr/bin/env bash +set -e + +cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" + +set -x +./generate.sh +for d in */; do + docker build -t "dockercore/builder-rpm:$(basename "$d")" "$d" +done diff --git a/components/engine/contrib/builder/rpm/armhf/centos-7/Dockerfile b/components/engine/contrib/builder/rpm/armhf/centos-7/Dockerfile new file mode 100644 index 0000000000..3c9b93d881 --- /dev/null +++ b/components/engine/contrib/builder/rpm/armhf/centos-7/Dockerfile @@ -0,0 +1,20 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/rpm/armhf/generate.sh"! 
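+# yum-plugin-ovl is installed first as a workaround for rpmdb checksum
+# problems that can occur when this image is built on an overlayfs-backed
+# Docker storage driver.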
+# + +FROM multiarch/centos:7.2.1511-armhfp-clean + +RUN yum install -y yum-plugin-ovl +RUN yum groupinstall --skip-broken -y "Development Tools" +RUN yum -y swap -- remove systemd-container systemd-container-libs -- install systemd systemd-libs +RUN yum install -y btrfs-progs-devel device-mapper-devel glibc-static libseccomp-devel libselinux-devel libtool-ltdl-devel pkgconfig selinux-policy selinux-policy-devel sqlite-devel systemd-devel tar git cmake vim-common + +ENV GO_VERSION 1.7.4 +RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-armv6l.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS pkcs11 seccomp selinux +ENV RUNC_BUILDTAGS seccomp selinux + diff --git a/components/engine/contrib/builder/rpm/armhf/generate.sh b/components/engine/contrib/builder/rpm/armhf/generate.sh new file mode 100755 index 0000000000..f7c742ebdd --- /dev/null +++ b/components/engine/contrib/builder/rpm/armhf/generate.sh @@ -0,0 +1,123 @@ +#!/usr/bin/env bash +# vim: set ts=4 sw=4 noet : + +set -e + +# usage: ./generate.sh [versions] +# ie: ./generate.sh +# to update all Dockerfiles in this directory +# or: ./generate.sh centos-7 +# to only update centos-7/Dockerfile +# or: ./generate.sh fedora-newversion +# to create a new folder and a Dockerfile within it + +cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" + +versions=( "$@" ) +if [ ${#versions[@]} -eq 0 ]; then + versions=( */ ) +fi +versions=( "${versions[@]%/}" ) + +for version in "${versions[@]}"; do + distro="${version%-*}" + suite="${version##*-}" + from="${distro}:${suite}" + installer=yum + + if [[ "$distro" == "fedora" ]]; then + installer=dnf + fi + + mkdir -p "$version" + + case "$from" in + centos:*) + # get "Development Tools" packages dependencies + image="multiarch/centos:7.2.1511-armhfp-clean" + ;; + *) + image="${from}" + ;; + esac + + echo "$version -> FROM $image" + cat > "$version/Dockerfile" <<-EOF + # + # THIS FILE IS AUTOGENERATED; SEE "contrib/builder/rpm/armhf/generate.sh"! 
+ # + + FROM $image + EOF + + echo >> "$version/Dockerfile" + + extraBuildTags='pkcs11' + runcBuildTags= + + case "$from" in + fedora:*) + echo "RUN ${installer} -y upgrade" >> "$version/Dockerfile" + ;; + *) ;; + esac + + case "$from" in + centos:*) + # get "Development Tools" packages dependencies + + echo 'RUN yum install -y yum-plugin-ovl' >> "$version/Dockerfile" + + echo 'RUN yum groupinstall --skip-broken -y "Development Tools"' >> "$version/Dockerfile" + + if [[ "$version" == "centos-7" ]]; then + echo 'RUN yum -y swap -- remove systemd-container systemd-container-libs -- install systemd systemd-libs' >> "$version/Dockerfile" + fi + ;; + *) + echo "RUN ${installer} install -y @development-tools fedora-packager" >> "$version/Dockerfile" + ;; + esac + + packages=( + btrfs-progs-devel # for "btrfs/ioctl.h" (and "version.h" if possible) + device-mapper-devel # for "libdevmapper.h" + glibc-static + libseccomp-devel # for "seccomp.h" & "libseccomp.so" + libselinux-devel # for "libselinux.so" + libtool-ltdl-devel # for pkcs11 "ltdl.h" + pkgconfig # for the pkg-config command + selinux-policy + selinux-policy-devel + sqlite-devel # for "sqlite3.h" + systemd-devel # for "sd-journal.h" and libraries + tar # older versions of dev-tools do not have tar + git # required for containerd and runc clone + cmake # tini build + vim-common # tini build + ) + + extraBuildTags+=' seccomp' + runcBuildTags="seccomp selinux" + + echo "RUN ${installer} install -y ${packages[*]}" >> "$version/Dockerfile" + + echo >> "$version/Dockerfile" + + awk '$1 == "ENV" && $2 == "GO_VERSION" { print; exit }' ../../../../Dockerfile.armhf >> "$version/Dockerfile" + echo 'RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-armv6l.tar.gz" | tar xzC /usr/local' >> "$version/Dockerfile" + echo 'ENV PATH $PATH:/usr/local/go/bin' >> "$version/Dockerfile" + + echo >> "$version/Dockerfile" + + echo 'ENV AUTO_GOPATH 1' >> "$version/Dockerfile" + + echo >> "$version/Dockerfile" + + # print build tags in alphabetical order + buildTags=$( echo "selinux $extraBuildTags" | xargs -n1 | sort -n | tr '\n' ' ' | sed -e 's/[[:space:]]*$//' ) + + echo "ENV DOCKER_BUILDTAGS $buildTags" >> "$version/Dockerfile" + echo "ENV RUNC_BUILDTAGS $runcBuildTags" >> "$version/Dockerfile" + echo >> "$version/Dockerfile" +done diff --git a/components/engine/contrib/builder/rpm/ppc64le/build.sh b/components/engine/contrib/builder/rpm/ppc64le/build.sh new file mode 100755 index 0000000000..1e3565a34a --- /dev/null +++ b/components/engine/contrib/builder/rpm/ppc64le/build.sh @@ -0,0 +1,10 @@ +#!/usr/bin/env bash +set -e + +cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" + +set -x +./generate.sh +for d in */; do + docker build -t "dockercore/builder-rpm:$(basename "$d")" "$d" +done diff --git a/components/engine/contrib/builder/rpm/ppc64le/fedora-24/Dockerfile b/components/engine/contrib/builder/rpm/ppc64le/fedora-24/Dockerfile new file mode 100644 index 0000000000..491b99d059 --- /dev/null +++ b/components/engine/contrib/builder/rpm/ppc64le/fedora-24/Dockerfile @@ -0,0 +1,19 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/rpm/ppc64le/generate.sh"! 
+#
+
+FROM ppc64le/fedora:24
+
+RUN dnf -y upgrade
+RUN dnf install -y @development-tools fedora-packager
+RUN dnf install -y btrfs-progs-devel device-mapper-devel glibc-static libseccomp-devel libselinux-devel libtool-ltdl-devel pkgconfig selinux-policy selinux-policy-devel systemd-devel tar git cmake
+
+ENV GO_VERSION 1.8.1
+RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-ppc64le.tar.gz" | tar xzC /usr/local
+ENV PATH $PATH:/usr/local/go/bin
+
+ENV AUTO_GOPATH 1
+
+ENV DOCKER_BUILDTAGS pkcs11 seccomp selinux
+ENV RUNC_BUILDTAGS seccomp selinux
+
diff --git a/components/engine/contrib/builder/rpm/ppc64le/generate.sh b/components/engine/contrib/builder/rpm/ppc64le/generate.sh new file mode 100755 index 0000000000..b9f657c12d --- /dev/null +++ b/components/engine/contrib/builder/rpm/ppc64le/generate.sh @@ -0,0 +1,102 @@
+#!/usr/bin/env bash
+set -e
+
+# usage: ./generate.sh [versions]
+# ie: ./generate.sh
+# to update all Dockerfiles in this directory
+# or: ./generate.sh fedora-23
+# to only update fedora-23/Dockerfile
+# or: ./generate.sh fedora-newversion
+# to create a new folder and a Dockerfile within it
+
+cd "$(dirname "$(readlink -f "$BASH_SOURCE")")"
+
+versions=( "$@" )
+if [ ${#versions[@]} -eq 0 ]; then
+ versions=( */ )
+fi
+versions=( "${versions[@]%/}" )
+
+for version in "${versions[@]}"; do
+ echo "${versions[@]}"
+ distro="${version%-*}"
+ suite="${version##*-}"
+ from="${distro}:${suite}"
+ installer=yum
+ if [[ "$distro" == "fedora" ]]; then
+ installer=dnf
+ fi
+
+ mkdir -p "$version"
+ echo "$version -> FROM $from"
+ cat > "$version/Dockerfile" <<-EOF
+ #
+ # THIS FILE IS AUTOGENERATED; SEE "contrib/builder/rpm/ppc64le/generate.sh"!
+ #
+
+ FROM ppc64le/$from
+ EOF
+
+ echo >> "$version/Dockerfile"
+
+ extraBuildTags='pkcs11'
+ runcBuildTags=
+
+ case "$from" in
+ # add centos and opensuse tools install bits later
+ fedora:*)
+ echo "RUN ${installer} -y upgrade" >> "$version/Dockerfile"
+ echo "RUN ${installer} install -y @development-tools fedora-packager" >> "$version/Dockerfile"
+ ;;
+ esac
+
+ # this list is sorted alphabetically; please keep it that way
+ packages=(
+ btrfs-progs-devel # for "btrfs/ioctl.h" (and "version.h" if possible)
+ device-mapper-devel # for "libdevmapper.h"
+ glibc-static
+ libseccomp-devel # for "seccomp.h" & "libseccomp.so"
+ libselinux-devel # for "libselinux.so"
+ libtool-ltdl-devel # for pkcs11 "ltdl.h"
+ pkgconfig # for the pkg-config command
+ selinux-policy
+ selinux-policy-devel
+ systemd-devel # for "sd-journal.h" and libraries
+ tar # older versions of dev-tools do not have tar
+ git # required for containerd and runc clone
+ cmake # tini build
+ )
+
+ # opensuse does not have the right libseccomp libs
+ case "$from" in
+ # add opensuse libseccomp package substitution when adding build support
+ *)
+ extraBuildTags+=' seccomp'
+ runcBuildTags="seccomp selinux"
+ ;;
+ esac
+
+ case "$from" in
+ # add opensuse btrfs package substitution when adding build support
+ *)
+ echo "RUN ${installer} install -y ${packages[*]}" >> "$version/Dockerfile"
+ ;;
+ esac
+
+ echo >> "$version/Dockerfile"
+
+ awk '$1 == "ENV" && $2 == "GO_VERSION" { print; exit }' ../../../../Dockerfile.ppc64le >> "$version/Dockerfile"
+ echo 'RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-ppc64le.tar.gz" | tar xzC /usr/local' >> "$version/Dockerfile"
+ echo 'ENV PATH $PATH:/usr/local/go/bin' >> "$version/Dockerfile"
+ echo >> "$version/Dockerfile"
+
+ echo 'ENV AUTO_GOPATH 1' >> "$version/Dockerfile"
+ echo >> "$version/Dockerfile"
+
+ # print build tags in alphabetical 
order + buildTags=$( echo "selinux $extraBuildTags" | xargs -n1 | sort -n | tr '\n' ' ' | sed -e 's/[[:space:]]*$//' ) + + echo "ENV DOCKER_BUILDTAGS $buildTags" >> "$version/Dockerfile" + echo "ENV RUNC_BUILDTAGS $runcBuildTags" >> "$version/Dockerfile" + echo >> "$version/Dockerfile" + +done diff --git a/components/engine/contrib/check-config.sh b/components/engine/contrib/check-config.sh new file mode 100755 index 0000000000..88eb8aa753 --- /dev/null +++ b/components/engine/contrib/check-config.sh @@ -0,0 +1,360 @@ +#!/usr/bin/env bash +set -e + +EXITCODE=0 + +# bits of this were adapted from lxc-checkconfig +# see also https://github.com/lxc/lxc/blob/lxc-1.0.2/src/lxc/lxc-checkconfig.in + +possibleConfigs=( + '/proc/config.gz' + "/boot/config-$(uname -r)" + "/usr/src/linux-$(uname -r)/.config" + '/usr/src/linux/.config' +) + +if [ $# -gt 0 ]; then + CONFIG="$1" +else + : ${CONFIG:="${possibleConfigs[0]}"} +fi + +if ! command -v zgrep &> /dev/null; then + zgrep() { + zcat "$2" | grep "$1" + } +fi + +kernelVersion="$(uname -r)" +kernelMajor="${kernelVersion%%.*}" +kernelMinor="${kernelVersion#$kernelMajor.}" +kernelMinor="${kernelMinor%%.*}" + +is_set() { + zgrep "CONFIG_$1=[y|m]" "$CONFIG" > /dev/null +} +is_set_in_kernel() { + zgrep "CONFIG_$1=y" "$CONFIG" > /dev/null +} +is_set_as_module() { + zgrep "CONFIG_$1=m" "$CONFIG" > /dev/null +} + +color() { + local codes=() + if [ "$1" = 'bold' ]; then + codes=( "${codes[@]}" '1' ) + shift + fi + if [ "$#" -gt 0 ]; then + local code= + case "$1" in + # see https://en.wikipedia.org/wiki/ANSI_escape_code#Colors + black) code=30 ;; + red) code=31 ;; + green) code=32 ;; + yellow) code=33 ;; + blue) code=34 ;; + magenta) code=35 ;; + cyan) code=36 ;; + white) code=37 ;; + esac + if [ "$code" ]; then + codes=( "${codes[@]}" "$code" ) + fi + fi + local IFS=';' + echo -en '\033['"${codes[*]}"'m' +} +wrap_color() { + text="$1" + shift + color "$@" + echo -n "$text" + color reset + echo +} + +wrap_good() { + echo "$(wrap_color "$1" white): $(wrap_color "$2" green)" +} +wrap_bad() { + echo "$(wrap_color "$1" bold): $(wrap_color "$2" bold red)" +} +wrap_warning() { + wrap_color >&2 "$*" red +} + +check_flag() { + if is_set_in_kernel "$1"; then + wrap_good "CONFIG_$1" 'enabled' + elif is_set_as_module "$1"; then + wrap_good "CONFIG_$1" 'enabled (as module)' + else + wrap_bad "CONFIG_$1" 'missing' + EXITCODE=1 + fi +} + +check_flags() { + for flag in "$@"; do + echo -n "- "; check_flag "$flag" + done +} + +check_command() { + if command -v "$1" >/dev/null 2>&1; then + wrap_good "$1 command" 'available' + else + wrap_bad "$1 command" 'missing' + EXITCODE=1 + fi +} + +check_device() { + if [ -c "$1" ]; then + wrap_good "$1" 'present' + else + wrap_bad "$1" 'missing' + EXITCODE=1 + fi +} + +check_distro_userns() { + source /etc/os-release 2>/dev/null || /bin/true + if [[ "${ID}" =~ ^(centos|rhel)$ && "${VERSION_ID}" =~ ^7 ]]; then + # this is a CentOS7 or RHEL7 system + grep -q "user_namespace.enable=1" /proc/cmdline || { + # no user namespace support enabled + wrap_bad " (RHEL7/CentOS7" "User namespaces disabled; add 'user_namespace.enable=1' to boot command line)" + EXITCODE=1 + } + fi +} + +if [ ! -e "$CONFIG" ]; then + wrap_warning "warning: $CONFIG does not exist, searching other paths for kernel config ..." + for tryConfig in "${possibleConfigs[@]}"; do + if [ -e "$tryConfig" ]; then + CONFIG="$tryConfig" + break + fi + done + if [ ! 
-e "$CONFIG" ]; then + wrap_warning "error: cannot find kernel config" + wrap_warning " try running this script again, specifying the kernel config:" + wrap_warning " CONFIG=/path/to/kernel/.config $0 or $0 /path/to/kernel/.config" + exit 1 + fi +fi + +wrap_color "info: reading kernel config from $CONFIG ..." white +echo + +echo 'Generally Necessary:' + +echo -n '- ' +cgroupSubsystemDir="$(awk '/[, ](cpu|cpuacct|cpuset|devices|freezer|memory)[, ]/ && $3 == "cgroup" { print $2 }' /proc/mounts | head -n1)" +cgroupDir="$(dirname "$cgroupSubsystemDir")" +if [ -d "$cgroupDir/cpu" -o -d "$cgroupDir/cpuacct" -o -d "$cgroupDir/cpuset" -o -d "$cgroupDir/devices" -o -d "$cgroupDir/freezer" -o -d "$cgroupDir/memory" ]; then + echo "$(wrap_good 'cgroup hierarchy' 'properly mounted') [$cgroupDir]" +else + if [ "$cgroupSubsystemDir" ]; then + echo "$(wrap_bad 'cgroup hierarchy' 'single mountpoint!') [$cgroupSubsystemDir]" + else + echo "$(wrap_bad 'cgroup hierarchy' 'nonexistent??')" + fi + EXITCODE=1 + echo " $(wrap_color '(see https://github.com/tianon/cgroupfs-mount)' yellow)" +fi + +if [ "$(cat /sys/module/apparmor/parameters/enabled 2>/dev/null)" = 'Y' ]; then + echo -n '- ' + if command -v apparmor_parser &> /dev/null; then + echo "$(wrap_good 'apparmor' 'enabled and tools installed')" + else + echo "$(wrap_bad 'apparmor' 'enabled, but apparmor_parser missing')" + echo -n ' ' + if command -v apt-get &> /dev/null; then + echo "$(wrap_color '(use "apt-get install apparmor" to fix this)')" + elif command -v yum &> /dev/null; then + echo "$(wrap_color '(your best bet is "yum install apparmor-parser")')" + else + echo "$(wrap_color '(look for an "apparmor" package for your distribution)')" + fi + EXITCODE=1 + fi +fi + +flags=( + NAMESPACES {NET,PID,IPC,UTS}_NS + CGROUPS CGROUP_CPUACCT CGROUP_DEVICE CGROUP_FREEZER CGROUP_SCHED CPUSETS MEMCG + KEYS + VETH BRIDGE BRIDGE_NETFILTER + NF_NAT_IPV4 IP_NF_FILTER IP_NF_TARGET_MASQUERADE + NETFILTER_XT_MATCH_{ADDRTYPE,CONNTRACK,IPVS} + IP_NF_NAT NF_NAT NF_NAT_NEEDED + + # required for bind-mounting /dev/mqueue into containers + POSIX_MQUEUE +) +check_flags "${flags[@]}" +if [ "$kernelMajor" -lt 4 ] || [ "$kernelMajor" -eq 4 -a "$kernelMinor" -lt 8 ]; then + check_flags DEVPTS_MULTIPLE_INSTANCES +fi + +echo + +echo 'Optional Features:' +{ + check_flags USER_NS + check_distro_userns +} +{ + check_flags SECCOMP +} +{ + check_flags CGROUP_PIDS +} +{ + CODE=${EXITCODE} + check_flags MEMCG_SWAP MEMCG_SWAP_ENABLED + if [ -e /sys/fs/cgroup/memory/memory.memsw.limit_in_bytes ]; then + echo " $(wrap_color '(cgroup swap accounting is currently enabled)' bold black)" + EXITCODE=${CODE} + elif is_set MEMCG_SWAP && ! is_set MEMCG_SWAP_ENABLED; then + echo " $(wrap_color '(cgroup swap accounting is currently not enabled, you can enable it by setting boot option "swapaccount=1")' bold black)" + fi +} +{ + if is_set LEGACY_VSYSCALL_NATIVE; then + echo -n "- "; wrap_bad "CONFIG_LEGACY_VSYSCALL_NATIVE" 'enabled' + echo " $(wrap_color '(dangerous, provides an ASLR-bypassing target with usable ROP gadgets.)' bold black)" + elif is_set LEGACY_VSYSCALL_EMULATE; then + echo -n "- "; wrap_good "CONFIG_LEGACY_VSYSCALL_EMULATE" 'enabled' + elif is_set LEGACY_VSYSCALL_NONE; then + echo -n "- "; wrap_bad "CONFIG_LEGACY_VSYSCALL_NONE" 'enabled' + echo " $(wrap_color '(containers using eglibc <= 2.13 will not work. Switch to' bold black)" + echo " $(wrap_color ' "CONFIG_VSYSCALL_[NATIVE|EMULATE]" or use "vsyscall=[native|emulate]"' bold black)" + echo " $(wrap_color ' on kernel command line. 
Note that this will disable ASLR for the' bold black)"
+ echo " $(wrap_color ' VDSO, which may assist in exploiting security vulnerabilities.)' bold black)"
+ # else Older kernels (prior to 3dc33bd30f3e, released in v4.4-rc1) do
+ # not have these LEGACY_VSYSCALL options and are effectively
+ # LEGACY_VSYSCALL_EMULATE. Even older kernels are presumably
+ # effectively LEGACY_VSYSCALL_NATIVE.
+ fi
+}
+
+if [ "$kernelMajor" -lt 4 ] || [ "$kernelMajor" -eq 4 -a "$kernelMinor" -le 5 ]; then
+ check_flags MEMCG_KMEM
+fi
+
+if [ "$kernelMajor" -lt 3 ] || [ "$kernelMajor" -eq 3 -a "$kernelMinor" -le 18 ]; then
+ check_flags RESOURCE_COUNTERS
+fi
+
+if [ "$kernelMajor" -lt 3 ] || [ "$kernelMajor" -eq 3 -a "$kernelMinor" -le 13 ]; then
+ netprio=NETPRIO_CGROUP
+else
+ netprio=CGROUP_NET_PRIO
+fi
+
+flags=(
+ BLK_CGROUP BLK_DEV_THROTTLING IOSCHED_CFQ CFQ_GROUP_IOSCHED
+ CGROUP_PERF
+ CGROUP_HUGETLB
+ NET_CLS_CGROUP $netprio
+ CFS_BANDWIDTH FAIR_GROUP_SCHED RT_GROUP_SCHED
+ IP_VS
+ IP_VS_NFCT
+ IP_VS_RR
+)
+check_flags "${flags[@]}"
+
+if ! is_set EXT4_USE_FOR_EXT2; then
+ check_flags EXT3_FS EXT3_FS_XATTR EXT3_FS_POSIX_ACL EXT3_FS_SECURITY
+ if ! is_set EXT3_FS || ! is_set EXT3_FS_XATTR || ! is_set EXT3_FS_POSIX_ACL || ! is_set EXT3_FS_SECURITY; then
+ echo " $(wrap_color '(enable these ext3 configs if you are using ext3 as backing filesystem)' bold black)"
+ fi
+fi
+
+check_flags EXT4_FS EXT4_FS_POSIX_ACL EXT4_FS_SECURITY
+if ! is_set EXT4_FS || ! is_set EXT4_FS_POSIX_ACL || ! is_set EXT4_FS_SECURITY; then
+ if is_set EXT4_USE_FOR_EXT2; then
+ echo " $(wrap_color 'enable these ext4 configs if you are using ext3 or ext4 as backing filesystem' bold black)"
+ else
+ echo " $(wrap_color 'enable these ext4 configs if you are using ext4 as backing filesystem' bold black)"
+ fi
+fi
+
+echo '- Network Drivers:'
+echo ' - "'$(wrap_color 'overlay' blue)'":'
+check_flags VXLAN | sed 's/^/ /'
+echo ' Optional (for encrypted networks):'
+check_flags CRYPTO CRYPTO_AEAD CRYPTO_GCM CRYPTO_SEQIV CRYPTO_GHASH \
+ XFRM XFRM_USER XFRM_ALGO INET_ESP INET_XFRM_MODE_TRANSPORT | sed 's/^/ /'
+echo ' - "'$(wrap_color 'ipvlan' blue)'":'
+check_flags IPVLAN | sed 's/^/ /'
+echo ' - "'$(wrap_color 'macvlan' blue)'":'
+check_flags MACVLAN DUMMY | sed 's/^/ /'
+echo ' - "'$(wrap_color 'ftp,tftp client in container' blue)'":'
+check_flags NF_NAT_FTP NF_CONNTRACK_FTP NF_NAT_TFTP NF_CONNTRACK_TFTP | sed 's/^/ /'
+
+# only fail if no storage drivers available
+CODE=${EXITCODE}
+EXITCODE=0
+STORAGE=1
+
+echo '- Storage Drivers:'
+echo ' - "'$(wrap_color 'aufs' blue)'":'
+check_flags AUFS_FS | sed 's/^/ /'
+if !
is_set AUFS_FS && grep -q aufs /proc/filesystems; then + echo " $(wrap_color '(note that some kernels include AUFS patches but not the AUFS_FS flag)' bold black)" +fi +[ "$EXITCODE" = 0 ] && STORAGE=0 +EXITCODE=0 + +echo ' - "'$(wrap_color 'btrfs' blue)'":' +check_flags BTRFS_FS | sed 's/^/ /' +check_flags BTRFS_FS_POSIX_ACL | sed 's/^/ /' +[ "$EXITCODE" = 0 ] && STORAGE=0 +EXITCODE=0 + +echo ' - "'$(wrap_color 'devicemapper' blue)'":' +check_flags BLK_DEV_DM DM_THIN_PROVISIONING | sed 's/^/ /' +[ "$EXITCODE" = 0 ] && STORAGE=0 +EXITCODE=0 + +echo ' - "'$(wrap_color 'overlay' blue)'":' +check_flags OVERLAY_FS | sed 's/^/ /' +[ "$EXITCODE" = 0 ] && STORAGE=0 +EXITCODE=0 + +echo ' - "'$(wrap_color 'zfs' blue)'":' +echo -n " - "; check_device /dev/zfs +echo -n " - "; check_command zfs +echo -n " - "; check_command zpool +[ "$EXITCODE" = 0 ] && STORAGE=0 +EXITCODE=0 + +EXITCODE=$CODE +[ "$STORAGE" = 1 ] && EXITCODE=1 + +echo + +check_limit_over() +{ + if [ $(cat "$1") -le "$2" ]; then + wrap_bad "- $1" "$(cat $1)" + wrap_color " This should be set to at least $2, for example set: sysctl -w kernel/keys/root_maxkeys=1000000" bold black + EXITCODE=1 + else + wrap_good "- $1" "$(cat $1)" + fi +} + +echo 'Limits:' +check_limit_over /proc/sys/kernel/keys/root_maxkeys 10000 +echo + +exit $EXITCODE diff --git a/components/engine/contrib/completion/REVIEWERS b/components/engine/contrib/completion/REVIEWERS new file mode 100644 index 0000000000..03ee2dde3d --- /dev/null +++ b/components/engine/contrib/completion/REVIEWERS @@ -0,0 +1,2 @@ +Tianon Gravi (@tianon) +Jessie Frazelle (@jfrazelle) diff --git a/components/engine/contrib/completion/bash/docker b/components/engine/contrib/completion/bash/docker new file mode 100644 index 0000000000..79209c2941 --- /dev/null +++ b/components/engine/contrib/completion/bash/docker @@ -0,0 +1,4629 @@ +#!/usr/bin/env bash +# +# bash completion file for core docker commands +# +# This script provides completion of: +# - commands and their options +# - container ids and names +# - image repos and tags +# - filepaths +# +# To enable the completions either: +# - place this file in /etc/bash_completion.d +# or +# - copy this file to e.g. ~/.docker-completion.sh and add the line +# below to your .bashrc after bash completion features are loaded +# . ~/.docker-completion.sh +# +# Configuration: +# +# For several commands, the amount of completions can be configured by +# setting environment variables. 
+#
+# DOCKER_COMPLETION_SHOW_CONTAINER_IDS
+# DOCKER_COMPLETION_SHOW_NETWORK_IDS
+# DOCKER_COMPLETION_SHOW_NODE_IDS
+# DOCKER_COMPLETION_SHOW_PLUGIN_IDS
+# DOCKER_COMPLETION_SHOW_SECRET_IDS
+# DOCKER_COMPLETION_SHOW_SERVICE_IDS
+# "no" - Show names only (default)
+# "yes" - Show names and ids
+#
+# You can tailor completion for the "events", "history", "inspect", "run",
+# "rmi" and "save" commands by setting the following environment
+# variables:
+#
+# DOCKER_COMPLETION_SHOW_IMAGE_IDS
+# "none" - Show names only (default)
+# "non-intermediate" - Show names and ids, but omit intermediate image IDs
+# "all" - Show names and ids, including intermediate image IDs
+#
+# DOCKER_COMPLETION_SHOW_TAGS
+# "yes" - include tags in completion options (default)
+# "no" - don't include tags in completion options
+
+#
+# Note:
+# Currently, the completions will not work if the docker daemon is not
+# bound to the default communication port/socket.
+# If the docker daemon is using a unix socket for communication, your user
+# must have access to the socket for the completions to function correctly.
+#
+# Note for developers:
+# Please arrange options sorted alphabetically by long name with the short
+# options immediately following their corresponding long form.
+# This order should be applied to lists, alternatives and code blocks.
+
+__docker_previous_extglob_setting=$(shopt -p extglob)
+shopt -s extglob
+
+__docker_q() {
+ docker ${host:+-H "$host"} ${config:+--config "$config"} 2>/dev/null "$@"
+}
+
+# __docker_containers returns a list of containers. Additional options to
+# `docker ps` may be specified in order to filter the list, e.g.
+# `__docker_containers --filter status=running`
+# By default, only names are returned.
+# Set DOCKER_COMPLETION_SHOW_CONTAINER_IDS=yes to also complete IDs.
+# An optional first option `--id|--name` may be used to limit the
+# output to the IDs or names of matching items. This setting takes
+# precedence over the environment setting.
+__docker_containers() {
+ local format
+ if [ "$1" = "--id" ] ; then
+ format='{{.ID}}'
+ shift
+ elif [ "$1" = "--name" ] ; then
+ format='{{.Names}}'
+ shift
+ elif [ "${DOCKER_COMPLETION_SHOW_CONTAINER_IDS}" = yes ] ; then
+ format='{{.ID}} {{.Names}}'
+ else
+ format='{{.Names}}'
+ fi
+ __docker_q ps --format "$format" "$@"
+}
+
+# __docker_complete_containers applies completion of containers based on the current
+# value of `$cur` or the value of the optional first option `--cur`, if given.
+# Additional filters may be appended, see `__docker_containers`.
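+# Example (illustrative, not part of the script itself): with
+#   export DOCKER_COMPLETION_SHOW_CONTAINER_IDS=yes
+# a call such as
+#   __docker_complete_containers --filter status=running
+# fills COMPREPLY with both the IDs and the names of the running
+# containers that match the word currently being completed.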
+__docker_complete_containers() {
+ local current="$cur"
+ if [ "$1" = "--cur" ] ; then
+ current="$2"
+ shift 2
+ fi
+ COMPREPLY=( $(compgen -W "$(__docker_containers "$@")" -- "$current") )
+}
+
+__docker_complete_containers_all() {
+ __docker_complete_containers "$@" --all
+}
+
+__docker_complete_containers_removable() {
+ __docker_complete_containers "$@" --filter status=created --filter status=exited
+}
+
+__docker_complete_containers_running() {
+ __docker_complete_containers "$@" --filter status=running
+}
+
+__docker_complete_containers_stopped() {
+ __docker_complete_containers "$@" --filter status=exited
+}
+
+__docker_complete_containers_unpauseable() {
+ __docker_complete_containers "$@" --filter status=paused
+}
+
+__docker_complete_container_names() {
+ local containers=( $(__docker_q ps -aq --no-trunc) )
+ local names=( $(__docker_q inspect --format '{{.Name}}' "${containers[@]}") )
+ names=( "${names[@]#/}" ) # trim off the leading "/" from the container names
+ COMPREPLY=( $(compgen -W "${names[*]}" -- "$cur") )
+}
+
+__docker_complete_container_ids() {
+ local containers=( $(__docker_q ps -aq) )
+ COMPREPLY=( $(compgen -W "${containers[*]}" -- "$cur") )
+}
+
+__docker_images() {
+ local images_args=""
+
+ case "$DOCKER_COMPLETION_SHOW_IMAGE_IDS" in
+ all)
+ images_args="--no-trunc -a"
+ ;;
+ non-intermediate)
+ images_args="--no-trunc"
+ ;;
+ esac
+
+ local repo_print_command
+ if [ "${DOCKER_COMPLETION_SHOW_TAGS:-yes}" = "yes" ]; then
+ repo_print_command='print $1; print $1":"$2'
+ else
+ repo_print_command='print $1'
+ fi
+
+ local awk_script
+ case "$DOCKER_COMPLETION_SHOW_IMAGE_IDS" in
+ all|non-intermediate)
+ awk_script='NR>1 { print $3; if ($1 != "<none>") { '"$repo_print_command"' } }'
+ ;;
+ none|*)
+ awk_script='NR>1 && $1 != "<none>" { '"$repo_print_command"' }'
+ ;;
+ esac
+
+ __docker_q images $images_args | awk "$awk_script" | grep -v '<none>$'
+}
+
+__docker_complete_images() {
+ COMPREPLY=( $(compgen -W "$(__docker_images)" -- "$cur") )
+ __ltrim_colon_completions "$cur"
+}
+
+__docker_complete_image_repos() {
+ local repos="$(__docker_q images | awk 'NR>1 && $1 != "<none>" { print $1 }')"
+ COMPREPLY=( $(compgen -W "$repos" -- "$cur") )
+}
+
+__docker_complete_image_repos_and_tags() {
+ local reposAndTags="$(__docker_q images | awk 'NR>1 && $1 != "<none>" { print $1; print $1":"$2 }')"
+ COMPREPLY=( $(compgen -W "$reposAndTags" -- "$cur") )
+ __ltrim_colon_completions "$cur"
+}
+
+# __docker_networks returns a list of all networks. Additional options to
+# `docker network ls` may be specified in order to filter the list, e.g.
+# `__docker_networks --filter type=custom`
+# By default, only names are returned.
+# Set DOCKER_COMPLETION_SHOW_NETWORK_IDS=yes to also complete IDs.
+# An optional first option `--id|--name` may be used to limit the
+# output to the IDs or names of matching items. This setting takes
+# precedence over the environment setting.
+__docker_networks() {
+ local format
+ if [ "$1" = "--id" ] ; then
+ format='{{.ID}}'
+ shift
+ elif [ "$1" = "--name" ] ; then
+ format='{{.Name}}'
+ shift
+ elif [ "${DOCKER_COMPLETION_SHOW_NETWORK_IDS}" = yes ] ; then
+ format='{{.ID}} {{.Name}}'
+ else
+ format='{{.Name}}'
+ fi
+ __docker_q network ls --format "$format" "$@"
+}
+
+# __docker_complete_networks applies completion of networks based on the current
+# value of `$cur` or the value of the optional first option `--cur`, if given.
+# Additional filters may be appended, see `__docker_networks`.
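+# Example (illustrative): the `network` filter of `docker ps` is completed
+# further down in this file with
+#   __docker_complete_networks --cur "${cur##*=}"
+# so that network names are matched against the text after the "=" rather
+# than against the whole current word.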
+__docker_complete_networks() {
+ local current="$cur"
+ if [ "$1" = "--cur" ] ; then
+ current="$2"
+ shift 2
+ fi
+ COMPREPLY=( $(compgen -W "$(__docker_networks "$@")" -- "$current") )
+}
+
+__docker_complete_containers_in_network() {
+ local containers=$(__docker_q network inspect -f '{{range $i, $c := .Containers}}{{$i}} {{$c.Name}} {{end}}' "$1")
+ COMPREPLY=( $(compgen -W "$containers" -- "$cur") )
+}
+
+# __docker_volumes returns a list of all volumes. Additional options to
+# `docker volume ls` may be specified in order to filter the list, e.g.
+# `__docker_volumes --filter dangling=true`
+# Because volumes do not have IDs, this function does not distinguish between
+# IDs and names.
+__docker_volumes() {
+ __docker_q volume ls -q "$@"
+}
+
+# __docker_complete_volumes applies completion of volumes based on the current
+# value of `$cur` or the value of the optional first option `--cur`, if given.
+# Additional filters may be appended, see `__docker_volumes`.
+__docker_complete_volumes() {
+ local current="$cur"
+ if [ "$1" = "--cur" ] ; then
+ current="$2"
+ shift 2
+ fi
+ COMPREPLY=( $(compgen -W "$(__docker_volumes "$@")" -- "$current") )
+}
+
+# __docker_plugins_bundled returns a list of all plugins of a given type.
+# The type has to be specified with the mandatory option `--type`.
+# Valid types are: Network, Volume, Authorization.
+# Completions may be added or removed with `--add` and `--remove`.
+# This function only deals with plugins that come bundled with Docker.
+# For plugins managed by `docker plugin`, see `__docker_plugins_installed`.
+__docker_plugins_bundled() {
+ local type add=() remove=()
+ while true ; do
+ case "$1" in
+ --type)
+ type="$2"
+ shift 2
+ ;;
+ --add)
+ add+=("$2")
+ shift 2
+ ;;
+ --remove)
+ remove+=("$2")
+ shift 2
+ ;;
+ *)
+ break
+ ;;
+ esac
+ done
+
+ local plugins=($(__docker_q info --format "{{range \$i, \$p := .Plugins.$type}}{{.}} {{end}}"))
+ for del in "${remove[@]}" ; do
+ plugins=(${plugins[@]/$del/})
+ done
+ echo "${plugins[@]} ${add[@]}"
+}
+
+# __docker_complete_plugins_bundled applies completion of plugins based on the current
+# value of `$cur` or the value of the optional first option `--cur`, if given.
+# The plugin type has to be specified with the next option `--type`.
+# This function only deals with plugins that come bundled with Docker.
+# For completion of plugins managed by `docker plugin`, see
+# `__docker_complete_plugins_installed`.
+__docker_complete_plugins_bundled() {
+ local current="$cur"
+ if [ "$1" = "--cur" ] ; then
+ current="$2"
+ shift 2
+ fi
+ COMPREPLY=( $(compgen -W "$(__docker_plugins_bundled "$@")" -- "$current") )
+}
+
+# __docker_plugins_installed returns a list of all plugins that were installed with
+# the Docker plugin API.
+# By default, only names are returned.
+# Set DOCKER_COMPLETION_SHOW_PLUGIN_IDS=yes to also complete IDs.
+# Additional options to `docker plugin ls` may be specified in order to filter the list,
+# e.g. `__docker_plugins_installed --filter enabled=true`
+# For built-in plugins, see `__docker_plugins_bundled`.
+__docker_plugins_installed() {
+ local format
+ if [ "$DOCKER_COMPLETION_SHOW_PLUGIN_IDS" = yes ] ; then
+ format='{{.ID}} {{.Name}}'
+ else
+ format='{{.Name}}'
+ fi
+ __docker_q plugin ls --format "$format" "$@"
+}
+
+# __docker_complete_plugins_installed applies completion of plugins that were installed
+# with the Docker plugin API, based on the current value of `$cur` or the value of
+# the optional first option `--cur`, if given.
+# Additional filters may be appended, see `__docker_plugins_installed`.
+# For completion of built-in plugins, see `__docker_complete_plugins_bundled`.
+__docker_complete_plugins_installed() {
+ local current="$cur"
+ if [ "$1" = "--cur" ] ; then
+ current="$2"
+ shift 2
+ fi
+ COMPREPLY=( $(compgen -W "$(__docker_plugins_installed "$@")" -- "$current") )
+}
+
+__docker_runtimes() {
+ __docker_q info | sed -n 's/^Runtimes: \(.*\)/\1/p'
+}
+
+__docker_complete_runtimes() {
+ COMPREPLY=( $(compgen -W "$(__docker_runtimes)" -- "$cur") )
+}
+
+# __docker_secrets returns a list of secrets. Additional options to
+# `docker secret ls` may be specified in order to filter the list, e.g.
+# `__docker_secrets --filter label=stage=production`
+# By default, only names are returned.
+# Set DOCKER_COMPLETION_SHOW_SECRET_IDS=yes to also complete IDs.
+# An optional first option `--id|--name` may be used to limit the
+# output to the IDs or names of matching items. This setting takes
+# precedence over the environment setting.
+__docker_secrets() {
+ local format
+ if [ "$1" = "--id" ] ; then
+ format='{{.ID}}'
+ shift
+ elif [ "$1" = "--name" ] ; then
+ format='{{.Name}}'
+ shift
+ elif [ "$DOCKER_COMPLETION_SHOW_SECRET_IDS" = yes ] ; then
+ format='{{.ID}} {{.Name}}'
+ else
+ format='{{.Name}}'
+ fi
+
+ __docker_q secret ls --format "$format" "$@"
+}
+
+# __docker_complete_secrets applies completion of secrets based on the current value
+# of `$cur` or the value of the optional first option `--cur`, if given.
+__docker_complete_secrets() {
+ local current="$cur"
+ if [ "$1" = "--cur" ] ; then
+ current="$2"
+ shift 2
+ fi
+ COMPREPLY=( $(compgen -W "$(__docker_secrets "$@")" -- "$current") )
+}
+
+# __docker_stacks returns a list of all stacks.
+__docker_stacks() {
+ __docker_q stack ls | awk 'NR>1 {print $1}'
+}
+
+# __docker_complete_stacks applies completion of stacks based on the current value
+# of `$cur` or the value of the optional first option `--cur`, if given.
+__docker_complete_stacks() {
+ local current="$cur"
+ if [ "$1" = "--cur" ] ; then
+ current="$2"
+ shift 2
+ fi
+ COMPREPLY=( $(compgen -W "$(__docker_stacks "$@")" -- "$current") )
+}
+
+# __docker_nodes returns a list of all nodes. Additional options to
+# `docker node ls` may be specified in order to filter the list, e.g.
+# `__docker_nodes --filter role=manager`
+# By default, only node names are returned.
+# Set DOCKER_COMPLETION_SHOW_NODE_IDS=yes to also complete node IDs.
+# An optional first option `--id|--name` may be used to limit the
+# output to the IDs or names of matching items. This setting takes
+# precedence over the environment setting.
+# Completions may be added with `--add`, e.g. `--add self`.
+__docker_nodes() {
+ local add=()
+ local fields='$2' # default: node name only
+ [ "${DOCKER_COMPLETION_SHOW_NODE_IDS}" = yes ] && fields='$1,$2' # ID and name
+
+ while true ; do
+ case "$1" in
+ --id)
+ fields='$1' # IDs only
+ shift
+ ;;
+ --name)
+ fields='$2' # names only
+ shift
+ ;;
+ --add)
+ add+=("$2")
+ shift 2
+ ;;
+ *)
+ break
+ ;;
+ esac
+ done
+
+ echo $(__docker_q node ls "$@" | tr -d '*' | awk "NR>1 {print $fields}") "${add[@]}"
+}
+
+# __docker_complete_nodes applies completion of nodes based on the current
+# value of `$cur` or the value of the optional first option `--cur`, if given.
+# Additional filters may be appended, see `__docker_nodes`.
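+# Example (illustrative): to offer manager nodes plus the literal word
+# "self", one could call
+#   __docker_complete_nodes --add self --filter role=manager
+# `--add self` is consumed by __docker_nodes itself; the remaining options
+# are passed through to `docker node ls`.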
+__docker_complete_nodes() {
+ local current="$cur"
+ if [ "$1" = "--cur" ] ; then
+ current="$2"
+ shift 2
+ fi
+ COMPREPLY=( $(compgen -W "$(__docker_nodes "$@")" -- "$current") )
+}
+
+# __docker_services returns a list of all services. Additional options to
+# `docker service ls` may be specified in order to filter the list, e.g.
+# `__docker_services --filter name=xxx`
+# By default, only service names are returned.
+# Set DOCKER_COMPLETION_SHOW_SERVICE_IDS=yes to also complete IDs.
+# An optional first option `--id|--name` may be used to limit the
+# output to the IDs or names of matching items. This setting takes
+# precedence over the environment setting.
+__docker_services() {
+ local fields='$2' # default: service name only
+ [ "${DOCKER_COMPLETION_SHOW_SERVICE_IDS}" = yes ] && fields='$1,$2' # ID & name
+
+ if [ "$1" = "--id" ] ; then
+ fields='$1' # IDs only
+ shift
+ elif [ "$1" = "--name" ] ; then
+ fields='$2' # names only
+ shift
+ fi
+ __docker_q service ls "$@" | awk "NR>1 {print $fields}"
+}
+
+# __docker_complete_services applies completion of services based on the current
+# value of `$cur` or the value of the optional first option `--cur`, if given.
+# Additional filters may be appended, see `__docker_services`.
+__docker_complete_services() {
+ local current="$cur"
+ if [ "$1" = "--cur" ] ; then
+ current="$2"
+ shift 2
+ fi
+ COMPREPLY=( $(compgen -W "$(__docker_services "$@")" -- "$current") )
+}
+
+# __docker_tasks returns a list of all task IDs.
+__docker_tasks() {
+ __docker_q service ps --format '{{.ID}}' ""
+}
+
+# __docker_complete_services_and_tasks applies completion of services and task IDs.
+__docker_complete_services_and_tasks() {
+ COMPREPLY=( $(compgen -W "$(__docker_services "$@") $(__docker_tasks)" -- "$cur") )
+}
+
+# __docker_append_to_completions appends the word passed as an argument to every
+# word in `$COMPREPLY`.
+# Normally you do this with `compgen -S` while generating the completions.
+# This function allows you to append a suffix later. It allows you to use
+# the __docker_complete_XXX functions in cases where you need a suffix.
+__docker_append_to_completions() {
+ COMPREPLY=( ${COMPREPLY[@]/%/"$1"} )
+}
+
+# __docker_daemon_is_experimental tests whether the currently configured Docker
+# daemon runs in experimental mode. If so, the function exits with 0 (true).
+# Otherwise, or if the result cannot be determined, the exit value is 1 (false).
+__docker_daemon_is_experimental() {
+ [ "$(__docker_q version -f '{{.Server.Experimental}}')" = "true" ]
+}
+
+# __docker_daemon_os_is tests whether the currently configured Docker daemon runs
+# on the operating system passed in as the first argument.
+# It does so by querying the daemon for its OS. The result is cached for the duration
+# of one invocation of bash completion so that this function can be used to test for
+# several different operating systems without additional costs.
+# Known operating systems: linux, windows.
+__docker_daemon_os_is() {
+ local expected_os="$1"
+ local actual_os=${daemon_os=$(__docker_q version -f '{{.Server.Os}}')}
+ [ "$actual_os" = "$expected_os" ]
+}
+
+# __docker_pos_first_nonflag finds the position of the first word that is neither
+# an option nor an option's argument. If there are options that require arguments,
+# you should pass a glob describing those options, e.g. "--option1|-o|--option2"
+# Use this function to restrict completions to exact positions after the argument list.
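+# Example (illustrative): given the input line
+#   docker run --name web -d myimage
+# `__docker_pos_first_nonflag '--name'` skips "--name web" (an option plus
+# its argument) and the plain flag "-d", and returns the position of
+# "myimage", the first non-flag word.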
+__docker_pos_first_nonflag() {
+ local argument_flags=$1
+
+ local counter=$((${subcommand_pos:-${command_pos}} + 1))
+ while [ $counter -le $cword ]; do
+ if [ -n "$argument_flags" ] && eval "case '${words[$counter]}' in $argument_flags) true ;; *) false ;; esac"; then
+ (( counter++ ))
+ # eat "=" in case of --option=arg syntax
+ [ "${words[$counter]}" = "=" ] && (( counter++ ))
+ else
+ case "${words[$counter]}" in
+ -*)
+ ;;
+ *)
+ break
+ ;;
+ esac
+ fi
+
+ # Bash splits words at "=", retaining "=" as a word, examples:
+ # "--debug=false" => 3 words, "--log-opt syslog-facility=daemon" => 4 words
+ while [ "${words[$counter + 1]}" = "=" ] ; do
+ counter=$(( counter + 2))
+ done
+
+ (( counter++ ))
+ done
+
+ echo $counter
+}
+
+# __docker_map_key_of_current_option returns `key` if we are currently completing the
+# value of a map option (`key=value`) which matches the extglob given as an argument.
+# This function is needed for key-specific completions.
+__docker_map_key_of_current_option() {
+ local glob="$1"
+
+ local key glob_pos
+ if [ "$cur" = "=" ] ; then # key= case
+ key="$prev"
+ glob_pos=$((cword - 2))
+ elif [[ $cur == *=* ]] ; then # key=value case (OSX)
+ key=${cur%=*}
+ glob_pos=$((cword - 1))
+ elif [ "$prev" = "=" ] ; then
+ key=${words[$cword - 2]} # key=value case
+ glob_pos=$((cword - 3))
+ else
+ return
+ fi
+
+ [ "${words[$glob_pos]}" = "=" ] && ((glob_pos--)) # --option=key=value syntax
+
+ [[ ${words[$glob_pos]} == @($glob) ]] && echo "$key"
+}
+
+# __docker_value_of_option returns the value of the first option matching `option_glob`.
+# Valid values for `option_glob` are option names like `--log-level` and globs like
+# `--log-level|-l`
+# Only positions between the command and the current word are considered.
+__docker_value_of_option() {
+ local option_extglob=$(__docker_to_extglob "$1")
+
+ local counter=$((command_pos + 1))
+ while [ $counter -lt $cword ]; do
+ case ${words[$counter]} in
+ $option_extglob )
+ echo ${words[$counter + 1]}
+ break
+ ;;
+ esac
+ (( counter++ ))
+ done
+}
+
+# __docker_to_alternatives transforms a multiline list of strings into a single line
+# string with the words separated by `|`.
+# This is used to prepare arguments to __docker_pos_first_nonflag().
+__docker_to_alternatives() {
+ local parts=( $1 )
+ local IFS='|'
+ echo "${parts[*]}"
+}
+
+# __docker_to_extglob transforms a multiline list of options into an extglob pattern
+# suitable for use in case statements.
+__docker_to_extglob() {
+ local extglob=$( __docker_to_alternatives "$1" )
+ echo "@($extglob)"
+}
+
+# __docker_subcommands processes subcommands
+# Locates the first occurrence of any of the subcommands contained in the
+# first argument. In case of a match, calls the corresponding completion
+# function and returns 0.
+# If no match is found, 1 is returned. The calling function can then
+# continue processing its completion.
+#
+# TODO if the preceding command has options that accept arguments and an
+# argument is equal to one of the subcommands, this is falsely detected as
+# a match.
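+# Example (illustrative): _docker_network further down calls
+#   __docker_subcommands "$subcommands $aliases" && return
+# For `docker network ls --<TAB>` this matches the subcommand "ls" and
+# dispatches to the completion function _docker_network_ls.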
+__docker_subcommands() { + local subcommands="$1" + + local counter=$(($command_pos + 1)) + while [ $counter -lt $cword ]; do + case "${words[$counter]}" in + $(__docker_to_extglob "$subcommands") ) + subcommand_pos=$counter + local subcommand=${words[$counter]} + local completions_func=_docker_${command}_${subcommand//-/_} + declare -F $completions_func >/dev/null && $completions_func + return 0 + ;; + esac + (( counter++ )) + done + return 1 +} + +# __docker_nospace suppresses trailing whitespace +__docker_nospace() { + # compopt is not available in ancient bash versions + type compopt &>/dev/null && compopt -o nospace +} + +__docker_complete_resolved_hostname() { + command -v host >/dev/null 2>&1 || return + COMPREPLY=( $(host 2>/dev/null "${cur%:}" | awk '/has address/ {print $4}') ) +} + +__docker_local_interfaces() { + command -v ip >/dev/null 2>&1 || return + ip addr show scope global 2>/dev/null | sed -n 's| \+inet \([0-9.]\+\).* \([^ ]\+\)|\1 \2|p' +} + +__docker_complete_local_interfaces() { + local additional_interface + if [ "$1" = "--add" ] ; then + additional_interface="$2" + fi + + COMPREPLY=( $( compgen -W "$(__docker_local_interfaces) $additional_interface" -- "$cur" ) ) +} + +# __docker_complete_capabilities_addable completes Linux capabilities which are +# not granted by default and may be added. +# see https://docs.docker.com/engine/reference/run/#/runtime-privilege-and-linux-capabilities +__docker_complete_capabilities_addable() { + COMPREPLY=( $( compgen -W " + ALL + AUDIT_CONTROL + BLOCK_SUSPEND + DAC_READ_SEARCH + IPC_LOCK + IPC_OWNER + LEASE + LINUX_IMMUTABLE + MAC_ADMIN + MAC_OVERRIDE + NET_ADMIN + NET_BROADCAST + SYS_ADMIN + SYS_BOOT + SYSLOG + SYS_MODULE + SYS_NICE + SYS_PACCT + SYS_PTRACE + SYS_RAWIO + SYS_RESOURCE + SYS_TIME + SYS_TTY_CONFIG + WAKE_ALARM + " -- "$cur" ) ) +} + +# __docker_complete_capabilities_droppable completes Linux capability options which are +# allowed by default and can be dropped. +# see https://docs.docker.com/engine/reference/run/#/runtime-privilege-and-linux-capabilities +__docker_complete_capabilities_droppable() { + COMPREPLY=( $( compgen -W " + ALL + AUDIT_WRITE + CHOWN + DAC_OVERRIDE + FOWNER + FSETID + KILL + MKNOD + NET_BIND_SERVICE + NET_RAW + SETFCAP + SETGID + SETPCAP + SETUID + SYS_CHROOT + " -- "$cur" ) ) +} + +__docker_complete_detach_keys() { + case "$prev" in + --detach-keys) + case "$cur" in + *,) + COMPREPLY=( $( compgen -W "${cur}ctrl-" -- "$cur" ) ) + ;; + *) + COMPREPLY=( $( compgen -W "ctrl-" -- "$cur" ) ) + ;; + esac + + __docker_nospace + return + ;; + esac + return 1 +} + +__docker_complete_isolation() { + COMPREPLY=( $( compgen -W "default hyperv process" -- "$cur" ) ) +} + +__docker_complete_log_drivers() { + COMPREPLY=( $( compgen -W " + awslogs + etwlogs + fluentd + gcplogs + gelf + journald + json-file + logentries + none + splunk + syslog + " -- "$cur" ) ) +} + +__docker_complete_log_options() { + # see repository docker/docker.github.io/engine/admin/logging/ + + # really global options, defined in https://github.com/moby/moby/blob/master/daemon/logger/factory.go + local common_options1="max-buffer-size mode" + # common options defined in https://github.com/moby/moby/blob/master/daemon/logger/loginfo.go + # but not implemented in all log drivers + local common_options2="env env-regex labels" + + # awslogs does not implement the $common_options2. 
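+# (illustrative example: after `--log-driver awslogs`, completing
+# `--log-opt` therefore offers "tag" and the $common_options1 names,
+# but not "env" or "labels")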
+ local awslogs_options="$common_options1 awslogs-create-group awslogs-group awslogs-region awslogs-stream tag" + + local fluentd_options="$common_options1 $common_options2 fluentd-address fluentd-async-connect fluentd-buffer-limit fluentd-retry-wait fluentd-max-retries tag" + local gcplogs_options="$common_options1 $common_options2 gcp-log-cmd gcp-meta-id gcp-meta-name gcp-meta-zone gcp-project" + local gelf_options="$common_options1 $common_options2 gelf-address gelf-compression-level gelf-compression-type tag" + local journald_options="$common_options1 $common_options2 tag" + local json_file_options="$common_options1 $common_options2 max-file max-size" + local logentries_options="$common_options1 $common_options2 logentries-token tag" + local splunk_options="$common_options1 $common_options2 splunk-caname splunk-capath splunk-format splunk-gzip splunk-gzip-level splunk-index splunk-insecureskipverify splunk-source splunk-sourcetype splunk-token splunk-url splunk-verify-connection tag" + local syslog_options="$common_options1 $common_options2 syslog-address syslog-facility syslog-format syslog-tls-ca-cert syslog-tls-cert syslog-tls-key syslog-tls-skip-verify tag" + + local all_options="$fluentd_options $gcplogs_options $gelf_options $journald_options $logentries_options $json_file_options $syslog_options $splunk_options" + + case $(__docker_value_of_option --log-driver) in + '') + COMPREPLY=( $( compgen -W "$all_options" -S = -- "$cur" ) ) + ;; + awslogs) + COMPREPLY=( $( compgen -W "$awslogs_options" -S = -- "$cur" ) ) + ;; + fluentd) + COMPREPLY=( $( compgen -W "$fluentd_options" -S = -- "$cur" ) ) + ;; + gcplogs) + COMPREPLY=( $( compgen -W "$gcplogs_options" -S = -- "$cur" ) ) + ;; + gelf) + COMPREPLY=( $( compgen -W "$gelf_options" -S = -- "$cur" ) ) + ;; + journald) + COMPREPLY=( $( compgen -W "$journald_options" -S = -- "$cur" ) ) + ;; + json-file) + COMPREPLY=( $( compgen -W "$json_file_options" -S = -- "$cur" ) ) + ;; + logentries) + COMPREPLY=( $( compgen -W "$logentries_options" -S = -- "$cur" ) ) + ;; + syslog) + COMPREPLY=( $( compgen -W "$syslog_options" -S = -- "$cur" ) ) + ;; + splunk) + COMPREPLY=( $( compgen -W "$splunk_options" -S = -- "$cur" ) ) + ;; + *) + return + ;; + esac + + __docker_nospace +} + +__docker_complete_log_driver_options() { + local key=$(__docker_map_key_of_current_option '--log-opt') + case "$key" in + awslogs-create-group) + COMPREPLY=( $( compgen -W "false true" -- "${cur##*=}" ) ) + return + ;; + fluentd-async-connect) + COMPREPLY=( $( compgen -W "false true" -- "${cur##*=}" ) ) + return + ;; + gelf-address) + COMPREPLY=( $( compgen -W "udp" -S "://" -- "${cur##*=}" ) ) + __docker_nospace + return + ;; + gelf-compression-level) + COMPREPLY=( $( compgen -W "1 2 3 4 5 6 7 8 9" -- "${cur##*=}" ) ) + return + ;; + gelf-compression-type) + COMPREPLY=( $( compgen -W "gzip none zlib" -- "${cur##*=}" ) ) + return + ;; + mode) + COMPREPLY=( $( compgen -W "blocking non-blocking" -- "${cur##*=}" ) ) + return + ;; + syslog-address) + COMPREPLY=( $( compgen -W "tcp:// tcp+tls:// udp:// unix://" -- "${cur##*=}" ) ) + __docker_nospace + __ltrim_colon_completions "${cur}" + return + ;; + syslog-facility) + COMPREPLY=( $( compgen -W " + auth + authpriv + cron + daemon + ftp + kern + local0 + local1 + local2 + local3 + local4 + local5 + local6 + local7 + lpr + mail + news + syslog + user + uucp + " -- "${cur##*=}" ) ) + return + ;; + syslog-format) + COMPREPLY=( $( compgen -W "rfc3164 rfc5424 rfc5424micro" -- "${cur##*=}" ) ) + return + ;; + 
syslog-tls-ca-cert|syslog-tls-cert|syslog-tls-key) + _filedir + return + ;; + syslog-tls-skip-verify) + COMPREPLY=( $( compgen -W "true" -- "${cur##*=}" ) ) + return + ;; + splunk-url) + COMPREPLY=( $( compgen -W "http:// https://" -- "${cur##*=}" ) ) + __docker_nospace + __ltrim_colon_completions "${cur}" + return + ;; + splunk-gzip|splunk-insecureskipverify|splunk-verify-connection) + COMPREPLY=( $( compgen -W "false true" -- "${cur##*=}" ) ) + return + ;; + splunk-format) + COMPREPLY=( $( compgen -W "inline json raw" -- "${cur##*=}" ) ) + return + ;; + esac + return 1 +} + +__docker_complete_log_levels() { + COMPREPLY=( $( compgen -W "debug info warn error fatal" -- "$cur" ) ) +} + +__docker_complete_restart() { + case "$prev" in + --restart) + case "$cur" in + on-failure:*) + ;; + *) + COMPREPLY=( $( compgen -W "always no on-failure on-failure: unless-stopped" -- "$cur") ) + ;; + esac + return + ;; + esac + return 1 +} + +# __docker_complete_signals returns a subset of the available signals that is most likely +# relevant in the context of docker containers +__docker_complete_signals() { + local signals=( + SIGCONT + SIGHUP + SIGINT + SIGKILL + SIGQUIT + SIGSTOP + SIGTERM + SIGUSR1 + SIGUSR2 + ) + COMPREPLY=( $( compgen -W "${signals[*]} ${signals[*]#SIG}" -- "$( echo $cur | tr '[:lower:]' '[:upper:]')" ) ) +} + +__docker_complete_user_group() { + if [[ $cur == *:* ]] ; then + COMPREPLY=( $(compgen -g -- "${cur#*:}") ) + else + COMPREPLY=( $(compgen -u -S : -- "$cur") ) + __docker_nospace + fi +} + +_docker_docker() { + # global options that may appear after the docker command + local boolean_options=" + $global_boolean_options + --help + --version -v + " + + case "$prev" in + --config) + _filedir -d + return + ;; + --log-level|-l) + __docker_complete_log_levels + return + ;; + $(__docker_to_extglob "$global_options_with_args") ) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "$boolean_options $global_options_with_args" -- "$cur" ) ) + ;; + *) + local counter=$( __docker_pos_first_nonflag "$(__docker_to_extglob "$global_options_with_args")" ) + if [ $cword -eq $counter ]; then + __docker_daemon_is_experimental && commands+=(${experimental_commands[*]}) + COMPREPLY=( $( compgen -W "${commands[*]} help" -- "$cur" ) ) + fi + ;; + esac +} + +_docker_attach() { + _docker_container_attach +} + +_docker_build() { + _docker_image_build +} + + +_docker_checkpoint() { + local subcommands=" + create + ls + rm + " + local aliases=" + list + remove + " + __docker_subcommands "$subcommands $aliases" && return + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + COMPREPLY=( $( compgen -W "$subcommands" -- "$cur" ) ) + ;; + esac +} + +_docker_checkpoint_create() { + case "$prev" in + --checkpoint-dir) + _filedir -d + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--checkpoint-dir --help --leave-running" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag '--checkpoint-dir') + if [ $cword -eq $counter ]; then + __docker_complete_containers_running + fi + ;; + esac +} + +_docker_checkpoint_ls() { + case "$prev" in + --checkpoint-dir) + _filedir -d + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--checkpoint-dir --help" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag '--checkpoint-dir') + if [ $cword -eq $counter ]; then + __docker_complete_containers_all + fi + ;; + esac +} + +_docker_checkpoint_rm() { + case "$prev" in + --checkpoint-dir) + _filedir -d 
+ return
+ ;;
+ esac
+
+ case "$cur" in
+ -*)
+ COMPREPLY=( $( compgen -W "--checkpoint-dir --help" -- "$cur" ) )
+ ;;
+ *)
+ local counter=$(__docker_pos_first_nonflag '--checkpoint-dir')
+ if [ $cword -eq $counter ]; then
+ __docker_complete_containers_all
+ elif [ $cword -eq $(($counter + 1)) ]; then
+ COMPREPLY=( $( compgen -W "$(__docker_q checkpoint ls "$prev" | sed 1d)" -- "$cur" ) )
+ fi
+ ;;
+ esac
+}
+
+
+_docker_container() {
+ local subcommands="
+ attach
+ commit
+ cp
+ create
+ diff
+ exec
+ export
+ inspect
+ kill
+ logs
+ ls
+ pause
+ port
+ prune
+ rename
+ restart
+ rm
+ run
+ start
+ stats
+ stop
+ top
+ unpause
+ update
+ wait
+ "
+ local aliases="
+ list
+ ps
+ "
+ __docker_subcommands "$subcommands $aliases" && return
+
+ case "$cur" in
+ -*)
+ COMPREPLY=( $( compgen -W "--help" -- "$cur" ) )
+ ;;
+ *)
+ COMPREPLY=( $( compgen -W "$subcommands" -- "$cur" ) )
+ ;;
+ esac
+}
+
+_docker_container_attach() {
+ __docker_complete_detach_keys && return
+
+ case "$cur" in
+ -*)
+ COMPREPLY=( $( compgen -W "--detach-keys --help --no-stdin --sig-proxy=false" -- "$cur" ) )
+ ;;
+ *)
+ local counter=$(__docker_pos_first_nonflag '--detach-keys')
+ if [ $cword -eq $counter ]; then
+ __docker_complete_containers_running
+ fi
+ ;;
+ esac
+}
+
+_docker_container_commit() {
+ case "$prev" in
+ --author|-a|--change|-c|--message|-m)
+ return
+ ;;
+ esac
+
+ case "$cur" in
+ -*)
+ COMPREPLY=( $( compgen -W "--author -a --change -c --help --message -m --pause=false -p=false" -- "$cur" ) )
+ ;;
+ *)
+ local counter=$(__docker_pos_first_nonflag '--author|-a|--change|-c|--message|-m')
+
+ if [ $cword -eq $counter ]; then
+ __docker_complete_containers_all
+ return
+ fi
+ (( counter++ ))
+
+ if [ $cword -eq $counter ]; then
+ __docker_complete_image_repos_and_tags
+ return
+ fi
+ ;;
+ esac
+}
+
+_docker_container_cp() {
+ case "$cur" in
+ -*)
+ COMPREPLY=( $( compgen -W "--follow-link -L --help" -- "$cur" ) )
+ ;;
+ *)
+ local counter=$(__docker_pos_first_nonflag)
+ if [ $cword -eq $counter ]; then
+ case "$cur" in
+ *:)
+ return
+ ;;
+ *)
+ # combined container and filename completion
+ _filedir
+ local files=( ${COMPREPLY[@]} )
+
+ __docker_complete_containers_all
+ COMPREPLY=( $( compgen -W "${COMPREPLY[*]}" -S ':' ) )
+ local containers=( ${COMPREPLY[@]} )
+
+ COMPREPLY=( $( compgen -W "${files[*]} ${containers[*]}" -- "$cur" ) )
+ if [[ "$COMPREPLY" == *: ]]; then
+ __docker_nospace
+ fi
+ return
+ ;;
+ esac
+ fi
+ (( counter++ ))
+
+ if [ $cword -eq $counter ]; then
+ if [ -e "$prev" ]; then
+ __docker_complete_containers_all
+ COMPREPLY=( $( compgen -W "${COMPREPLY[*]}" -S ':' ) )
+ __docker_nospace
+ else
+ _filedir
+ fi
+ return
+ fi
+ ;;
+ esac
+}
+
+_docker_container_create() {
+ _docker_container_run_and_create
+}
+
+_docker_container_diff() {
+ case "$cur" in
+ -*)
+ COMPREPLY=( $( compgen -W "--help" -- "$cur" ) )
+ ;;
+ *)
+ local counter=$(__docker_pos_first_nonflag)
+ if [ $cword -eq $counter ]; then
+ __docker_complete_containers_all
+ fi
+ ;;
+ esac
+}
+
+_docker_container_exec() {
+ __docker_complete_detach_keys && return
+
+ case "$prev" in
+ --env|-e)
+ # we do not append a "=" here because "-e VARNAME" is legal syntax, too
+ COMPREPLY=( $( compgen -e -- "$cur" ) )
+ __docker_nospace
+ return
+ ;;
+ --user|-u)
+ __docker_complete_user_group
+ return
+ ;;
+ esac
+
+ case "$cur" in
+ -*)
+ COMPREPLY=( $( compgen -W "--detach -d --detach-keys --env -e --help --interactive -i --privileged --tty -t --user -u" -- "$cur" ) )
+ ;;
+ *)
+ __docker_complete_containers_running
+ ;;
+ esac +} + +_docker_container_export() { + case "$prev" in + --output|-o) + _filedir + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help --output -o" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag) + if [ $cword -eq $counter ]; then + __docker_complete_containers_all + fi + ;; + esac +} + +_docker_container_inspect() { + _docker_inspect --type container +} + +_docker_container_kill() { + case "$prev" in + --signal|-s) + __docker_complete_signals + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help --signal -s" -- "$cur" ) ) + ;; + *) + __docker_complete_containers_running + ;; + esac +} + +_docker_container_logs() { + case "$prev" in + --since|--tail) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--details --follow -f --help --since --tail --timestamps -t" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag '--since|--tail') + if [ $cword -eq $counter ]; then + __docker_complete_containers_all + fi + ;; + esac +} + +_docker_container_list() { + _docker_container_ls +} + +_docker_container_ls() { + local key=$(__docker_map_key_of_current_option '--filter|-f') + case "$key" in + ancestor) + cur="${cur##*=}" + __docker_complete_images + return + ;; + before) + __docker_complete_containers_all --cur "${cur##*=}" + return + ;; + expose|publish) + return + ;; + id) + __docker_complete_containers_all --cur "${cur##*=}" --id + return + ;; + health) + COMPREPLY=( $( compgen -W "healthy starting none unhealthy" -- "${cur##*=}" ) ) + return + ;; + is-task) + COMPREPLY=( $( compgen -W "true false" -- "${cur##*=}" ) ) + return + ;; + name) + __docker_complete_containers_all --cur "${cur##*=}" --name + return + ;; + network) + __docker_complete_networks --cur "${cur##*=}" + return + ;; + since) + __docker_complete_containers_all --cur "${cur##*=}" + return + ;; + status) + COMPREPLY=( $( compgen -W "created dead exited paused restarting running removing" -- "${cur##*=}" ) ) + return + ;; + volume) + __docker_complete_volumes --cur "${cur##*=}" + return + ;; + esac + + case "$prev" in + --filter|-f) + COMPREPLY=( $( compgen -S = -W "ancestor before exited expose health id is-task label name network publish since status volume" -- "$cur" ) ) + __docker_nospace + return + ;; + --format|--last|-n) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--all -a --filter -f --format --help --last -n --latest -l --no-trunc --quiet -q --size -s" -- "$cur" ) ) + ;; + esac +} + +_docker_container_pause() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + __docker_complete_containers_running + ;; + esac +} + +_docker_container_port() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag) + if [ $cword -eq $counter ]; then + __docker_complete_containers_all + fi + ;; + esac +} + +_docker_container_prune() { + case "$prev" in + --filter) + COMPREPLY=( $( compgen -W "until" -S = -- "$cur" ) ) + __docker_nospace + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--force -f --filter --help" -- "$cur" ) ) + ;; + esac +} + +_docker_container_ps() { + _docker_container_ls +} + +_docker_container_rename() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag) + if [ $cword -eq $counter ]; then + __docker_complete_containers_all + fi + ;; + esac +} + 
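+# Example (illustrative): in _docker_container_ls above, a command line
+# such as
+#   docker container ls --filter status=<TAB>
+# makes __docker_map_key_of_current_option return "status", so only the
+# valid container states (created, dead, exited, ...) are offered.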
+_docker_container_restart() { + case "$prev" in + --time|-t) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help --time -t" -- "$cur" ) ) + ;; + *) + __docker_complete_containers_all + ;; + esac +} + +_docker_container_rm() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--force -f --help --link -l --volumes -v" -- "$cur" ) ) + ;; + *) + for arg in "${COMP_WORDS[@]}"; do + case "$arg" in + --force|-f) + __docker_complete_containers_all + return + ;; + esac + done + __docker_complete_containers_removable + ;; + esac +} + +_docker_container_run() { + _docker_container_run_and_create +} + +# _docker_container_run_and_create is the combined completion for `_docker_container_run` +# and `_docker_container_create` +_docker_container_run_and_create() { + local options_with_args=" + --add-host + --attach -a + --blkio-weight + --blkio-weight-device + --cap-add + --cap-drop + --cgroup-parent + --cidfile + --cpu-period + --cpu-quota + --cpu-rt-period + --cpu-rt-runtime + --cpuset-cpus + --cpus + --cpuset-mems + --cpu-shares -c + --device + --device-cgroup-rule + --device-read-bps + --device-read-iops + --device-write-bps + --device-write-iops + --dns + --dns-option + --dns-search + --entrypoint + --env -e + --env-file + --expose + --group-add + --health-cmd + --health-interval + --health-retries + --health-start-period + --health-timeout + --hostname -h + --ip + --ip6 + --ipc + --kernel-memory + --label-file + --label -l + --link + --link-local-ip + --log-driver + --log-opt + --mac-address + --memory -m + --memory-swap + --memory-swappiness + --memory-reservation + --mount + --name + --network + --network-alias + --oom-score-adj + --pid + --pids-limit + --publish -p + --restart + --runtime + --security-opt + --shm-size + --stop-signal + --stop-timeout + --storage-opt + --tmpfs + --sysctl + --ulimit + --user -u + --userns + --uts + --volume-driver + --volumes-from + --volume -v + --workdir -w + " + __docker_daemon_os_is windows && options_with_args+=" + --cpu-count + --cpu-percent + --io-maxbandwidth + --io-maxiops + --isolation + " + + local boolean_options=" + --disable-content-trust=false + --help + --init + --interactive -i + --no-healthcheck + --oom-kill-disable + --privileged + --publish-all -P + --read-only + --tty -t + " + + if [ "$command" = "run" -o "$subcommand" = "run" ] ; then + options_with_args="$options_with_args + --detach-keys + " + boolean_options="$boolean_options + --detach -d + --rm + --sig-proxy=false + " + __docker_complete_detach_keys && return + fi + + local all_options="$options_with_args $boolean_options" + + + __docker_complete_log_driver_options && return + __docker_complete_restart && return + + local key=$(__docker_map_key_of_current_option '--security-opt') + case "$key" in + label) + [[ $cur == *: ]] && return + COMPREPLY=( $( compgen -W "user: role: type: level: disable" -- "${cur##*=}") ) + if [ "${COMPREPLY[*]}" != "disable" ] ; then + __docker_nospace + fi + return + ;; + seccomp) + local cur=${cur##*=} + _filedir + COMPREPLY+=( $( compgen -W "unconfined" -- "$cur" ) ) + return + ;; + esac + + case "$prev" in + --add-host) + case "$cur" in + *:) + __docker_complete_resolved_hostname + return + ;; + esac + ;; + --attach|-a) + COMPREPLY=( $( compgen -W 'stdin stdout stderr' -- "$cur" ) ) + return + ;; + --cap-add) + __docker_complete_capabilities_addable + return + ;; + --cap-drop) + __docker_complete_capabilities_droppable + return + ;; + --cidfile|--env-file|--label-file) + _filedir + return + ;; + --device|--tmpfs|--volume|-v) 
+ case "$cur" in
+ *:*)
+ # TODO somehow do _filedir for stuff inside the image, if it's already specified (which is also somewhat difficult to determine)
+ ;;
+ '')
+ COMPREPLY=( $( compgen -W '/' -- "$cur" ) )
+ __docker_nospace
+ ;;
+ /*)
+ _filedir
+ __docker_nospace
+ ;;
+ esac
+ return
+ ;;
+ --env|-e)
+ # we do not append a "=" here because "-e VARNAME" is legal syntax, too
+ COMPREPLY=( $( compgen -e -- "$cur" ) )
+ __docker_nospace
+ return
+ ;;
+ --ipc)
+ case "$cur" in
+ *:*)
+ cur="${cur#*:}"
+ __docker_complete_containers_running
+ ;;
+ *)
+ COMPREPLY=( $( compgen -W 'host container:' -- "$cur" ) )
+ if [ "$COMPREPLY" = "container:" ]; then
+ __docker_nospace
+ fi
+ ;;
+ esac
+ return
+ ;;
+ --isolation)
+ if __docker_daemon_os_is windows ; then
+ __docker_complete_isolation
+ return
+ fi
+ ;;
+ --link)
+ case "$cur" in
+ *:*)
+ ;;
+ *)
+ __docker_complete_containers_running
+ COMPREPLY=( $( compgen -W "${COMPREPLY[*]}" -S ':' ) )
+ __docker_nospace
+ ;;
+ esac
+ return
+ ;;
+ --log-driver)
+ __docker_complete_log_drivers
+ return
+ ;;
+ --log-opt)
+ __docker_complete_log_options
+ return
+ ;;
+ --network)
+ case "$cur" in
+ container:*)
+ __docker_complete_containers_all --cur "${cur#*:}"
+ ;;
+ *)
+ COMPREPLY=( $( compgen -W "$(__docker_plugins_bundled --type Network) $(__docker_networks) container:" -- "$cur") )
+ if [ "${COMPREPLY[*]}" = "container:" ] ; then
+ __docker_nospace
+ fi
+ ;;
+ esac
+ return
+ ;;
+ --pid)
+ case "$cur" in
+ *:*)
+ __docker_complete_containers_running --cur "${cur#*:}"
+ ;;
+ *)
+ COMPREPLY=( $( compgen -W 'host container:' -- "$cur" ) )
+ if [ "$COMPREPLY" = "container:" ]; then
+ __docker_nospace
+ fi
+ ;;
+ esac
+ return
+ ;;
+ --runtime)
+ __docker_complete_runtimes
+ return
+ ;;
+ --security-opt)
+ COMPREPLY=( $( compgen -W "apparmor= label= no-new-privileges seccomp=" -- "$cur") )
+ if [ "${COMPREPLY[*]}" != "no-new-privileges" ] ; then
+ __docker_nospace
+ fi
+ return
+ ;;
+ --stop-signal)
+ __docker_complete_signals
+ return
+ ;;
+ --storage-opt)
+ COMPREPLY=( $( compgen -W "size" -S = -- "$cur") )
+ __docker_nospace
+ return
+ ;;
+ --user|-u)
+ __docker_complete_user_group
+ return
+ ;;
+ --userns)
+ COMPREPLY=( $( compgen -W "host" -- "$cur" ) )
+ return
+ ;;
+ --volume-driver)
+ __docker_complete_plugins_bundled --type Volume
+ return
+ ;;
+ --volumes-from)
+ __docker_complete_containers_all
+ return
+ ;;
+ $(__docker_to_extglob "$options_with_args") )
+ return
+ ;;
+ esac
+
+ case "$cur" in
+ -*)
+ COMPREPLY=( $( compgen -W "$all_options" -- "$cur" ) )
+ ;;
+ *)
+ local counter=$( __docker_pos_first_nonflag $( __docker_to_alternatives "$options_with_args" ) )
+ if [ $cword -eq $counter ]; then
+ __docker_complete_images
+ fi
+ ;;
+ esac
+}
+
+_docker_container_start() {
+ __docker_complete_detach_keys && return
+
+ case "$prev" in
+ --checkpoint)
+ if __docker_daemon_is_experimental ; then
+ return
+ fi
+ ;;
+ --checkpoint-dir)
+ if __docker_daemon_is_experimental ; then
+ _filedir -d
+ return
+ fi
+ ;;
+ esac
+
+ case "$cur" in
+ -*)
+ local options="--attach -a --detach-keys --help --interactive -i"
+ __docker_daemon_is_experimental && options+=" --checkpoint --checkpoint-dir"
+ COMPREPLY=( $( compgen -W "$options" -- "$cur" ) )
+ ;;
+ *)
+ __docker_complete_containers_stopped
+ ;;
+ esac
+}
+
+_docker_container_stats() {
+ case "$prev" in
+ --format)
+ return
+ ;;
+ esac
+
+ case "$cur" in
+ -*)
+ COMPREPLY=( $( compgen -W "--all -a --format --help --no-stream" -- "$cur" ) )
+ ;;
+ *)
+ __docker_complete_containers_running
+ ;; + esac +} + +_docker_container_stop() { + case "$prev" in + --time|-t) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help --time -t" -- "$cur" ) ) + ;; + *) + __docker_complete_containers_running + ;; + esac +} + +_docker_container_top() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag) + if [ $cword -eq $counter ]; then + __docker_complete_containers_running + fi + ;; + esac +} + +_docker_container_unpause() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag) + if [ $cword -eq $counter ]; then + __docker_complete_containers_unpauseable + fi + ;; + esac +} + +_docker_container_update() { + local options_with_args=" + --blkio-weight + --cpu-period + --cpu-quota + --cpu-rt-period + --cpu-rt-runtime + --cpus + --cpuset-cpus + --cpuset-mems + --cpu-shares -c + --kernel-memory + --memory -m + --memory-reservation + --memory-swap + --restart + " + + local boolean_options=" + --help + " + + local all_options="$options_with_args $boolean_options" + + __docker_complete_restart && return + + case "$prev" in + $(__docker_to_extglob "$options_with_args") ) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "$all_options" -- "$cur" ) ) + ;; + *) + __docker_complete_containers_all + ;; + esac +} + +_docker_container_wait() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + __docker_complete_containers_all + ;; + esac +} + + +_docker_commit() { + _docker_container_commit +} + +_docker_cp() { + _docker_container_cp +} + +_docker_create() { + _docker_container_create +} + +_docker_daemon() { + local boolean_options=" + $global_boolean_options + --disable-legacy-registry + --experimental + --help + --icc=false + --init + --ip-forward=false + --ip-masq=false + --iptables=false + --ipv6 + --live-restore + --raw-logs + --selinux-enabled + --userland-proxy=false + " + local options_with_args=" + $global_options_with_args + --add-runtime + --allow-nondistributable-artifacts + --api-cors-header + --authorization-plugin + --bip + --bridge -b + --cgroup-parent + --cluster-advertise + --cluster-store + --cluster-store-opt + --config-file + --containerd + --data-root + --default-gateway + --default-gateway-v6 + --default-shm-size + --default-ulimit + --dns + --dns-search + --dns-opt + --exec-opt + --exec-root + --fixed-cidr + --fixed-cidr-v6 + --group -G + --init-path + --insecure-registry + --ip + --label + --log-driver + --log-opt + --max-concurrent-downloads + --max-concurrent-uploads + --mtu + --oom-score-adjust + --pidfile -p + --registry-mirror + --seccomp-profile + --shutdown-timeout + --storage-driver -s + --storage-opt + --userland-proxy-path + --userns-remap + " + + __docker_complete_log_driver_options && return + + key=$(__docker_map_key_of_current_option '--cluster-store-opt') + case "$key" in + kv.*file) + cur=${cur##*=} + _filedir + return + ;; + esac + + local key=$(__docker_map_key_of_current_option '--storage-opt') + case "$key" in + dm.blkdiscard|dm.override_udev_sync_check|dm.use_deferred_removal|dm.use_deferred_deletion) + COMPREPLY=( $( compgen -W "false true" -- "${cur##*=}" ) ) + return + ;; + dm.directlvm_device|dm.thinpooldev) + cur=${cur##*=} + _filedir + return + ;; + dm.fs) + COMPREPLY=( $( compgen -W "ext4 xfs" -- "${cur##*=}" ) ) + return + ;; + esac + + case "$prev" in + --authorization-plugin) + __docker_complete_plugins_bundled 
--type Authorization + return + ;; + --cluster-store) + COMPREPLY=( $( compgen -W "consul etcd zk" -S "://" -- "$cur" ) ) + __docker_nospace + return + ;; + --cluster-store-opt) + COMPREPLY=( $( compgen -W "discovery.heartbeat discovery.ttl kv.cacertfile kv.certfile kv.keyfile kv.path" -S = -- "$cur" ) ) + __docker_nospace + return + ;; + --config-file|--containerd|--init-path|--pidfile|-p|--tlscacert|--tlscert|--tlskey|--userland-proxy-path) + _filedir + return + ;; + --exec-root|--data-root) + _filedir -d + return + ;; + --log-driver) + __docker_complete_log_drivers + return + ;; + --storage-driver|-s) + COMPREPLY=( $( compgen -W "aufs btrfs devicemapper overlay overlay2 vfs zfs" -- "$(echo $cur | tr '[:upper:]' '[:lower:]')" ) ) + return + ;; + --storage-opt) + local btrfs_options="btrfs.min_space" + local devicemapper_options=" + dm.basesize + dm.blkdiscard + dm.blocksize + dm.directlvm_device + dm.fs + dm.loopdatasize + dm.loopmetadatasize + dm.min_free_space + dm.mkfsarg + dm.mountopt + dm.override_udev_sync_check + dm.thinpooldev + dm.thinp_autoextend_percent + dm.thinp_autoextend_threshold + dm.thinp_metapercent + dm.thinp_percent + dm.use_deferred_deletion + dm.use_deferred_removal + " + local zfs_options="zfs.fsname" + + case $(__docker_value_of_option '--storage-driver|-s') in + '') + COMPREPLY=( $( compgen -W "$btrfs_options $devicemapper_options $zfs_options" -S = -- "$cur" ) ) + ;; + btrfs) + COMPREPLY=( $( compgen -W "$btrfs_options" -S = -- "$cur" ) ) + ;; + devicemapper) + COMPREPLY=( $( compgen -W "$devicemapper_options" -S = -- "$cur" ) ) + ;; + zfs) + COMPREPLY=( $( compgen -W "$zfs_options" -S = -- "$cur" ) ) + ;; + *) + return + ;; + esac + __docker_nospace + return + ;; + --log-level|-l) + __docker_complete_log_levels + return + ;; + --log-opt) + __docker_complete_log_options + return + ;; + --seccomp-profile) + _filedir json + return + ;; + --userns-remap) + __docker_complete_user_group + return + ;; + $(__docker_to_extglob "$options_with_args") ) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "$boolean_options $options_with_args" -- "$cur" ) ) + ;; + esac +} + +_docker_deploy() { + __docker_daemon_is_experimental && _docker_stack_deploy +} + +_docker_diff() { + _docker_container_diff +} + +_docker_events() { + _docker_system_events +} + +_docker_exec() { + _docker_container_exec +} + +_docker_export() { + _docker_container_export +} + +_docker_help() { + local counter=$(__docker_pos_first_nonflag) + if [ $cword -eq $counter ]; then + COMPREPLY=( $( compgen -W "${commands[*]}" -- "$cur" ) ) + fi +} + +_docker_history() { + _docker_image_history +} + + +_docker_image() { + local subcommands=" + build + history + import + inspect + load + ls + prune + pull + push + rm + save + tag + " + local aliases=" + images + list + remove + rmi + " + __docker_subcommands "$subcommands $aliases" && return + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + COMPREPLY=( $( compgen -W "$subcommands" -- "$cur" ) ) + ;; + esac +} + +_docker_image_build() { + local options_with_args=" + --add-host + --build-arg + --cache-from + --cgroup-parent + --cpuset-cpus + --cpuset-mems + --cpu-shares -c + --cpu-period + --cpu-quota + --file -f + --label + --memory -m + --memory-swap + --network + --shm-size + --tag -t + --ulimit + " + __docker_daemon_os_is windows && options_with_args+=" + --isolation + " + + local boolean_options=" + --compress + --disable-content-trust=false + --force-rm + --help + --no-cache + --pull + --quiet -q + 
--rm + " + __docker_daemon_is_experimental && boolean_options+="--squash" + + local all_options="$options_with_args $boolean_options" + + case "$prev" in + --add-host) + case "$cur" in + *:) + __docker_complete_resolved_hostname + return + ;; + esac + ;; + --build-arg) + COMPREPLY=( $( compgen -e -- "$cur" ) ) + __docker_nospace + return + ;; + --cache-from) + __docker_complete_image_repos_and_tags + return + ;; + --file|-f) + _filedir + return + ;; + --isolation) + if __docker_daemon_os_is windows ; then + __docker_complete_isolation + return + fi + ;; + --network) + case "$cur" in + container:*) + __docker_complete_containers_all --cur "${cur#*:}" + ;; + *) + COMPREPLY=( $( compgen -W "$(__docker_plugins_bundled --type Network) $(__docker_networks) container:" -- "$cur") ) + if [ "${COMPREPLY[*]}" = "container:" ] ; then + __docker_nospace + fi + ;; + esac + return + ;; + --tag|-t) + __docker_complete_image_repos_and_tags + return + ;; + $(__docker_to_extglob "$options_with_args") ) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "$all_options" -- "$cur" ) ) + ;; + *) + local counter=$( __docker_pos_first_nonflag $( __docker_to_alternatives "$options_with_args" ) ) + if [ $cword -eq $counter ]; then + _filedir -d + fi + ;; + esac +} + +_docker_image_history() { + case "$prev" in + --format) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--format --help --human=false -H=false --no-trunc --quiet -q" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag) + if [ $cword -eq $counter ]; then + __docker_complete_images + fi + ;; + esac +} + +_docker_image_images() { + _docker_image_ls +} + +_docker_image_import() { + case "$prev" in + --change|-c|--message|-m) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--change -c --help --message -m" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag '--change|-c|--message|-m') + if [ $cword -eq $counter ]; then + return + fi + (( counter++ )) + + if [ $cword -eq $counter ]; then + __docker_complete_image_repos_and_tags + return + fi + ;; + esac +} + +_docker_image_inspect() { + _docker_inspect --type image +} + +_docker_image_load() { + case "$prev" in + --input|-i) + _filedir + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help --input -i --quiet -q" -- "$cur" ) ) + ;; + esac +} + +_docker_image_list() { + _docker_image_ls +} + +_docker_image_ls() { + local key=$(__docker_map_key_of_current_option '--filter|-f') + case "$key" in + before|since|reference) + cur="${cur##*=}" + __docker_complete_images + return + ;; + dangling) + COMPREPLY=( $( compgen -W "false true" -- "${cur##*=}" ) ) + return + ;; + label) + return + ;; + esac + + case "$prev" in + --filter|-f) + COMPREPLY=( $( compgen -S = -W "before dangling label reference since" -- "$cur" ) ) + __docker_nospace + return + ;; + --format) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--all -a --digests --filter -f --format --help --no-trunc --quiet -q" -- "$cur" ) ) + ;; + =) + return + ;; + *) + __docker_complete_image_repos + ;; + esac +} + +_docker_image_prune() { + case "$prev" in + --filter) + COMPREPLY=( $( compgen -W "until" -S = -- "$cur" ) ) + __docker_nospace + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--all -a --force -f --filter --help" -- "$cur" ) ) + ;; + esac +} + +_docker_image_pull() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--all-tags -a --disable-content-trust=false 
--help" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag) + if [ $cword -eq $counter ]; then + for arg in "${COMP_WORDS[@]}"; do + case "$arg" in + --all-tags|-a) + __docker_complete_image_repos + return + ;; + esac + done + __docker_complete_image_repos_and_tags + fi + ;; + esac +} + +_docker_image_push() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--disable-content-trust=false --help" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag) + if [ $cword -eq $counter ]; then + __docker_complete_image_repos_and_tags + fi + ;; + esac +} + +_docker_image_remove() { + _docker_image_rm +} + +_docker_image_rm() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--force -f --help --no-prune" -- "$cur" ) ) + ;; + *) + __docker_complete_images + ;; + esac +} + +_docker_image_rmi() { + _docker_image_rm +} + +_docker_image_save() { + case "$prev" in + --output|-o) + _filedir + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help --output -o" -- "$cur" ) ) + ;; + *) + __docker_complete_images + ;; + esac +} + +_docker_image_tag() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag) + + if [ $cword -eq $counter ]; then + __docker_complete_image_repos_and_tags + return + fi + (( counter++ )) + + if [ $cword -eq $counter ]; then + __docker_complete_image_repos_and_tags + return + fi + ;; + esac +} + + +_docker_images() { + _docker_image_ls +} + +_docker_import() { + _docker_image_import +} + +_docker_info() { + _docker_system_info +} + +_docker_inspect() { + local preselected_type + local type + + if [ "$1" = "--type" ] ; then + preselected_type=yes + type="$2" + else + type=$(__docker_value_of_option --type) + fi + + case "$prev" in + --format|-f) + return + ;; + --type) + if [ -z "$preselected_type" ] ; then + COMPREPLY=( $( compgen -W "container image network node plugin secret service volume" -- "$cur" ) ) + return + fi + ;; + esac + + case "$cur" in + -*) + local options="--format -f --help --size -s" + if [ -z "$preselected_type" ] ; then + options+=" --type" + fi + COMPREPLY=( $( compgen -W "$options" -- "$cur" ) ) + ;; + *) + case "$type" in + '') + COMPREPLY=( $( compgen -W " + $(__docker_containers --all) + $(__docker_images) + $(__docker_networks) + $(__docker_nodes) + $(__docker_plugins_installed) + $(__docker_secrets) + $(__docker_services) + $(__docker_volumes) + " -- "$cur" ) ) + ;; + container) + __docker_complete_containers_all + ;; + image) + __docker_complete_images + ;; + network) + __docker_complete_networks + ;; + node) + __docker_complete_nodes + ;; + plugin) + __docker_complete_plugins_installed + ;; + secret) + __docker_complete_secrets + ;; + service) + __docker_complete_services + ;; + volume) + __docker_complete_volumes + ;; + esac + esac +} + +_docker_kill() { + _docker_container_kill +} + +_docker_load() { + _docker_image_load +} + +_docker_login() { + case "$prev" in + --password|-p|--username|-u) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help --password -p --username -u" -- "$cur" ) ) + ;; + esac +} + +_docker_logout() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + esac +} + +_docker_logs() { + _docker_container_logs +} + +_docker_network_connect() { + local options_with_args=" + --alias + --ip + --ip6 + --link + --link-local-ip + " + + local boolean_options=" + --help + " + + case "$prev" in + --link) + case "$cur" in + *:*) + ;; + *) + 
__docker_complete_containers_running + COMPREPLY=( $( compgen -W "${COMPREPLY[*]}" -S ':' ) ) + __docker_nospace + ;; + esac + return + ;; + $(__docker_to_extglob "$options_with_args") ) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "$boolean_options $options_with_args" -- "$cur" ) ) + ;; + *) + local counter=$( __docker_pos_first_nonflag $( __docker_to_alternatives "$options_with_args" ) ) + if [ $cword -eq $counter ]; then + __docker_complete_networks + elif [ $cword -eq $(($counter + 1)) ]; then + __docker_complete_containers_all + fi + ;; + esac +} + +_docker_network_create() { + case "$prev" in + --aux-address|--gateway|--internal|--ip-range|--ipam-opt|--ipv6|--opt|-o|--subnet) + return + ;; + --ipam-driver) + COMPREPLY=( $( compgen -W "default" -- "$cur" ) ) + return + ;; + --driver|-d) + # remove drivers that allow one instance only, add drivers missing in `docker info` + __docker_complete_plugins_bundled --type Network --remove host --remove null --add macvlan + return + ;; + --label) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--attachable --aux-address --driver -d --gateway --help --internal --ip-range --ipam-driver --ipam-opt --ipv6 --label --opt -o --subnet" -- "$cur" ) ) + ;; + esac +} + +_docker_network_disconnect() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag) + if [ $cword -eq $counter ]; then + __docker_complete_networks + elif [ $cword -eq $(($counter + 1)) ]; then + __docker_complete_containers_in_network "$prev" + fi + ;; + esac +} + +_docker_network_inspect() { + case "$prev" in + --format|-f) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--format -f --help --verbose" -- "$cur" ) ) + ;; + *) + __docker_complete_networks + esac +} + +_docker_network_ls() { + local key=$(__docker_map_key_of_current_option '--filter|-f') + case "$key" in + driver) + __docker_complete_plugins_bundled --cur "${cur##*=}" --type Network --add macvlan + return + ;; + id) + __docker_complete_networks --cur "${cur##*=}" --id + return + ;; + name) + __docker_complete_networks --cur "${cur##*=}" --name + return + ;; + scope) + COMPREPLY=( $( compgen -W "global local swarm" -- "${cur##*=}" ) ) + return + ;; + type) + COMPREPLY=( $( compgen -W "builtin custom" -- "${cur##*=}" ) ) + return + ;; + esac + + case "$prev" in + --filter|-f) + COMPREPLY=( $( compgen -S = -W "driver id label name scope type" -- "$cur" ) ) + __docker_nospace + return + ;; + --format) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--filter -f --format --help --no-trunc --quiet -q" -- "$cur" ) ) + ;; + esac +} + +_docker_network_prune() { + case "$prev" in + --filter) + COMPREPLY=( $( compgen -W "until" -S = -- "$cur" ) ) + __docker_nospace + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--force -f --filter --help" -- "$cur" ) ) + ;; + esac +} + +_docker_network_rm() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + __docker_complete_networks --filter type=custom + esac +} + +_docker_network() { + local subcommands=" + connect + create + disconnect + inspect + ls + prune + rm + " + local aliases=" + list + remove + " + __docker_subcommands "$subcommands $aliases" && return + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + COMPREPLY=( $( compgen -W "$subcommands" -- "$cur" ) ) + ;; + esac +} + +_docker_service() { + local 
subcommands=" + create + inspect + logs + ls + rm + scale + ps + update + " + + local aliases=" + list + remove + " + __docker_subcommands "$subcommands $aliases" && return + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + COMPREPLY=( $( compgen -W "$subcommands" -- "$cur" ) ) + ;; + esac +} + +_docker_service_create() { + _docker_service_update_and_create +} + +_docker_service_inspect() { + case "$prev" in + --format|-f) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--format -f --help --pretty" -- "$cur" ) ) + ;; + *) + __docker_complete_services + esac +} + +_docker_service_logs() { + case "$prev" in + --since|--tail) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--follow -f --help --no-resolve --no-task-ids --no-trunc --since --tail --timestamps -t" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag '--since|--tail') + if [ $cword -eq $counter ]; then + __docker_complete_services_and_tasks + fi + ;; + esac +} + +_docker_service_list() { + _docker_service_ls +} + +_docker_service_ls() { + local key=$(__docker_map_key_of_current_option '--filter|-f') + case "$key" in + id) + __docker_complete_services --cur "${cur##*=}" --id + return + ;; + mode) + COMPREPLY=( $( compgen -W "global replicated" -- "${cur##*=}" ) ) + return + ;; + name) + __docker_complete_services --cur "${cur##*=}" --name + return + ;; + esac + + case "$prev" in + --filter|-f) + COMPREPLY=( $( compgen -W "id label mode name" -S = -- "$cur" ) ) + __docker_nospace + return + ;; + --format) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--filter -f --format --help --quiet -q" -- "$cur" ) ) + ;; + esac +} + +_docker_service_remove() { + _docker_service_rm +} + +_docker_service_rm() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + __docker_complete_services + esac +} + +_docker_service_scale() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + __docker_complete_services + __docker_append_to_completions "=" + __docker_nospace + ;; + esac +} + +_docker_service_ps() { + local key=$(__docker_map_key_of_current_option '--filter|-f') + case "$key" in + desired-state) + COMPREPLY=( $( compgen -W "accepted running shutdown" -- "${cur##*=}" ) ) + return + ;; + name) + __docker_complete_services --cur "${cur##*=}" --name + return + ;; + node) + __docker_complete_nodes --cur "${cur##*=}" --add self + return + ;; + esac + + case "$prev" in + --filter|-f) + COMPREPLY=( $( compgen -W "desired-state id name node" -S = -- "$cur" ) ) + __docker_nospace + return + ;; + --format) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--filter -f --format --help --no-resolve --no-trunc --quiet -q" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag '--filter|-f') + if [ $cword -eq $counter ]; then + __docker_complete_services + fi + ;; + esac +} + +_docker_service_update() { + _docker_service_update_and_create +} + +# _docker_service_update_and_create is the combined completion for `docker service create` +# and `docker service update` +_docker_service_update_and_create() { + local options_with_args=" + --endpoint-mode + --env -e + --force + --health-cmd + --health-interval + --health-retries + --health-start-period + --health-timeout + --hostname + --label -l + --limit-cpu + --limit-memory + --log-driver + --log-opt + --mount + --network + --replicas + --reserve-cpu + --reserve-memory + 
--restart-condition
+ --restart-delay
+ --restart-max-attempts
+ --restart-window
+ --rollback-delay
+ --rollback-failure-action
+ --rollback-max-failure-ratio
+ --rollback-monitor
+ --rollback-parallelism
+ --stop-grace-period
+ --stop-signal
+ --update-delay
+ --update-failure-action
+ --update-max-failure-ratio
+ --update-monitor
+ --update-parallelism
+ --user -u
+ --workdir -w
+ "
+
+ local boolean_options="
+ --help
+ --no-healthcheck
+ --read-only
+ --tty -t
+ --with-registry-auth
+ "
+
+ __docker_complete_log_driver_options && return
+
+ if [ "$subcommand" = "create" ] ; then
+ options_with_args="$options_with_args
+ --constraint
+ --container-label
+ --dns
+ --dns-option
+ --dns-search
+ --env-file
+ --group
+ --host
+ --mode
+ --name
+ --placement-pref
+ --publish -p
+ --secret
+ "
+
+ case "$prev" in
+ --env-file)
+ _filedir
+ return
+ ;;
+ --group)
+ COMPREPLY=( $(compgen -g -- "$cur") )
+ return
+ ;;
+ --host)
+ case "$cur" in
+ *:)
+ __docker_complete_resolved_hostname
+ return
+ ;;
+ esac
+ ;;
+ --mode)
+ COMPREPLY=( $( compgen -W "global replicated" -- "$cur" ) )
+ return
+ ;;
+ --placement-pref)
+ COMPREPLY=( $( compgen -W "spread" -S = -- "$cur" ) )
+ __docker_nospace
+ return
+ ;;
+ --secret)
+ __docker_complete_secrets
+ return
+ ;;
+ esac
+ fi
+ if [ "$subcommand" = "update" ] ; then
+ options_with_args="$options_with_args
+ --args
+ --constraint-add
+ --constraint-rm
+ --container-label-add
+ --container-label-rm
+ --dns-add
+ --dns-option-add
+ --dns-option-rm
+ --dns-rm
+ --dns-search-add
+ --dns-search-rm
+ --group-add
+ --group-rm
+ --host-add
+ --host-rm
+ --image
+ --placement-pref-add
+ --placement-pref-rm
+ --publish-add
+ --publish-rm
+ --rollback
+ --secret-add
+ --secret-rm
+ "
+
+ case "$prev" in
+ --group-add|--group-rm)
+ COMPREPLY=( $(compgen -g -- "$cur") )
+ return
+ ;;
+ --host-add|--host-rm)
+ case "$cur" in
+ *:)
+ __docker_complete_resolved_hostname
+ return
+ ;;
+ esac
+ ;;
+ --image)
+ __docker_complete_image_repos_and_tags
+ return
+ ;;
+ --placement-pref-add|--placement-pref-rm)
+ COMPREPLY=( $( compgen -W "spread" -S = -- "$cur" ) )
+ __docker_nospace
+ return
+ ;;
+ --secret-add|--secret-rm)
+ __docker_complete_secrets
+ return
+ ;;
+ esac
+ fi
+
+ local strategy=$(__docker_map_key_of_current_option '--placement-pref|--placement-pref-add|--placement-pref-rm')
+ case "$strategy" in
+ spread)
+ COMPREPLY=( $( compgen -W "engine.labels node.labels" -S . -- "${cur##*=}" ) )
-- "${cur##*=}" ) ) + __docker_nospace + return + ;; + esac + + case "$prev" in + --endpoint-mode) + COMPREPLY=( $( compgen -W "dnsrr vip" -- "$cur" ) ) + return + ;; + --env|-e) + # we do not append a "=" here because "-e VARNAME" is legal systax, too + COMPREPLY=( $( compgen -e -- "$cur" ) ) + __docker_nospace + return + ;; + --log-driver) + __docker_complete_log_drivers + return + ;; + --log-opt) + __docker_complete_log_options + return + ;; + --network) + __docker_complete_networks + return + ;; + --restart-condition) + COMPREPLY=( $( compgen -W "any none on-failure" -- "$cur" ) ) + return + ;; + --rollback-failure-action) + COMPREPLY=( $( compgen -W "continue pause" -- "$cur" ) ) + return + ;; + --stop-signal) + __docker_complete_signals + return + ;; + --update-failure-action) + COMPREPLY=( $( compgen -W "continue pause rollback" -- "$cur" ) ) + return + ;; + --user|-u) + __docker_complete_user_group + return + ;; + $(__docker_to_extglob "$options_with_args") ) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "$boolean_options $options_with_args" -- "$cur" ) ) + ;; + *) + local counter=$( __docker_pos_first_nonflag $( __docker_to_alternatives "$options_with_args" ) ) + if [ "$subcommand" = "update" ] ; then + if [ $cword -eq $counter ]; then + __docker_complete_services + fi + else + if [ $cword -eq $counter ]; then + __docker_complete_images + fi + fi + ;; + esac +} + +_docker_swarm() { + local subcommands=" + init + join + join-token + leave + unlock + unlock-key + update + " + __docker_subcommands "$subcommands" && return + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + COMPREPLY=( $( compgen -W "$subcommands" -- "$cur" ) ) + ;; + esac +} + +_docker_swarm_init() { + case "$prev" in + --advertise-addr) + if [[ $cur == *: ]] ; then + COMPREPLY=( $( compgen -W "2377" -- "${cur##*:}" ) ) + else + __docker_complete_local_interfaces + __docker_nospace + fi + return + ;; + --availability) + COMPREPLY=( $( compgen -W "active drain pause" -- "$cur" ) ) + return + ;; + --cert-expiry|--dispatcher-heartbeat|--external-ca|--max-snapshots|--snapshot-interval|--task-history-limit) + return + ;; + --listen-addr) + if [[ $cur == *: ]] ; then + COMPREPLY=( $( compgen -W "2377" -- "${cur##*:}" ) ) + else + __docker_complete_local_interfaces --add 0.0.0.0 + __docker_nospace + fi + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--advertise-addr --data-path-addr --autolock --availability --cert-expiry --dispatcher-heartbeat --external-ca --force-new-cluster --help --listen-addr --max-snapshots --snapshot-interval --task-history-limit" -- "$cur" ) ) + ;; + esac +} + +_docker_swarm_join() { + case "$prev" in + --advertise-addr) + if [[ $cur == *: ]] ; then + COMPREPLY=( $( compgen -W "2377" -- "${cur##*:}" ) ) + else + __docker_complete_local_interfaces + __docker_nospace + fi + return + ;; + --listen-addr) + if [[ $cur == *: ]] ; then + COMPREPLY=( $( compgen -W "2377" -- "${cur##*:}" ) ) + else + __docker_complete_local_interfaces --add 0.0.0.0 + __docker_nospace + fi + return + ;; + --availability) + COMPREPLY=( $( compgen -W "active drain pause" -- "$cur" ) ) + return + ;; + --token) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--advertise-addr --data-path-addr --availability --help --listen-addr --token" -- "$cur" ) ) + ;; + *:) + COMPREPLY=( $( compgen -W "2377" -- "${cur##*:}" ) ) + ;; + esac +} + +_docker_swarm_join_token() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W 
"--help --quiet -q --rotate" -- "$cur" ) ) + ;; + *) + local counter=$( __docker_pos_first_nonflag ) + if [ $cword -eq $counter ]; then + COMPREPLY=( $( compgen -W "manager worker" -- "$cur" ) ) + fi + ;; + esac +} + +_docker_swarm_leave() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--force -f --help" -- "$cur" ) ) + ;; + esac +} + +_docker_swarm_unlock() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + esac +} + +_docker_swarm_unlock_key() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help --quiet -q --rotate" -- "$cur" ) ) + ;; + esac +} + +_docker_swarm_update() { + case "$prev" in + --cert-expiry|--dispatcher-heartbeat|--external-ca|--max-snapshots|--snapshot-interval|--task-history-limit) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--autolock --cert-expiry --dispatcher-heartbeat --external-ca --help --max-snapshots --snapshot-interval --task-history-limit" -- "$cur" ) ) + ;; + esac +} + +_docker_node() { + local subcommands=" + demote + inspect + ls + promote + rm + ps + update + " + local aliases=" + list + remove + " + __docker_subcommands "$subcommands $aliases" && return + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + COMPREPLY=( $( compgen -W "$subcommands" -- "$cur" ) ) + ;; + esac +} + +_docker_node_demote() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + __docker_complete_nodes --filter role=manager + esac +} + +_docker_node_inspect() { + case "$prev" in + --format|-f) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--format -f --help --pretty" -- "$cur" ) ) + ;; + *) + __docker_complete_nodes --add self + esac +} + +_docker_node_list() { + _docker_node_ls +} + +_docker_node_ls() { + local key=$(__docker_map_key_of_current_option '--filter|-f') + case "$key" in + id) + __docker_complete_nodes --cur "${cur##*=}" --id + return + ;; + membership) + COMPREPLY=( $( compgen -W "accepted pending" -- "${cur##*=}" ) ) + return + ;; + name) + __docker_complete_nodes --cur "${cur##*=}" --name + return + ;; + role) + COMPREPLY=( $( compgen -W "manager worker" -- "${cur##*=}" ) ) + return + ;; + esac + + case "$prev" in + --filter|-f) + COMPREPLY=( $( compgen -W "id label membership name role" -S = -- "$cur" ) ) + __docker_nospace + return + ;; + --format) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--filter -f --format --help --quiet -q" -- "$cur" ) ) + ;; + esac +} + +_docker_node_promote() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + __docker_complete_nodes --filter role=worker + esac +} + +_docker_node_remove() { + _docker_node_rm +} + +_docker_node_rm() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--force -f --help" -- "$cur" ) ) + ;; + *) + __docker_complete_nodes + esac +} + +_docker_node_ps() { + local key=$(__docker_map_key_of_current_option '--filter|-f') + case "$key" in + desired-state) + COMPREPLY=( $( compgen -W "accepted running shutdown" -- "${cur##*=}" ) ) + return + ;; + name) + __docker_complete_services --cur "${cur##*=}" --name + return + ;; + esac + + case "$prev" in + --filter|-f) + COMPREPLY=( $( compgen -W "desired-state id label name" -S = -- "$cur" ) ) + __docker_nospace + return + ;; + --format) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--filter -f --format --help --no-resolve --no-trunc --quiet -q" -- "$cur" ) ) + ;; + *) + __docker_complete_nodes --add self 
+ ;;
+ esac
+}
+
+_docker_node_update() {
+ case "$prev" in
+ --availability)
+ COMPREPLY=( $( compgen -W "active drain pause" -- "$cur" ) )
+ return
+ ;;
+ --role)
+ COMPREPLY=( $( compgen -W "manager worker" -- "$cur" ) )
+ return
+ ;;
+ --label-add|--label-rm)
+ return
+ ;;
+ esac
+
+ case "$cur" in
+ -*)
+ COMPREPLY=( $( compgen -W "--availability --help --label-add --label-rm --role" -- "$cur" ) )
+ ;;
+ *)
+ local counter=$(__docker_pos_first_nonflag '--availability|--label-add|--label-rm|--role')
+ if [ $cword -eq $counter ]; then
+ __docker_complete_nodes
+ fi
+ ;;
+ esac
+}
+
+_docker_pause() {
+ _docker_container_pause
+}
+
+_docker_plugin() {
+ local subcommands="
+ create
+ disable
+ enable
+ inspect
+ install
+ ls
+ push
+ rm
+ set
+ upgrade
+ "
+ local aliases="
+ list
+ remove
+ "
+ __docker_subcommands "$subcommands $aliases" && return
+
+ case "$cur" in
+ -*)
+ COMPREPLY=( $( compgen -W "--help" -- "$cur" ) )
+ ;;
+ *)
+ COMPREPLY=( $( compgen -W "$subcommands" -- "$cur" ) )
+ ;;
+ esac
+}
+
+_docker_plugin_create() {
+ case "$cur" in
+ -*)
+ COMPREPLY=( $( compgen -W "--compress --help" -- "$cur" ) )
+ ;;
+ *)
+ local counter=$(__docker_pos_first_nonflag)
+ if [ $cword -eq $counter ]; then
+ # reponame
+ return
+ elif [ $cword -eq $((counter + 1)) ]; then
+ _filedir -d
+ fi
+ ;;
+ esac
+}
+
+_docker_plugin_disable() {
+ case "$cur" in
+ -*)
+ COMPREPLY=( $( compgen -W "--force -f --help" -- "$cur" ) )
+ ;;
+ *)
+ local counter=$(__docker_pos_first_nonflag)
+ if [ $cword -eq $counter ]; then
+ __docker_complete_plugins_installed --filter enabled=true
+ fi
+ ;;
+ esac
+}
+
+_docker_plugin_enable() {
+ case "$prev" in
+ --timeout)
+ return
+ ;;
+ esac
+
+ case "$cur" in
+ -*)
+ COMPREPLY=( $( compgen -W "--help --timeout" -- "$cur" ) )
+ ;;
+ *)
+ local counter=$(__docker_pos_first_nonflag '--timeout')
+ if [ $cword -eq $counter ]; then
+ __docker_complete_plugins_installed --filter enabled=false
+ fi
+ ;;
+ esac
+}
+
+_docker_plugin_inspect() {
+ case "$prev" in
+ --format|-f)
+ return
+ ;;
+ esac
+
+ case "$cur" in
+ -*)
+ COMPREPLY=( $( compgen -W "--format -f --help" -- "$cur" ) )
+ ;;
+ *)
+ __docker_complete_plugins_installed
+ ;;
+ esac
+}
+
+_docker_plugin_install() {
+ case "$prev" in
+ --alias)
+ return
+ ;;
+ esac
+
+ case "$cur" in
+ -*)
+ COMPREPLY=( $( compgen -W "--alias --disable --disable-content-trust=false --grant-all-permissions --help" -- "$cur" ) )
+ ;;
+ esac
+}
+
+_docker_plugin_list() {
+ _docker_plugin_ls
+}
+
+_docker_plugin_ls() {
+ local key=$(__docker_map_key_of_current_option '--filter|-f')
+ case "$key" in
+ capability)
+ COMPREPLY=( $( compgen -W "authz ipamdriver networkdriver volumedriver" -- "${cur##*=}" ) )
+ return
+ ;;
+ enabled)
+ COMPREPLY=( $( compgen -W "false true" -- "${cur##*=}" ) )
+ return
+ ;;
+ esac
+
+ case "$prev" in
+ --filter|-f)
+ COMPREPLY=( $( compgen -S = -W "capability enabled" -- "$cur" ) )
+ __docker_nospace
+ return
+ ;;
+ --format)
+ return
+ ;;
+ esac
+
+ case "$cur" in
+ -*)
+ COMPREPLY=( $( compgen -W "--filter -f --format --help --no-trunc --quiet -q" -- "$cur" ) )
+ ;;
+ esac
+}
+
+_docker_plugin_push() {
+ case "$cur" in
+ -*)
+ COMPREPLY=( $( compgen -W "--help" -- "$cur" ) )
+ ;;
+ *)
+ local counter=$(__docker_pos_first_nonflag)
+ if [ $cword -eq $counter ]; then
+ __docker_complete_plugins_installed
+ fi
+ ;;
+ esac
+}
+
+_docker_plugin_remove() {
+ _docker_plugin_rm
+}
+
+_docker_plugin_rm() {
+ case "$cur" in
+ -*)
+ COMPREPLY=( $( compgen -W "--force -f --help" -- "$cur" ) )
+ ;;
+ *)
+
__docker_complete_plugins_installed + ;; + esac +} + +_docker_plugin_set() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag) + if [ $cword -eq $counter ]; then + __docker_complete_plugins_installed + fi + ;; + esac +} + +_docker_plugin_upgrade() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--disable-content-trust --grant-all-permissions --help --skip-remote-check" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag) + if [ $cword -eq $counter ]; then + __docker_complete_plugins_installed + __ltrim_colon_completions "$cur" + elif [ $cword -eq $((counter + 1)) ]; then + local plugin_images="$(__docker_plugins_installed)" + COMPREPLY=( $(compgen -S : -W "${plugin_images%:*}" -- "$cur") ) + __docker_nospace + fi + ;; + esac +} + + +_docker_port() { + _docker_container_port +} + +_docker_ps() { + _docker_container_ls +} + +_docker_pull() { + _docker_image_pull +} + +_docker_push() { + _docker_image_push +} + +_docker_rename() { + _docker_container_rename +} + +_docker_restart() { + _docker_container_restart +} + +_docker_rm() { + _docker_container_rm +} + +_docker_rmi() { + _docker_image_rm +} + +_docker_run() { + _docker_container_run +} + +_docker_save() { + _docker_image_save +} + + +_docker_secret() { + local subcommands=" + create + inspect + ls + rm + " + local aliases=" + list + remove + " + __docker_subcommands "$subcommands $aliases" && return + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + COMPREPLY=( $( compgen -W "$subcommands" -- "$cur" ) ) + ;; + esac +} + +_docker_secret_create() { + case "$prev" in + --label|-l) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help --label -l" -- "$cur" ) ) + ;; + esac +} + +_docker_secret_inspect() { + case "$prev" in + --format|-f) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--format -f --help" -- "$cur" ) ) + ;; + *) + __docker_complete_secrets + ;; + esac +} + +_docker_secret_list() { + _docker_secret_ls +} + +_docker_secret_ls() { + local key=$(__docker_map_key_of_current_option '--filter|-f') + case "$key" in + id) + __docker_complete_secrets --cur "${cur##*=}" --id + return + ;; + name) + __docker_complete_secrets --cur "${cur##*=}" --name + return + ;; + esac + + case "$prev" in + --filter|-f) + COMPREPLY=( $( compgen -S = -W "id label name" -- "$cur" ) ) + __docker_nospace + return + ;; + --format) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--format --filter -f --help --quiet -q" -- "$cur" ) ) + ;; + esac +} + +_docker_secret_remove() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + __docker_complete_secrets + ;; + esac +} + +_docker_secret_rm() { + _docker_secret_remove +} + + + +_docker_search() { + local key=$(__docker_map_key_of_current_option '--filter|-f') + case "$key" in + is-automated) + COMPREPLY=( $( compgen -W "false true" -- "${cur##*=}" ) ) + return + ;; + is-official) + COMPREPLY=( $( compgen -W "false true" -- "${cur##*=}" ) ) + return + ;; + esac + + case "$prev" in + --filter|-f) + COMPREPLY=( $( compgen -S = -W "is-automated is-official stars" -- "$cur" ) ) + __docker_nospace + return + ;; + --limit) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--filter --help --limit --no-trunc" -- "$cur" ) ) + ;; + esac +} + + +_docker_stack() { + local subcommands=" + deploy + ls + ps + rm + services + " + local aliases=" + down 
+ list + remove + up + " + __docker_subcommands "$subcommands $aliases" && return + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + COMPREPLY=( $( compgen -W "$subcommands" -- "$cur" ) ) + ;; + esac +} + +_docker_stack_deploy() { + case "$prev" in + --bundle-file) + if __docker_daemon_is_experimental ; then + _filedir dab + return + fi + ;; + --compose-file|-c) + _filedir yml + return + ;; + esac + + case "$cur" in + -*) + local options="--compose-file -c --help --prune --with-registry-auth" + __docker_daemon_is_experimental && options+=" --bundle-file" + COMPREPLY=( $( compgen -W "$options" -- "$cur" ) ) + ;; + esac +} + +_docker_stack_down() { + _docker_stack_rm +} + +_docker_stack_list() { + _docker_stack_ls +} + +_docker_stack_ls() { + case "$prev" in + --format) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--format --help" -- "$cur" ) ) + ;; + esac +} + +_docker_stack_ps() { + local key=$(__docker_map_key_of_current_option '--filter|-f') + case "$key" in + desired-state) + COMPREPLY=( $( compgen -W "accepted running shutdown" -- "${cur##*=}" ) ) + return + ;; + id) + __docker_complete_stacks --cur "${cur##*=}" --id + return + ;; + name) + __docker_complete_stacks --cur "${cur##*=}" --name + return + ;; + esac + + case "$prev" in + --filter|-f) + COMPREPLY=( $( compgen -S = -W "id name desired-state" -- "$cur" ) ) + __docker_nospace + return + ;; + --format) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--all -a --filter -f --format --help --no-resolve --no-trunc --quiet -q" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag '--filter|-f') + if [ $cword -eq $counter ]; then + __docker_complete_stacks + fi + ;; + esac +} + +_docker_stack_remove() { + _docker_stack_rm +} + +_docker_stack_rm() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + __docker_complete_stacks + esac +} + +_docker_stack_services() { + local key=$(__docker_map_key_of_current_option '--filter|-f') + case "$key" in + id) + __docker_complete_services --cur "${cur##*=}" --id + return + ;; + label) + return + ;; + name) + __docker_complete_services --cur "${cur##*=}" --name + return + ;; + esac + + case "$prev" in + --filter|-f) + COMPREPLY=( $( compgen -S = -W "id label name" -- "$cur" ) ) + __docker_nospace + return + ;; + --format) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--filter -f --format --help --quiet -q" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag '--filter|-f|--format') + if [ $cword -eq $counter ]; then + __docker_complete_stacks + fi + ;; + esac +} + +_docker_stack_up() { + _docker_stack_deploy +} + + +_docker_start() { + _docker_container_start +} + +_docker_stats() { + _docker_container_stats +} + +_docker_stop() { + _docker_container_stop +} + + +_docker_system() { + local subcommands=" + df + events + info + prune + " + __docker_subcommands "$subcommands $aliases" && return + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + COMPREPLY=( $( compgen -W "$subcommands" -- "$cur" ) ) + ;; + esac +} + +_docker_system_df() { + case "$prev" in + --format) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--format --help --verbose -v" -- "$cur" ) ) + ;; + esac +} + +_docker_system_events() { + local key=$(__docker_map_key_of_current_option '-f|--filter') + case "$key" in + container) + __docker_complete_containers_all --cur "${cur##*=}" + return 
+ ;; + daemon) + local name=$(__docker_q info | sed -n 's/^\(ID\|Name\): //p') + COMPREPLY=( $( compgen -W "$name" -- "${cur##*=}" ) ) + return + ;; + event) + COMPREPLY=( $( compgen -W " + attach + commit + connect + copy + create + delete + destroy + detach + die + disconnect + exec_create + exec_detach + exec_start + export + health_status + import + kill + load + mount + oom + pause + pull + push + reload + rename + resize + restart + save + start + stop + tag + top + unmount + unpause + untag + update + " -- "${cur##*=}" ) ) + return + ;; + image) + cur="${cur##*=}" + __docker_complete_images + return + ;; + network) + __docker_complete_networks --cur "${cur##*=}" + return + ;; + type) + COMPREPLY=( $( compgen -W "container daemon image network volume" -- "${cur##*=}" ) ) + return + ;; + volume) + __docker_complete_volumes --cur "${cur##*=}" + return + ;; + esac + + case "$prev" in + --filter|-f) + COMPREPLY=( $( compgen -S = -W "container daemon event image label network type volume" -- "$cur" ) ) + __docker_nospace + return + ;; + --since|--until) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--filter -f --help --since --until --format" -- "$cur" ) ) + ;; + esac +} + +_docker_system_info() { + case "$prev" in + --format|-f) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--format -f --help" -- "$cur" ) ) + ;; + esac +} + +_docker_system_prune() { + case "$prev" in + --filter) + COMPREPLY=( $( compgen -W "until" -S = -- "$cur" ) ) + __docker_nospace + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--all -a --force -f --filter --help" -- "$cur" ) ) + ;; + esac +} + + +_docker_tag() { + _docker_image_tag +} + +_docker_unpause() { + _docker_container_unpause +} + +_docker_update() { + _docker_container_update +} + +_docker_top() { + _docker_container_top +} + +_docker_version() { + case "$prev" in + --format|-f) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--format -f --help" -- "$cur" ) ) + ;; + esac +} + +_docker_volume_create() { + case "$prev" in + --driver|-d) + __docker_complete_plugins_bundled --type Volume + return + ;; + --label|--opt|-o) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--driver -d --help --label --opt -o" -- "$cur" ) ) + ;; + esac +} + +_docker_volume_inspect() { + case "$prev" in + --format|-f) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--format -f --help" -- "$cur" ) ) + ;; + *) + __docker_complete_volumes + ;; + esac +} + +_docker_volume_list() { + _docker_volume_ls +} + +_docker_volume_ls() { + local key=$(__docker_map_key_of_current_option '--filter|-f') + case "$key" in + dangling) + COMPREPLY=( $( compgen -W "true false" -- "${cur##*=}" ) ) + return + ;; + driver) + __docker_complete_plugins_bundled --cur "${cur##*=}" --type Volume + return + ;; + name) + __docker_complete_volumes --cur "${cur##*=}" + return + ;; + esac + + case "$prev" in + --filter|-f) + COMPREPLY=( $( compgen -S = -W "dangling driver label name" -- "$cur" ) ) + __docker_nospace + return + ;; + --format) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--filter -f --format --help --quiet -q" -- "$cur" ) ) + ;; + esac +} + +_docker_volume_prune() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--force -f --help" -- "$cur" ) ) + ;; + esac +} + +_docker_volume_remove() { + _docker_volume_rm +} + +_docker_volume_rm() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--force -f --help" 
-- "$cur" ) ) + ;; + *) + __docker_complete_volumes + ;; + esac +} + +_docker_volume() { + local subcommands=" + create + inspect + ls + prune + rm + " + local aliases=" + list + remove + " + __docker_subcommands "$subcommands $aliases" && return + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + COMPREPLY=( $( compgen -W "$subcommands" -- "$cur" ) ) + ;; + esac +} + +_docker_wait() { + _docker_container_wait +} + +_docker() { + local previous_extglob_setting=$(shopt -p extglob) + shopt -s extglob + + local management_commands=( + container + image + network + node + plugin + secret + service + stack + system + volume + ) + + local top_level_commands=( + build + login + logout + run + search + version + ) + + local legacy_commands=( + attach + commit + cp + create + diff + events + exec + export + history + images + import + info + inspect + kill + load + logs + pause + port + ps + pull + push + rename + restart + rm + rmi + save + start + stats + stop + swarm + tag + top + unpause + update + wait + ) + + local experimental_commands=( + checkpoint + deploy + ) + + local commands=(${management_commands[*]} ${top_level_commands[*]}) + [ -z "$DOCKER_HIDE_LEGACY_COMMANDS" ] && commands+=(${legacy_commands[*]}) + + # These options are valid as global options for all client commands + # and valid as command options for `docker daemon` + local global_boolean_options=" + --debug -D + --tls + --tlsverify + " + local global_options_with_args=" + --config + --host -H + --log-level -l + --tlscacert + --tlscert + --tlskey + " + + local host config daemon_os + + COMPREPLY=() + local cur prev words cword + _get_comp_words_by_ref -n : cur prev words cword + + local command='docker' command_pos=0 subcommand_pos + local counter=1 + while [ $counter -lt $cword ]; do + case "${words[$counter]}" in + # save host so that completion can use custom daemon + --host|-H) + (( counter++ )) + host="${words[$counter]}" + ;; + # save config so that completion can use custom configuration directories + --config) + (( counter++ )) + config="${words[$counter]}" + ;; + $(__docker_to_extglob "$global_options_with_args") ) + (( counter++ )) + ;; + -*) + ;; + =) + (( counter++ )) + ;; + *) + command="${words[$counter]}" + command_pos=$counter + break + ;; + esac + (( counter++ )) + done + + local binary="${words[0]}" + if [[ $binary == ?(*/)dockerd ]] ; then + # for the dockerd binary, we reuse completion of `docker daemon`. + # dockerd does not have subcommands and global options. 
+ command=daemon
+ command_pos=0
+ fi
+
+ local completions_func=_docker_${command//-/_}
+ declare -F $completions_func >/dev/null && $completions_func
+
+ eval "$previous_extglob_setting"
+ return 0
+}
+
+eval "$__docker_previous_extglob_setting"
+unset __docker_previous_extglob_setting
+
+complete -F _docker docker docker.exe dockerd dockerd.exe
diff --git a/components/engine/contrib/completion/fish/docker.fish b/components/engine/contrib/completion/fish/docker.fish
new file mode 100644
index 0000000000..38957b8b77
--- /dev/null
+++ b/components/engine/contrib/completion/fish/docker.fish
@@ -0,0 +1,409 @@
+# docker.fish - docker completions for fish shell
+#
+# This file is generated by gen_docker_fish_completions.py from:
+# https://github.com/barnybug/docker-fish-completion
+#
+# To install the completions:
+# mkdir -p ~/.config/fish/completions
+# cp docker.fish ~/.config/fish/completions
+#
+# Completion supported:
+# - parameters
+# - commands
+# - containers
+# - images
+# - repositories
+
+function __fish_docker_no_subcommand --description 'Test if docker has yet to be given the subcommand'
+ for i in (commandline -opc)
+ if contains -- $i attach build commit cp create diff events exec export history images import info inspect kill load login logout logs pause port ps pull push rename restart rm rmi run save search start stop tag top unpause version wait stats
+ return 1
+ end
+ end
+ return 0
+end
+
+function __fish_print_docker_containers --description 'Print a list of docker containers' -a select
+ switch $select
+ case running
+ docker ps -a --no-trunc --filter status=running --format "{{.ID}}\n{{.Names}}" | tr ',' '\n'
+ case stopped
+ docker ps -a --no-trunc --filter status=exited --format "{{.ID}}\n{{.Names}}" | tr ',' '\n'
+ case all
+ docker ps -a --no-trunc --format "{{.ID}}\n{{.Names}}" | tr ',' '\n'
+ end
+end
+
+function __fish_print_docker_images --description 'Print a list of docker images'
+ docker images --format "{{.Repository}}:{{.Tag}}" | command grep -v '<none>'
+end
+
+function __fish_print_docker_repositories --description 'Print a list of docker repositories'
+ docker images --format "{{.Repository}}" | command grep -v '<none>' | command sort | command uniq
+end
+
+# common options
+complete -c docker -f -n '__fish_docker_no_subcommand' -l api-cors-header -d "Set CORS headers in the Engine API. Default is cors disabled"
+complete -c docker -f -n '__fish_docker_no_subcommand' -s b -l bridge -d 'Attach containers to a pre-existing network bridge'
+complete -c docker -f -n '__fish_docker_no_subcommand' -l bip -d "Use this CIDR notation address for the network bridge's IP, not compatible with -b"
+complete -c docker -f -n '__fish_docker_no_subcommand' -s D -l debug -d 'Enable debug mode'
+complete -c docker -f -n '__fish_docker_no_subcommand' -s d -l daemon -d 'Enable daemon mode'
+complete -c docker -f -n '__fish_docker_no_subcommand' -l dns -d 'Force Docker to use specific DNS servers'
+complete -c docker -f -n '__fish_docker_no_subcommand' -l dns-opt -d 'Force Docker to use specific DNS options'
+complete -c docker -f -n '__fish_docker_no_subcommand' -l dns-search -d 'Force Docker to use specific DNS search domains'
+complete -c docker -f -n '__fish_docker_no_subcommand' -l exec-opt -d 'Set runtime execution options'
+complete -c docker -f -n '__fish_docker_no_subcommand' -l fixed-cidr -d 'IPv4 subnet for fixed IPs (e.g.
10.20.0.0/16)' +complete -c docker -f -n '__fish_docker_no_subcommand' -l fixed-cidr-v6 -d 'IPv6 subnet for fixed IPs (e.g.: 2001:a02b/48)' +complete -c docker -f -n '__fish_docker_no_subcommand' -s G -l group -d 'Group to assign the unix socket specified by -H when running in daemon mode' +complete -c docker -f -n '__fish_docker_no_subcommand' -s g -l graph -d 'Path to use as the root of the Docker runtime' +complete -c docker -f -n '__fish_docker_no_subcommand' -s H -l host -d 'The socket(s) to bind to in daemon mode or connect to in client mode, specified using one or more tcp://host:port, unix:///path/to/socket, fd://* or fd://socketfd.' +complete -c docker -f -n '__fish_docker_no_subcommand' -s h -l help -d 'Print usage' +complete -c docker -f -n '__fish_docker_no_subcommand' -l icc -d 'Allow unrestricted inter-container and Docker daemon host communication' +complete -c docker -f -n '__fish_docker_no_subcommand' -l insecure-registry -d 'Enable insecure communication with specified registries (no certificate verification for HTTPS and enable HTTP fallback) (e.g., localhost:5000 or 10.20.0.0/16)' +complete -c docker -f -n '__fish_docker_no_subcommand' -l ip -d 'Default IP address to use when binding container ports' +complete -c docker -f -n '__fish_docker_no_subcommand' -l ip-forward -d 'Enable net.ipv4.ip_forward and IPv6 forwarding if --fixed-cidr-v6 is defined. IPv6 forwarding may interfere with your existing IPv6 configuration when using Router Advertisement.' +complete -c docker -f -n '__fish_docker_no_subcommand' -l ip-masq -d "Enable IP masquerading for bridge's IP range" +complete -c docker -f -n '__fish_docker_no_subcommand' -l iptables -d "Enable Docker's addition of iptables rules" +complete -c docker -f -n '__fish_docker_no_subcommand' -l ipv6 -d 'Enable IPv6 networking' +complete -c docker -f -n '__fish_docker_no_subcommand' -s l -l log-level -d 'Set the logging level ("debug", "info", "warn", "error", "fatal")' +complete -c docker -f -n '__fish_docker_no_subcommand' -l label -d 'Set key=value labels to the daemon (displayed in `docker info`)' +complete -c docker -f -n '__fish_docker_no_subcommand' -l mtu -d 'Set the containers network MTU' +complete -c docker -f -n '__fish_docker_no_subcommand' -s p -l pidfile -d 'Path to use for daemon PID file' +complete -c docker -f -n '__fish_docker_no_subcommand' -l registry-mirror -d 'Specify a preferred Docker registry mirror' +complete -c docker -f -n '__fish_docker_no_subcommand' -s s -l storage-driver -d 'Force the Docker runtime to use a specific storage driver' +complete -c docker -f -n '__fish_docker_no_subcommand' -l selinux-enabled -d 'Enable selinux support. 
SELinux does not presently support the BTRFS storage driver'
+complete -c docker -f -n '__fish_docker_no_subcommand' -l storage-opt -d 'Set storage driver options'
+complete -c docker -f -n '__fish_docker_no_subcommand' -l tls -d 'Use TLS; implied by --tlsverify'
+complete -c docker -f -n '__fish_docker_no_subcommand' -l tlscacert -d 'Trust only remotes providing a certificate signed by the CA given here'
+complete -c docker -f -n '__fish_docker_no_subcommand' -l tlscert -d 'Path to TLS certificate file'
+complete -c docker -f -n '__fish_docker_no_subcommand' -l tlskey -d 'Path to TLS key file'
+complete -c docker -f -n '__fish_docker_no_subcommand' -l tlsverify -d 'Use TLS and verify the remote (daemon: verify client, client: verify daemon)'
+complete -c docker -f -n '__fish_docker_no_subcommand' -s v -l version -d 'Print version information and quit'
+
+# subcommands
+# attach
+complete -c docker -f -n '__fish_docker_no_subcommand' -a attach -d 'Attach to a running container'
+complete -c docker -A -f -n '__fish_seen_subcommand_from attach' -l help -d 'Print usage'
+complete -c docker -A -f -n '__fish_seen_subcommand_from attach' -l no-stdin -d 'Do not attach STDIN'
+complete -c docker -A -f -n '__fish_seen_subcommand_from attach' -l sig-proxy -d 'Proxy all received signals to the process (non-TTY mode only). SIGCHLD, SIGKILL, and SIGSTOP are not proxied.'
+complete -c docker -A -f -n '__fish_seen_subcommand_from attach' -a '(__fish_print_docker_containers running)' -d "Container"
+
+# build
+complete -c docker -f -n '__fish_docker_no_subcommand' -a build -d 'Build an image from a Dockerfile'
+complete -c docker -A -f -n '__fish_seen_subcommand_from build' -s f -l file -d "Name of the Dockerfile (Default is 'Dockerfile' at context root)"
+complete -c docker -A -f -n '__fish_seen_subcommand_from build' -l force-rm -d 'Always remove intermediate containers, even after unsuccessful builds'
+complete -c docker -A -f -n '__fish_seen_subcommand_from build' -l help -d 'Print usage'
+complete -c docker -A -f -n '__fish_seen_subcommand_from build' -l no-cache -d 'Do not use cache when building the image'
+complete -c docker -A -f -n '__fish_seen_subcommand_from build' -l pull -d 'Always attempt to pull a newer version of the image'
+complete -c docker -A -f -n '__fish_seen_subcommand_from build' -s q -l quiet -d 'Suppress the build output and print image ID on success'
+complete -c docker -A -f -n '__fish_seen_subcommand_from build' -l rm -d 'Remove intermediate containers after a successful build'
+complete -c docker -A -f -n '__fish_seen_subcommand_from build' -s t -l tag -d 'Repository name (and optionally a tag) to be applied to the resulting image in case of success'
+
+# commit
+complete -c docker -f -n '__fish_docker_no_subcommand' -a commit -d "Create a new image from a container's changes"
+complete -c docker -A -f -n '__fish_seen_subcommand_from commit' -s a -l author -d 'Author (e.g., "John Hannibal Smith <hannibal@a-team.com>")'
+complete -c docker -A -f -n '__fish_seen_subcommand_from commit' -l help -d 'Print usage'
+complete -c docker -A -f -n '__fish_seen_subcommand_from commit' -s m -l message -d 'Commit message'
+complete -c docker -A -f -n '__fish_seen_subcommand_from commit' -s p -l pause -d 'Pause container during commit'
+complete -c docker -A -f -n '__fish_seen_subcommand_from commit' -a '(__fish_print_docker_containers all)' -d "Container"
+
+# cp
+complete -c docker -f -n '__fish_docker_no_subcommand' -a cp -d "Copy files/folders between a container and the local filesystem"
+complete -c
docker -A -f -n '__fish_seen_subcommand_from cp' -l help -d 'Print usage' + +# create +complete -c docker -f -n '__fish_docker_no_subcommand' -a create -d 'Create a new container' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s a -l attach -d 'Attach to STDIN, STDOUT or STDERR.' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l add-host -d 'Add a custom host-to-IP mapping (host:ip)' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l cpu-shares -d 'CPU shares (relative weight)' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l cap-add -d 'Add Linux capabilities' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l cap-drop -d 'Drop Linux capabilities' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l cidfile -d 'Write the container ID to the file' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l cpuset -d 'CPUs in which to allow execution (0-3, 0,1)' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l device -d 'Add a host device to the container (e.g. --device=/dev/sdc:/dev/xvdc:rwm)' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l device-cgroup-rule -d 'Add a rule to the cgroup allowed devices list (e.g. --device-cgroup-rule="c 13:37 rwm")' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l dns -d 'Set custom DNS servers' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l dns-opt -d "Set custom DNS options (Use --dns-opt='' if you don't wish to set options)" +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l dns-search -d "Set custom DNS search domains (Use --dns-search=. if you don't wish to set the search domain)" +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s e -l env -d 'Set environment variables' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l entrypoint -d 'Overwrite the default ENTRYPOINT of the image' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l env-file -d 'Read in a line delimited file of environment variables' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l expose -d 'Expose a port or a range of ports (e.g. 
--expose=3300-3310) from the container without publishing it to your host'
+complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l group-add -d 'Add additional groups to run as'
+complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s h -l hostname -d 'Container host name'
+complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l help -d 'Print usage'
+complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s i -l interactive -d 'Keep STDIN open even if not attached'
+complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l ipc -d 'Default is to create a private IPC namespace (POSIX SysV IPC) for the container'
+complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l link -d 'Add link to another container in the form of <name|id>:alias'
+complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s m -l memory -d 'Memory limit (format: <number>[<unit>], where unit = b, k, m or g)'
+complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l mac-address -d 'Container MAC address (e.g., 92:d0:c6:0a:29:33)'
+complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l memory-swap -d "Total memory usage (memory + swap), set '-1' to disable swap (format: <number>[<unit>], where unit = b, k, m or g)"
+complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l mount -d 'Attach a filesystem mount to the container'
+complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l name -d 'Assign a name to the container'
+complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l net -d 'Set the Network mode for the container'
+complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s P -l publish-all -d 'Publish all exposed ports to random ports on the host interfaces'
+complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s p -l publish -d "Publish a container's port to the host"
+complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l pid -d 'Default is to create a private PID namespace for the container'
+complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l privileged -d 'Give extended privileges to this container'
+complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l read-only -d "Mount the container's root filesystem as read only"
+complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l restart -d 'Restart policy to apply when a container exits (no, on-failure[:max-retry], always)'
+complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l security-opt -d 'Security Options'
+complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s t -l tty -d 'Allocate a pseudo-TTY'
+complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s u -l user -d 'Username or UID'
+complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s v -l volume -d 'Bind mount a volume (e.g., from the host: -v /host:/container, from Docker: -v /container)'
+complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l volumes-from -d 'Mount volumes from the specified container(s)'
+complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s w -l workdir -d 'Working directory inside the container'
+complete -c docker -A -f -n '__fish_seen_subcommand_from create' -a '(__fish_print_docker_images)' -d "Image"
+
+# diff
+complete -c docker -f -n '__fish_docker_no_subcommand' -a diff -d "Inspect changes on a container's filesystem"
+complete -c docker -A -f -n '__fish_seen_subcommand_from diff' -l help -d 'Print usage'
+complete -c
docker -A -f -n '__fish_seen_subcommand_from diff' -a '(__fish_print_docker_containers all)' -d "Container" + +# events +complete -c docker -f -n '__fish_docker_no_subcommand' -a events -d 'Get real time events from the server' +complete -c docker -A -f -n '__fish_seen_subcommand_from events' -s f -l filter -d "Provide filter values (i.e., 'event=stop')" +complete -c docker -A -f -n '__fish_seen_subcommand_from events' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from events' -l since -d 'Show all events created since timestamp' +complete -c docker -A -f -n '__fish_seen_subcommand_from events' -l until -d 'Stream events until this timestamp' +complete -c docker -A -f -n '__fish_seen_subcommand_from events' -l format -d 'Format the output using the given go template' + +# exec +complete -c docker -f -n '__fish_docker_no_subcommand' -a exec -d 'Run a command in a running container' +complete -c docker -A -f -n '__fish_seen_subcommand_from exec' -s d -l detach -d 'Detached mode: run command in the background' +complete -c docker -A -f -n '__fish_seen_subcommand_from exec' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from exec' -s i -l interactive -d 'Keep STDIN open even if not attached' +complete -c docker -A -f -n '__fish_seen_subcommand_from exec' -s t -l tty -d 'Allocate a pseudo-TTY' +complete -c docker -A -f -n '__fish_seen_subcommand_from exec' -a '(__fish_print_docker_containers running)' -d "Container" + +# export +complete -c docker -f -n '__fish_docker_no_subcommand' -a export -d 'Stream the contents of a container as a tar archive' +complete -c docker -A -f -n '__fish_seen_subcommand_from export' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from export' -a '(__fish_print_docker_containers all)' -d "Container" + +# history +complete -c docker -f -n '__fish_docker_no_subcommand' -a history -d 'Show the history of an image' +complete -c docker -A -f -n '__fish_seen_subcommand_from history' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from history' -l no-trunc -d "Don't truncate output" +complete -c docker -A -f -n '__fish_seen_subcommand_from history' -s q -l quiet -d 'Only show numeric IDs' +complete -c docker -A -f -n '__fish_seen_subcommand_from history' -a '(__fish_print_docker_images)' -d "Image" + +# images +complete -c docker -f -n '__fish_docker_no_subcommand' -a images -d 'List images' +complete -c docker -A -f -n '__fish_seen_subcommand_from images' -s a -l all -d 'Show all images (by default filter out the intermediate image layers)' +complete -c docker -A -f -n '__fish_seen_subcommand_from images' -s f -l filter -d "Provide filter values (i.e., 'dangling=true')" +complete -c docker -A -f -n '__fish_seen_subcommand_from images' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from images' -l no-trunc -d "Don't truncate output" +complete -c docker -A -f -n '__fish_seen_subcommand_from images' -s q -l quiet -d 'Only show numeric IDs' +complete -c docker -A -f -n '__fish_seen_subcommand_from images' -a '(__fish_print_docker_repositories)' -d "Repository" + +# import +complete -c docker -f -n '__fish_docker_no_subcommand' -a import -d 'Create a new filesystem image from the contents of a tarball' +complete -c docker -A -f -n '__fish_seen_subcommand_from import' -l help -d 'Print usage' + +# info +complete -c docker -f -n '__fish_docker_no_subcommand' -a info -d 'Display system-wide information' +complete -c docker -A 
-f -n '__fish_seen_subcommand_from info' -s f -l format -d 'Format the output using the given go template' +complete -c docker -A -f -n '__fish_seen_subcommand_from info' -l help -d 'Print usage' + +# inspect +complete -c docker -f -n '__fish_docker_no_subcommand' -a inspect -d 'Return low-level information on a container or image' +complete -c docker -A -f -n '__fish_seen_subcommand_from inspect' -s f -l format -d 'Format the output using the given go template.' +complete -c docker -A -f -n '__fish_seen_subcommand_from inspect' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from inspect' -s s -l size -d 'Display total file sizes if the type is container.' +complete -c docker -A -f -n '__fish_seen_subcommand_from inspect' -a '(__fish_print_docker_images)' -d "Image" +complete -c docker -A -f -n '__fish_seen_subcommand_from inspect' -a '(__fish_print_docker_containers all)' -d "Container" + +# kill +complete -c docker -f -n '__fish_docker_no_subcommand' -a kill -d 'Kill a running container' +complete -c docker -A -f -n '__fish_seen_subcommand_from kill' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from kill' -s s -l signal -d 'Signal to send to the container' +complete -c docker -A -f -n '__fish_seen_subcommand_from kill' -a '(__fish_print_docker_containers running)' -d "Container" + +# load +complete -c docker -f -n '__fish_docker_no_subcommand' -a load -d 'Load an image from a tar archive' +complete -c docker -A -f -n '__fish_seen_subcommand_from load' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from load' -s i -l input -d 'Read from a tar archive file, instead of STDIN' + +# login +complete -c docker -f -n '__fish_docker_no_subcommand' -a login -d 'Log in to a Docker registry server' +complete -c docker -A -f -n '__fish_seen_subcommand_from login' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from login' -s p -l password -d 'Password' +complete -c docker -A -f -n '__fish_seen_subcommand_from login' -s u -l username -d 'Username' + +# logout +complete -c docker -f -n '__fish_docker_no_subcommand' -a logout -d 'Log out from a Docker registry server' + +# logs +complete -c docker -f -n '__fish_docker_no_subcommand' -a logs -d 'Fetch the logs of a container' +complete -c docker -A -f -n '__fish_seen_subcommand_from logs' -s f -l follow -d 'Follow log output' +complete -c docker -A -f -n '__fish_seen_subcommand_from logs' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from logs' -s t -l timestamps -d 'Show timestamps' +complete -c docker -A -f -n '__fish_seen_subcommand_from logs' -l since -d 'Show logs since timestamp' +complete -c docker -A -f -n '__fish_seen_subcommand_from logs' -l tail -d 'Output the specified number of lines at the end of logs (defaults to all logs)' +complete -c docker -A -f -n '__fish_seen_subcommand_from logs' -a '(__fish_print_docker_containers running)' -d "Container" + +# port +complete -c docker -f -n '__fish_docker_no_subcommand' -a port -d 'Lookup the public-facing port that is NAT-ed to PRIVATE_PORT' +complete -c docker -A -f -n '__fish_seen_subcommand_from port' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from port' -a '(__fish_print_docker_containers running)' -d "Container" + +# pause +complete -c docker -f -n '__fish_docker_no_subcommand' -a pause -d 'Pause all processes within a container' +complete -c docker -A -f -n '__fish_seen_subcommand_from pause' -a 
'(__fish_print_docker_containers running)' -d "Container" + +# ps +complete -c docker -f -n '__fish_docker_no_subcommand' -a ps -d 'List containers' +complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -s a -l all -d 'Show all containers. Only running containers are shown by default.' +complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -l before -d 'Show only containers created before Id or Name, including non-running ones.' +complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -s f -l filter -d 'Provide filter values' +complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -s l -l latest -d 'Show only the latest created container, including non-running ones.' +complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -s n -d 'Show n last created containers, including non-running ones.' +complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -l no-trunc -d "Don't truncate output" +complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -s q -l quiet -d 'Only display numeric IDs' +complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -s s -l size -d 'Display total file sizes' +complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -l since -d 'Show only containers created since Id or Name, including non-running ones.' + +# pull +complete -c docker -f -n '__fish_docker_no_subcommand' -a pull -d 'Pull an image or a repository from a Docker registry server' +complete -c docker -A -f -n '__fish_seen_subcommand_from pull' -s a -l all-tags -d 'Download all tagged images in the repository' +complete -c docker -A -f -n '__fish_seen_subcommand_from pull' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from pull' -a '(__fish_print_docker_images)' -d "Image" +complete -c docker -A -f -n '__fish_seen_subcommand_from pull' -a '(__fish_print_docker_repositories)' -d "Repository" + +# push +complete -c docker -f -n '__fish_docker_no_subcommand' -a push -d 'Push an image or a repository to a Docker registry server' +complete -c docker -A -f -n '__fish_seen_subcommand_from push' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from push' -a '(__fish_print_docker_images)' -d "Image" +complete -c docker -A -f -n '__fish_seen_subcommand_from push' -a '(__fish_print_docker_repositories)' -d "Repository" + +# rename +complete -c docker -f -n '__fish_docker_no_subcommand' -a rename -d 'Rename an existing container' + +# restart +complete -c docker -f -n '__fish_docker_no_subcommand' -a restart -d 'Restart a container' +complete -c docker -A -f -n '__fish_seen_subcommand_from restart' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from restart' -s t -l time -d 'Seconds to wait for the container to stop before killing it; the container is then restarted. Default is 10 seconds.'
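+# Note: 'docker restart' also accepts stopped containers (they are simply started); only running ones are offered below.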
+complete -c docker -A -f -n '__fish_seen_subcommand_from restart' -a '(__fish_print_docker_containers running)' -d "Container" + +# rm +complete -c docker -f -n '__fish_docker_no_subcommand' -a rm -d 'Remove one or more containers' +complete -c docker -A -f -n '__fish_seen_subcommand_from rm' -s f -l force -d 'Force the removal of a running container (uses SIGKILL)' +complete -c docker -A -f -n '__fish_seen_subcommand_from rm' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from rm' -s l -l link -d 'Remove the specified link and not the underlying container' +complete -c docker -A -f -n '__fish_seen_subcommand_from rm' -s v -l volumes -d 'Remove the volumes associated with the container' +complete -c docker -A -f -n '__fish_seen_subcommand_from rm' -a '(__fish_print_docker_containers stopped)' -d "Container" +complete -c docker -A -f -n '__fish_seen_subcommand_from rm' -s f -l force -a '(__fish_print_docker_containers all)' -d "Container" + +# rmi +complete -c docker -f -n '__fish_docker_no_subcommand' -a rmi -d 'Remove one or more images' +complete -c docker -A -f -n '__fish_seen_subcommand_from rmi' -s f -l force -d 'Force removal of the image' +complete -c docker -A -f -n '__fish_seen_subcommand_from rmi' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from rmi' -l no-prune -d 'Do not delete untagged parents' +complete -c docker -A -f -n '__fish_seen_subcommand_from rmi' -a '(__fish_print_docker_images)' -d "Image" + +# run +complete -c docker -f -n '__fish_docker_no_subcommand' -a run -d 'Run a command in a new container' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s a -l attach -d 'Attach to STDIN, STDOUT or STDERR.' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l add-host -d 'Add a custom host-to-IP mapping (host:ip)' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s c -l cpu-shares -d 'CPU shares (relative weight)' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l cap-add -d 'Add Linux capabilities' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l cap-drop -d 'Drop Linux capabilities' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l cidfile -d 'Write the container ID to the file' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l cpuset -d 'CPUs in which to allow execution (0-3, 0,1)' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s d -l detach -d 'Detached mode: run the container in the background and print the new container ID' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l device -d 'Add a host device to the container (e.g. --device=/dev/sdc:/dev/xvdc:rwm)' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l device-cgroup-rule -d 'Add a rule to the cgroup allowed devices list (e.g. --device-cgroup-rule="c 13:37 rwm")' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l dns -d 'Set custom DNS servers' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l dns-opt -d "Set custom DNS options (Use --dns-opt='' if you don't wish to set options)" +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l dns-search -d "Set custom DNS search domains (Use --dns-search=.
if you don't wish to set the search domain)" +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s e -l env -d 'Set environment variables' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l entrypoint -d 'Overwrite the default ENTRYPOINT of the image' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l env-file -d 'Read in a line delimited file of environment variables' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l expose -d 'Expose a port or a range of ports (e.g. --expose=3300-3310) from the container without publishing it to your host' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l group-add -d 'Add additional groups to run as' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s h -l hostname -d 'Container host name' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s i -l interactive -d 'Keep STDIN open even if not attached' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l ipc -d 'Default is to create a private IPC namespace (POSIX SysV IPC) for the container' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l link -d 'Add link to another container in the form of <name|id>:alias' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s m -l memory -d 'Memory limit (format: <number>[<unit>], where unit = b, k, m or g)' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l mac-address -d 'Container MAC address (e.g., 92:d0:c6:0a:29:33)' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l memory-swap -d "Total memory usage (memory + swap), set '-1' to disable swap (format: <number>[<unit>], where unit = b, k, m or g)" +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l mount -d 'Attach a filesystem mount to the container' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l name -d 'Assign a name to the container' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l net -d 'Set the network mode for the container' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s P -l publish-all -d 'Publish all exposed ports to random ports on the host interfaces' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s p -l publish -d "Publish a container's port to the host" +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l pid -d 'Default is to create a private PID namespace for the container' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l privileged -d 'Give extended privileges to this container' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l read-only -d "Mount the container's root filesystem as read only" +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l restart -d 'Restart policy to apply when a container exits (no, on-failure[:max-retry], always)' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l rm -d 'Automatically remove the container when it exits (incompatible with -d)' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l security-opt -d 'Security Options' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l sig-proxy -d 'Proxy received signals to the process (non-TTY mode only). SIGCHLD, SIGSTOP, and SIGKILL are not proxied.'
+complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l stop-signal -d 'Signal to kill a container' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s t -l tty -d 'Allocate a pseudo-TTY' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s u -l user -d 'Username or UID' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l tmpfs -d 'Mount tmpfs on a directory' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s v -l volume -d 'Bind mount a volume (e.g., from the host: -v /host:/container, from Docker: -v /container)' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l volumes-from -d 'Mount volumes from the specified container(s)' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s w -l workdir -d 'Working directory inside the container' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -a '(__fish_print_docker_images)' -d "Image" + +# save +complete -c docker -f -n '__fish_docker_no_subcommand' -a save -d 'Save an image to a tar archive' +complete -c docker -A -f -n '__fish_seen_subcommand_from save' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from save' -s o -l output -d 'Write to a file, instead of STDOUT' +complete -c docker -A -f -n '__fish_seen_subcommand_from save' -a '(__fish_print_docker_images)' -d "Image" + +# search +complete -c docker -f -n '__fish_docker_no_subcommand' -a search -d 'Search for an image on the registry (defaults to the Docker Hub)' +complete -c docker -A -f -n '__fish_seen_subcommand_from search' -l automated -d 'Only show automated builds' +complete -c docker -A -f -n '__fish_seen_subcommand_from search' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from search' -l no-trunc -d "Don't truncate output" +complete -c docker -A -f -n '__fish_seen_subcommand_from search' -s s -l stars -d 'Only display images with at least x stars' + +# start +complete -c docker -f -n '__fish_docker_no_subcommand' -a start -d 'Start a container' +complete -c docker -A -f -n '__fish_seen_subcommand_from start' -s a -l attach -d "Attach container's STDOUT and STDERR and forward all signals to the process" +complete -c docker -A -f -n '__fish_seen_subcommand_from start' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from start' -s i -l interactive -d "Attach container's STDIN" +complete -c docker -A -f -n '__fish_seen_subcommand_from start' -a '(__fish_print_docker_containers stopped)' -d "Container" + +# stats +complete -c docker -f -n '__fish_docker_no_subcommand' -a stats -d "Display a live stream of one or more containers' resource usage statistics" +complete -c docker -A -f -n '__fish_seen_subcommand_from stats' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from stats' -l no-stream -d 'Disable streaming stats and only pull the first result' +complete -c docker -A -f -n '__fish_seen_subcommand_from stats' -a '(__fish_print_docker_containers running)' -d "Container" + +# stop +complete -c docker -f -n '__fish_docker_no_subcommand' -a stop -d 'Stop a container' +complete -c docker -A -f -n '__fish_seen_subcommand_from stop' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from stop' -s t -l time -d 'Number of seconds to wait for the container to stop before killing it. Default is 10 seconds.'
+complete -c docker -A -f -n '__fish_seen_subcommand_from stop' -a '(__fish_print_docker_containers running)' -d "Container" + +# tag +complete -c docker -f -n '__fish_docker_no_subcommand' -a tag -d 'Tag an image into a repository' +complete -c docker -A -f -n '__fish_seen_subcommand_from tag' -s f -l force -d 'Force' +complete -c docker -A -f -n '__fish_seen_subcommand_from tag' -l help -d 'Print usage' + +# top +complete -c docker -f -n '__fish_docker_no_subcommand' -a top -d 'Lookup the running processes of a container' +complete -c docker -A -f -n '__fish_seen_subcommand_from top' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from top' -a '(__fish_print_docker_containers running)' -d "Container" + +# unpause +complete -c docker -f -n '__fish_docker_no_subcommand' -a unpause -d 'Unpause a paused container' +complete -c docker -A -f -n '__fish_seen_subcommand_from unpause' -a '(__fish_print_docker_containers running)' -d "Container" + +# version +complete -c docker -f -n '__fish_docker_no_subcommand' -a version -d 'Show the Docker version information' +complete -c docker -A -f -n '__fish_seen_subcommand_from version' -s f -l format -d 'Format the output using the given go template' +complete -c docker -A -f -n '__fish_seen_subcommand_from version' -l help -d 'Print usage' + +# wait +complete -c docker -f -n '__fish_docker_no_subcommand' -a wait -d 'Block until a container stops, then print its exit code' +complete -c docker -A -f -n '__fish_seen_subcommand_from wait' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from wait' -a '(__fish_print_docker_containers running)' -d "Container" diff --git a/components/engine/contrib/completion/powershell/readme.txt b/components/engine/contrib/completion/powershell/readme.txt new file mode 100644 index 0000000000..18e1b53c13 --- /dev/null +++ b/components/engine/contrib/completion/powershell/readme.txt @@ -0,0 +1 @@ +See https://github.com/samneirinck/posh-docker \ No newline at end of file diff --git a/components/engine/contrib/completion/zsh/REVIEWERS b/components/engine/contrib/completion/zsh/REVIEWERS new file mode 100644 index 0000000000..03ee2dde3d --- /dev/null +++ b/components/engine/contrib/completion/zsh/REVIEWERS @@ -0,0 +1,2 @@ +Tianon Gravi (@tianon) +Jessie Frazelle (@jfrazelle) diff --git a/components/engine/contrib/completion/zsh/_docker b/components/engine/contrib/completion/zsh/_docker new file mode 100644 index 0000000000..0860907839 --- /dev/null +++ b/components/engine/contrib/completion/zsh/_docker @@ -0,0 +1,3014 @@ +#compdef docker dockerd +# +# zsh completion for docker (http://docker.com) +# +# version: 0.3.0 +# github: https://github.com/felixr/docker-zsh-completion +# +# contributors: +# - Felix Riedel +# - Steve Durrheimer +# - Vincent Bernat +# +# license: +# +# Copyright (c) 2013, Felix Riedel +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. 
+# * Neither the name of the <organization> nor the +# names of its contributors may be used to endorse or promote products +# derived from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY +# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# + +# Short-option stacking can be enabled with: +# zstyle ':completion:*:*:docker:*' option-stacking yes +# zstyle ':completion:*:*:docker-*:*' option-stacking yes +__docker_arguments() { + if zstyle -t ":completion:${curcontext}:" option-stacking; then + print -- -s + fi +} + +__docker_get_containers() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + local kind type line s + declare -a running stopped lines args names + + kind=$1; shift + type=$1; shift + [[ $kind = (stopped|all) ]] && args=($args -a) + + lines=(${(f)${:-"$(_call_program commands docker $docker_options ps --format 'table' --no-trunc $args)"$'\n'}}) + + # Parse header line to find columns + local i=1 j=1 k header=${lines[1]} + declare -A begin end + while (( j < ${#header} - 1 )); do + i=$(( j + ${${header[$j,-1]}[(i)[^ ]]} - 1 )) + j=$(( i + ${${header[$i,-1]}[(i) ]} - 1 )) + k=$(( j + ${${header[$j,-1]}[(i)[^ ]]} - 2 )) + begin[${header[$i,$((j-1))]}]=$i + end[${header[$i,$((j-1))]}]=$k + done + end[${header[$i,$((j-1))]}]=-1 # Last column, should go to the end of the line + lines=(${lines[2,-1]}) + + # Container ID + if [[ $type = (ids|all) ]]; then + for line in $lines; do + s="${${line[${begin[CONTAINER ID]},${end[CONTAINER ID]}]%% ##}[0,12]}" + s="$s:${(l:15:: :::)${${line[${begin[CREATED]},${end[CREATED]}]/ ago/}%% ##}}" + s="$s, ${${${line[${begin[IMAGE]},${end[IMAGE]}]}/:/\\:}%% ##}" + if [[ ${line[${begin[STATUS]},${end[STATUS]}]} = (Exit*|Created*) ]]; then + stopped=($stopped $s) + else + running=($running $s) + fi + done + fi + + # Names: we only display the one without slash. All other names + # are generated and may clutter the completion. However, with + # Swarm, all names may be prefixed by the swarm node name. + if [[ $type = (names|all) ]]; then + for line in $lines; do + names=(${(ps:,:)${${line[${begin[NAMES]},${end[NAMES]}]}%% *}}) + # First step: find a common prefix and strip it (swarm node case) + (( ${#${(u)names%%/*}} == 1 )) && names=${names#${names[1]%%/*}/} + # Second step: only keep the first name without a / + s=${${names:#*/*}[1]} + # If no name, well, give up.
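+ # (every remaining name contained a slash, i.e. all were generated or Swarm-prefixed, so there is nothing sensible to offer)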
+ (( $#s != 0 )) || continue + s="$s:${(l:15:: :::)${${line[${begin[CREATED]},${end[CREATED]}]/ ago/}%% ##}}" + s="$s, ${${${line[${begin[IMAGE]},${end[IMAGE]}]}/:/\\:}%% ##}" + if [[ ${line[${begin[STATUS]},${end[STATUS]}]} = (Exit*|Created*) ]]; then + stopped=($stopped $s) + else + running=($running $s) + fi + done + fi + + [[ $kind = (running|all) ]] && _describe -t containers-running "running containers" running "$@" && ret=0 + [[ $kind = (stopped|all) ]] && _describe -t containers-stopped "stopped containers" stopped "$@" && ret=0 + return ret +} + +__docker_complete_stopped_containers() { + [[ $PREFIX = -* ]] && return 1 + __docker_get_containers stopped all "$@" +} + +__docker_complete_running_containers() { + [[ $PREFIX = -* ]] && return 1 + __docker_get_containers running all "$@" +} + +__docker_complete_containers() { + [[ $PREFIX = -* ]] && return 1 + __docker_get_containers all all "$@" +} + +__docker_complete_containers_ids() { + [[ $PREFIX = -* ]] && return 1 + __docker_get_containers all ids "$@" +} + +__docker_complete_containers_names() { + [[ $PREFIX = -* ]] && return 1 + __docker_get_containers all names "$@" +} + +__docker_complete_info_plugins() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + emulate -L zsh + setopt extendedglob + local -a plugins + plugins=(${(ps: :)${(M)${(f)${${"$(_call_program commands docker $docker_options info)"##*$'\n'Plugins:}%%$'\n'^ *}}:# $1: *}## $1: }) + _describe -t plugins "$1 plugins" plugins && ret=0 + return ret +} + +__docker_complete_images() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + declare -a images + images=(${${${(f)${:-"$(_call_program commands docker $docker_options images)"$'\n'}}[2,-1]}/(#b)([^ ]##) ##([^ ]##) ##([^ ]##)*/${match[3]}:${(r:15:: :::)match[2]} in ${match[1]}}) + _describe -t docker-images "images" images && ret=0 + __docker_complete_repositories_with_tags && ret=0 + return ret +} + +__docker_complete_repositories() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + declare -a repos + repos=(${${${(f)${:-"$(_call_program commands docker $docker_options images)"$'\n'}}%% *}[2,-1]}) + repos=(${repos#<none>}) + _describe -t docker-repos "repositories" repos && ret=0 + return ret +} + +__docker_complete_repositories_with_tags() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + declare -a repos onlyrepos matched + declare m + repos=(${${${${(f)${:-"$(_call_program commands docker $docker_options images)"$'\n'}}[2,-1]}/ ##/:::}%% *}) + repos=(${${repos%:::}#<none>}) + # Check if we have a prefix-match for the current prefix. + onlyrepos=(${repos%::*}) + for m in $onlyrepos; do + [[ ${PREFIX##${~~m}} != ${PREFIX} ]] && { + # Yes, complete with tags + repos=(${${repos/:::/:}/:/\\:}) + _describe -t docker-repos-with-tags "repositories with tags" repos && ret=0 + return ret + } + done + # No, only complete repositories + onlyrepos=(${${repos%:::*}/:/\\:}) + _describe -t docker-repos "repositories" onlyrepos -qS : && ret=0 + + return ret +} + +__docker_search() { + [[ $PREFIX = -* ]] && return 1 + local cache_policy + zstyle -s ":completion:${curcontext}:" cache-policy cache_policy + if [[ -z "$cache_policy" ]]; then + zstyle ":completion:${curcontext}:" cache-policy __docker_caching_policy + fi + + local searchterm cachename + searchterm="${words[$CURRENT]%/}" + cachename=_docker-search-$searchterm + + local expl + local -a result + if ( [[ ${(P)+cachename} -eq 0 ]] || _cache_invalid ${cachename#_} ) \ + && ! _retrieve_cache ${cachename#_}; then + _message "Searching for ${searchterm}..."
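+ # Keep only the first column (NAME) of each output line and drop the header row before caching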
+ result=(${${${(f)${:-"$(_call_program commands docker $docker_options search $searchterm)"$'\n'}}%% *}[2,-1]}) + _store_cache ${cachename#_} result + fi + _wanted dockersearch expl 'available images' compadd -a result +} + +__docker_get_log_options() { + [[ $PREFIX = -* ]] && return 1 + + integer ret=1 + local log_driver=${opt_args[--log-driver]:-"all"} + local -a common_options awslogs_options fluentd_options gcplogs_options gelf_options journald_options json_file_options logentries_options syslog_options splunk_options + + common_options=("max-buffer-size" "mode") + awslogs_options=($common_options "awslogs-region" "awslogs-group" "awslogs-stream" "awslogs-create-group") + fluentd_options=($common_options "env" "fluentd-address" "fluentd-async-connect" "fluentd-buffer-limit" "fluentd-retry-wait" "fluentd-max-retries" "labels" "tag") + gcplogs_options=($common_options "env" "gcp-log-cmd" "gcp-project" "labels") + gelf_options=($common_options "env" "gelf-address" "gelf-compression-level" "gelf-compression-type" "labels" "tag") + journald_options=($common_options "env" "labels" "tag") + json_file_options=($common_options "env" "labels" "max-file" "max-size") + logentries_options=($common_options "logentries-token") + syslog_options=($common_options "env" "labels" "syslog-address" "syslog-facility" "syslog-format" "syslog-tls-ca-cert" "syslog-tls-cert" "syslog-tls-key" "syslog-tls-skip-verify" "tag") + splunk_options=($common_options "env" "labels" "splunk-caname" "splunk-capath" "splunk-format" "splunk-gzip" "splunk-gzip-level" "splunk-index" "splunk-insecureskipverify" "splunk-source" "splunk-sourcetype" "splunk-token" "splunk-url" "splunk-verify-connection" "tag") + + [[ $log_driver = (awslogs|all) ]] && _describe -t awslogs-options "awslogs options" awslogs_options "$@" && ret=0 + [[ $log_driver = (fluentd|all) ]] && _describe -t fluentd-options "fluentd options" fluentd_options "$@" && ret=0 + [[ $log_driver = (gcplogs|all) ]] && _describe -t gcplogs-options "gcplogs options" gcplogs_options "$@" && ret=0 + [[ $log_driver = (gelf|all) ]] && _describe -t gelf-options "gelf options" gelf_options "$@" && ret=0 + [[ $log_driver = (journald|all) ]] && _describe -t journald-options "journald options" journald_options "$@" && ret=0 + [[ $log_driver = (json-file|all) ]] && _describe -t json-file-options "json-file options" json_file_options "$@" && ret=0 + [[ $log_driver = (logentries|all) ]] && _describe -t logentries-options "logentries options" logentries_options "$@" && ret=0 + [[ $log_driver = (syslog|all) ]] && _describe -t syslog-options "syslog options" syslog_options "$@" && ret=0 + [[ $log_driver = (splunk|all) ]] && _describe -t splunk-options "splunk options" splunk_options "$@" && ret=0 + + return ret +} + +__docker_complete_log_drivers() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + local -a drivers + drivers=(awslogs etwlogs fluentd gcplogs gelf journald json-file logentries none splunk syslog) + _describe -t log-drivers "log drivers" drivers && ret=0 + return ret +} + +__docker_complete_log_options() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + + if compset -P '*='; then + case "${${words[-1]%=*}#*=}" in + (syslog-format) + local opts=('rfc3164' 'rfc5424' 'rfc5424micro') + _describe -t syslog-format-opts "syslog format options" opts && ret=0 + ;; + (mode) + local opts=('blocking' 'non-blocking') + _describe -t mode-opts "mode options" opts && ret=0 + ;; + *) + _message 'value' && ret=0 + ;; + esac + else + __docker_get_log_options -qS "=" && ret=0 + fi + + return ret +} + +__docker_complete_detach_keys() {
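+ # Completes the --detach-keys value: a comma-separated sequence of single letters (a-z) and ctrl-<key> combinations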
+ [[ $PREFIX = -* ]] && return 1 + integer ret=1 + + compset -P "*," + keys=(${:-{a-z}}) + ctrl_keys=(${:-ctrl-{{a-z},{@,'[','\\','^',']',_}}}) + _describe -t detach_keys "[a-z]" keys -qS "," && ret=0 + _describe -t detach_keys-ctrl "'ctrl-' + 'a-z @ [ \\\\ ] ^ _'" ctrl_keys -qS "," && ret=0 +} + +__docker_complete_pid() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + local -a opts vopts + + opts=('host') + vopts=('container') + + if compset -P '*:'; then + case "${${words[-1]%:*}#*=}" in + (container) + __docker_complete_running_containers && ret=0 + ;; + *) + _message 'value' && ret=0 + ;; + esac + else + _describe -t pid-value-opts "PID Options with value" vopts -qS ":" && ret=0 + _describe -t pid-opts "PID Options" opts && ret=0 + fi + + return ret +} + +__docker_complete_runtimes() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + + emulate -L zsh + setopt extendedglob + local -a runtimes_opts + runtimes_opts=(${(ps: :)${(f)${${"$(_call_program commands docker $docker_options info)"##*$'\n'Runtimes: }%%$'\n'^ *}}}) + _describe -t runtimes-opts "runtimes options" runtimes_opts && ret=0 +} + +__docker_complete_ps_filters() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + + if compset -P '*='; then + case "${${words[-1]%=*}#*=}" in + (ancestor) + __docker_complete_images && ret=0 + ;; + (before|since) + __docker_complete_containers && ret=0 + ;; + (health) + health_opts=('healthy' 'none' 'starting' 'unhealthy') + _describe -t health-filter-opts "health filter options" health_opts && ret=0 + ;; + (id) + __docker_complete_containers_ids && ret=0 + ;; + (is-task) + boolean_opts=('true' 'false') + _describe -t boolean-filter-opts "filter options" boolean_opts && ret=0 + ;; + (name) + __docker_complete_containers_names && ret=0 + ;; + (network) + __docker_complete_networks && ret=0 + ;; + (status) + status_opts=('created' 'dead' 'exited' 'paused' 'restarting' 'running' 'removing') + _describe -t status-filter-opts "status filter options" status_opts && ret=0 + ;; + (volume) + __docker_complete_volumes && ret=0 + ;; + *) + _message 'value' && ret=0 + ;; + esac + else + opts=('ancestor' 'before' 'exited' 'expose' 'health' 'id' 'is-task' 'label' 'name' 'network' 'publish' 'since' 'status' 'volume') + _describe -t filter-opts "Filter Options" opts -qS "=" && ret=0 + fi + + return ret +} + +__docker_complete_search_filters() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + declare -a boolean_opts opts + + boolean_opts=('true' 'false') + opts=('is-automated' 'is-official' 'stars') + + if compset -P '*='; then + case "${${words[-1]%=*}#*=}" in + (is-automated|is-official) + _describe -t boolean-filter-opts "filter options" boolean_opts && ret=0 + ;; + *) + _message 'value' && ret=0 + ;; + esac + else + _describe -t filter-opts "filter options" opts -qS "=" && ret=0 + fi + + return ret +} + +__docker_complete_images_filters() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + declare -a boolean_opts opts + + boolean_opts=('true' 'false') + opts=('before' 'dangling' 'label' 'reference' 'since') + + if compset -P '*='; then + case "${${words[-1]%=*}#*=}" in + (before|reference|since) + __docker_complete_images && ret=0 + ;; + (dangling) + _describe -t boolean-filter-opts "filter options" boolean_opts && ret=0 + ;; + *) + _message 'value' && ret=0 + ;; + esac + else + _describe -t filter-opts "Filter Options" opts -qS "=" && ret=0 + fi + + return ret +} + +__docker_complete_events_filter() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + declare -a opts + + opts=('container' 'daemon' 'event' 'image' 'label' 'network'
'type' 'volume') + + if compset -P '*='; then + case "${${words[-1]%=*}#*=}" in + (container) + __docker_complete_containers && ret=0 + ;; + (daemon) + emulate -L zsh + setopt extendedglob + local -a daemon_opts + daemon_opts=( + ${(f)${${"$(_call_program commands docker $docker_options info)"##*$'\n'Name: }%%$'\n'^ *}} + ${${(f)${${"$(_call_program commands docker $docker_options info)"##*$'\n'ID: }%%$'\n'^ *}}//:/\\:} + ) + _describe -t daemon-filter-opts "daemon filter options" daemon_opts && ret=0 + ;; + (event) + local -a event_opts + event_opts=('attach' 'commit' 'connect' 'copy' 'create' 'delete' 'destroy' 'detach' 'die' 'disconnect' 'exec_create' 'exec_detach' + 'exec_start' 'export' 'health_status' 'import' 'kill' 'load' 'mount' 'oom' 'pause' 'pull' 'push' 'reload' 'rename' 'resize' 'restart' 'save' 'start' + 'stop' 'tag' 'top' 'unmount' 'unpause' 'untag' 'update') + _describe -t event-filter-opts "event filter options" event_opts && ret=0 + ;; + (image) + __docker_complete_images && ret=0 + ;; + (network) + __docker_complete_networks && ret=0 + ;; + (type) + local -a type_opts + type_opts=('container' 'daemon' 'image' 'network' 'volume') + _describe -t type-filter-opts "type filter options" type_opts && ret=0 + ;; + (volume) + __docker_complete_volumes && ret=0 + ;; + *) + _message 'value' && ret=0 + ;; + esac + else + _describe -t filter-opts "filter options" opts -qS "=" && ret=0 + fi + + return ret +} + +__docker_complete_prune_filters() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + declare -a opts + + opts=('until') + + if compset -P '*='; then + case "${${words[-1]%=*}#*=}" in + *) + _message 'value' && ret=0 + ;; + esac + else + _describe -t filter-opts "filter options" opts -qS "=" && ret=0 + fi + + return ret +} + +# BO checkpoint + +__docker_checkpoint_commands() { + local -a _docker_checkpoint_subcommands + _docker_checkpoint_subcommands=( + "create:Create a checkpoint from a running container" + "ls:List checkpoints for a container" + "rm:Remove a checkpoint" + ) + _describe -t docker-checkpoint-commands "docker checkpoint command" _docker_checkpoint_subcommands +} + +__docker_checkpoint_subcommand() { + local -a _command_args opts_help + local expl help="--help" + integer ret=1 + + opts_help=("(: -)--help[Print usage]") + + case "$words[1]" in + (create) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help)--checkpoint-dir=[Use a custom checkpoint storage directory]:dir:_directories" \ + "($help)--leave-running[Leave the container running after checkpoint]" \ + "($help -)1:container:__docker_complete_running_containers" \ + "($help -)2:checkpoint: " && ret=0 + ;; + (ls|list) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help)--checkpoint-dir=[Use a custom checkpoint storage directory]:dir:_directories" \ + "($help -)1:container:__docker_complete_containers" && ret=0 + ;; + (rm|remove) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help)--checkpoint-dir=[Use a custom checkpoint storage directory]:dir:_directories" \ + "($help -)1:container:__docker_complete_containers" \ + "($help -)2:checkpoint: " && ret=0 + ;; + (help) + _arguments $(__docker_arguments) ":subcommand:__docker_checkpoint_commands" && ret=0 + ;; + esac + + return ret +} + +# EO checkpoint + +# BO container + +__docker_container_commands() { + local -a _docker_container_subcommands + _docker_container_subcommands=( + "attach:Attach to a running container" + "commit:Create a new image from a container's changes" + "cp:Copy files/folders between a container and the 
local filesystem" + "create:Create a new container" + "diff:Inspect changes on a container's filesystem" + "exec:Run a command in a running container" + "export:Export a container's filesystem as a tar archive" + "inspect:Display detailed information on one or more containers" + "kill:Kill one or more running containers" + "logs:Fetch the logs of a container" + "ls:List containers" + "pause:Pause all processes within one or more containers" + "port:List port mappings or a specific mapping for the container" + "prune:Remove all stopped containers" + "rename:Rename a container" + "restart:Restart one or more containers" + "rm:Remove one or more containers" + "run:Run a command in a new container" + "start:Start one or more stopped containers" + "stats:Display a live stream of container(s) resource usage statistics" + "stop:Stop one or more running containers" + "top:Display the running processes of a container" + "unpause:Unpause all processes within one or more containers" + "update:Update configuration of one or more containers" + "wait:Block until one or more containers stop, then print their exit codes" + ) + _describe -t docker-container-commands "docker container command" _docker_container_subcommands +} + +__docker_container_subcommand() { + local -a _command_args opts_help opts_attach_exec_run_start opts_create_run opts_create_run_update + local expl help="--help" + integer ret=1 + + opts_attach_exec_run_start=( + "($help)--detach-keys=[Escape key sequence used to detach a container]:sequence:__docker_complete_detach_keys" + ) + opts_create_run=( + "($help -a --attach)"{-a=,--attach=}"[Attach to stdin, stdout or stderr]:device:(STDIN STDOUT STDERR)" + "($help)*--add-host=[Add a custom host-to-IP mapping]:host\:ip mapping: " + "($help)*--blkio-weight-device=[Block IO (relative device weight)]:device:Block IO weight: " + "($help)*--cap-add=[Add Linux capabilities]:capability: " + "($help)*--cap-drop=[Drop Linux capabilities]:capability: " + "($help)--cgroup-parent=[Parent cgroup for the container]:cgroup: " + "($help)--cidfile=[Write the container ID to the file]:CID file:_files" + "($help)--cpus=[Number of CPUs (default 0.000)]:cpus: " + "($help)*--device=[Add a host device to the container]:device:_files" + "($help)*--device-cgroup-rule=[Add a rule to the cgroup allowed devices list]:device:cgroup: " + "($help)*--device-read-bps=[Limit the read rate (bytes per second) from a device]:device:IO rate: " + "($help)*--device-read-iops=[Limit the read rate (IO per second) from a device]:device:IO rate: " + "($help)*--device-write-bps=[Limit the write rate (bytes per second) to a device]:device:IO rate: " + "($help)*--device-write-iops=[Limit the write rate (IO per second) to a device]:device:IO rate: " + "($help)--disable-content-trust[Skip image verification]" + "($help)*--dns=[Custom DNS servers]:DNS server: " + "($help)*--dns-option=[Custom DNS options]:DNS option: " + "($help)*--dns-search=[Custom DNS search domains]:DNS domains: " + "($help)*"{-e=,--env=}"[Environment variables]:environment variable: " + "($help)--entrypoint=[Overwrite the default entrypoint of the image]:entry point: " + "($help)*--env-file=[Read environment variables from a file]:environment file:_files" + "($help)*--expose=[Expose a port from the container without publishing it]: " + "($help)*--group=[Set one or more supplementary user groups for the container]:group:_groups" + "($help -h --hostname)"{-h=,--hostname=}"[Container host name]:hostname:_hosts" + "($help -i --interactive)"{-i,--interactive}"[Keep stdin 
open even if not attached]" + "($help)--init[Run an init inside the container that forwards signals and reaps processes]" + "($help)--ip=[IPv4 address]:IPv4: " + "($help)--ip6=[IPv6 address]:IPv6: " + "($help)--ipc=[IPC namespace to use]:IPC namespace: " + "($help)--isolation=[Container isolation technology]:isolation:(default hyperv process)" + "($help)*--link=[Add link to another container]:link:->link" + "($help)*--link-local-ip=[Container IPv4/IPv6 link-local addresses]:IPv4/IPv6: " + "($help)*"{-l=,--label=}"[Container metadata]:label: " + "($help)--log-driver=[Default driver for container logs]:logging driver:__docker_complete_log_drivers" + "($help)*--log-opt=[Log driver specific options]:log driver options:__docker_complete_log_options" + "($help)--mac-address=[Container MAC address]:MAC address: " + "($help)*--mount=[Attach a filesystem mount to the container]:mount: " + "($help)--name=[Container name]:name: " + "($help)--network=[Connect a container to a network]:network mode:(bridge none container host)" + "($help)*--network-alias=[Add network-scoped alias for the container]:alias: " + "($help)--oom-kill-disable[Disable OOM Killer]" + "($help)--oom-score-adj[Tune the host's OOM preferences for containers (accepts -1000 to 1000)]" + "($help)--pids-limit[Tune container pids limit (set -1 for unlimited)]" + "($help -P --publish-all)"{-P,--publish-all}"[Publish all exposed ports]" + "($help)*"{-p=,--publish=}"[Expose a container's port to the host]:port:_ports" + "($help)--pid=[PID namespace to use]:PID namespace:__docker_complete_pid" + "($help)--privileged[Give extended privileges to this container]" + "($help)--read-only[Mount the container's root filesystem as read only]" + "($help)*--security-opt=[Security options]:security option: " + "($help)*--shm-size=[Size of '/dev/shm' (format is '<number><unit>')]:shm size: " + "($help)--stop-signal=[Signal to kill a container]:signal:_signals" + "($help)--stop-timeout=[Timeout (in seconds) to stop a container]:time: " + "($help)*--sysctl=-[sysctl options]:sysctl: " + "($help -t --tty)"{-t,--tty}"[Allocate a pseudo-tty]" + "($help -u --user)"{-u=,--user=}"[Username or UID]:user:_users" + "($help)*--ulimit=[ulimit options]:ulimit: " + "($help)--userns=[Container user namespace]:user namespace:(host)" + "($help)--tmpfs[Mount tmpfs]" + "($help)*-v[Bind mount a volume]:volume: " + "($help)--volume-driver=[Optional volume driver for the container]:volume driver:(local)" + "($help)*--volumes-from=[Mount volumes from the specified container]:volume: " + "($help -w --workdir)"{-w=,--workdir=}"[Working directory inside the container]:directory:_directories" + ) + opts_create_run_update=( + "($help)--blkio-weight=[Block IO (relative weight), between 10 and 1000]:Block IO weight:(10 100 500 1000)" + "($help -c --cpu-shares)"{-c=,--cpu-shares=}"[CPU shares (relative weight)]:CPU shares:(0 10 100 200 500 800 1000)" + "($help)--cpu-period=[Limit the CPU CFS (Completely Fair Scheduler) period]:CPU period: " + "($help)--cpu-quota=[Limit the CPU CFS (Completely Fair Scheduler) quota]:CPU quota: " + "($help)--cpu-rt-period=[Limit the CPU real-time period]:CPU real-time period in microseconds: " + "($help)--cpu-rt-runtime=[Limit the CPU real-time runtime]:CPU real-time runtime in microseconds: " + "($help)--cpuset-cpus=[CPUs in which to allow execution]:CPUs: " + "($help)--cpuset-mems=[MEMs in which to allow execution]:MEMs: " + "($help)--kernel-memory=[Kernel memory limit in bytes]:Memory limit: " + "($help -m --memory)"{-m=,--memory=}"[Memory limit]:Memory limit: " +
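# The memory limits here and below accept docker's usual <number>[<unit>] byte format (unit = b, k, m or g), e.g. --memory=512m +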
"($help)--memory-reservation=[Memory soft limit]:Memory limit: " + "($help)--memory-swap=[Total memory limit with swap]:Memory limit: " + "($help)--restart=[Restart policy]:restart policy:(no on-failure always unless-stopped)" + ) + opts_help=("(: -)--help[Print usage]") + + case "$words[1]" in + (attach) + _arguments $(__docker_arguments) \ + $opts_help \ + $opts_attach_exec_run_start \ + "($help)--no-stdin[Do not attach stdin]" \ + "($help)--sig-proxy[Proxy all received signals to the process (non-TTY mode only)]" \ + "($help -):containers:__docker_complete_running_containers" && ret=0 + ;; + (commit) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -a --author)"{-a=,--author=}"[Author]:author: " \ + "($help)*"{-c=,--change=}"[Apply Dockerfile instruction to the created image]:Dockerfile:_files" \ + "($help -m --message)"{-m=,--message=}"[Commit message]:message: " \ + "($help -p --pause)"{-p,--pause}"[Pause container during commit]" \ + "($help -):container:__docker_complete_containers" \ + "($help -): :__docker_complete_repositories_with_tags" && ret=0 + ;; + (cp) + local state + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -L --follow-link)"{-L,--follow-link}"[Always follow symbol link]" \ + "($help -)1:container:->container" \ + "($help -)2:hostpath:_files" && ret=0 + case $state in + (container) + if compset -P "*:"; then + _files && ret=0 + else + __docker_complete_containers -qS ":" && ret=0 + fi + ;; + esac + ;; + (create) + local state + _arguments $(__docker_arguments) \ + $opts_help \ + $opts_create_run \ + $opts_create_run_update \ + "($help -): :__docker_complete_images" \ + "($help -):command: _command_names -e" \ + "($help -)*::arguments: _normal" && ret=0 + case $state in + (link) + if compset -P "*:"; then + _wanted alias expl "Alias" compadd -E "" && ret=0 + else + __docker_complete_running_containers -qS ":" && ret=0 + fi + ;; + esac + ;; + (diff) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -)*:containers:__docker_complete_containers" && ret=0 + ;; + (exec) + local state + _arguments $(__docker_arguments) \ + $opts_help \ + $opts_attach_exec_run_start \ + "($help -d --detach)"{-d,--detach}"[Detached mode: leave the container running in the background]" \ + "($help)*"{-e=,--env=}"[Set environment variables]:environment variable: " \ + "($help -i --interactive)"{-i,--interactive}"[Keep stdin open even if not attached]" \ + "($help)--privileged[Give extended Linux capabilities to the command]" \ + "($help -t --tty)"{-t,--tty}"[Allocate a pseudo-tty]" \ + "($help -u --user)"{-u=,--user=}"[Username or UID]:user:_users" \ + "($help -):containers:__docker_complete_running_containers" \ + "($help -)*::command:->anycommand" && ret=0 + case $state in + (anycommand) + shift 1 words + (( CURRENT-- )) + _normal && ret=0 + ;; + esac + ;; + (export) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -o --output)"{-o=,--output=}"[Write to a file, instead of stdout]:output file:_files" \ + "($help -)*:containers:__docker_complete_containers" && ret=0 + ;; + (inspect) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -f --format)"{-f=,--format=}"[Format the output using the given go template]:template: " \ + "($help -s --size)"{-s,--size}"[Display total file sizes]" \ + "($help -)*:containers:__docker_complete_containers" && ret=0 + ;; + (kill) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -s --signal)"{-s=,--signal=}"[Signal to send]:signal:_signals" \ + "($help 
-)*:containers:__docker_complete_running_containers" && ret=0 + ;; + (logs) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help)--details[Show extra details provided to logs]" \ + "($help -f --follow)"{-f,--follow}"[Follow log output]" \ + "($help -s --since)"{-s=,--since=}"[Show logs since this timestamp]:timestamp: " \ + "($help -t --timestamps)"{-t,--timestamps}"[Show timestamps]" \ + "($help)--tail=[Output the last K lines]:lines:(1 10 20 50 all)" \ + "($help -)*:containers:__docker_complete_containers" && ret=0 + ;; + (ls|list) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -a --all)"{-a,--all}"[Show all containers]" \ + "($help)--before=[Show only container created before...]:containers:__docker_complete_containers" \ + "($help)*"{-f=,--filter=}"[Filter values]:filter:__docker_complete_ps_filters" \ + "($help)--format=[Pretty-print containers using a Go template]:template: " \ + "($help -l --latest)"{-l,--latest}"[Show only the latest created container]" \ + "($help -n --last)"{-n=,--last=}"[Show n last created containers (includes all states)]:n:(1 5 10 25 50)" \ + "($help)--no-trunc[Do not truncate output]" \ + "($help -q --quiet)"{-q,--quiet}"[Only show numeric IDs]" \ + "($help -s --size)"{-s,--size}"[Display total file sizes]" \ + "($help)--since=[Show only containers created since...]:containers:__docker_complete_containers" && ret=0 + ;; + (pause|unpause) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -)*:containers:__docker_complete_running_containers" && ret=0 + ;; + (port) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -)1:containers:__docker_complete_running_containers" \ + "($help -)2:port:_ports" && ret=0 + ;; + (prune) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help)*--filter=[Filter values]:filter:__docker_complete_prune_filters" \ + "($help -f --force)"{-f,--force}"[Do not prompt for confirmation]" && ret=0 + ;; + (rename) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -):old name:__docker_complete_containers" \ + "($help -):new name: " && ret=0 + ;; + (restart) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -t --time)"{-t=,--time=}"[Number of seconds to try to stop for before killing the container]:seconds to before killing:(1 5 10 30 60)" \ + "($help -)*:containers:__docker_complete_containers_ids" && ret=0 + ;; + (rm) + local state + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -f --force)"{-f,--force}"[Force removal]" \ + "($help -l --link)"{-l,--link}"[Remove the specified link and not the underlying container]" \ + "($help -v --volumes)"{-v,--volumes}"[Remove the volumes associated to the container]" \ + "($help -)*:containers:->values" && ret=0 + case $state in + (values) + if [[ ${words[(r)-f]} == -f || ${words[(r)--force]} == --force ]]; then + __docker_complete_containers && ret=0 + else + __docker_complete_stopped_containers && ret=0 + fi + ;; + esac + ;; + (run) + local state + _arguments $(__docker_arguments) \ + $opts_help \ + $opts_create_run \ + $opts_create_run_update \ + $opts_attach_exec_run_start \ + "($help -d --detach)"{-d,--detach}"[Detached mode: leave the container running in the background]" \ + "($help)--health-cmd=[Command to run to check health]:command: " \ + "($help)--health-interval=[Time between running the check]:time: " \ + "($help)--health-retries=[Consecutive failures needed to report unhealthy]:retries:(1 2 3 4 5)" \ + "($help)--health-timeout=[Maximum time to allow one check to run]:time: " \ + 
"($help)--no-healthcheck[Disable any container-specified HEALTHCHECK]" \ + "($help)--rm[Remove intermediate containers when it exits]" \ + "($help)--runtime=[Name of the runtime to be used for that container]:runtime:__docker_complete_runtimes" \ + "($help)--sig-proxy[Proxy all received signals to the process (non-TTY mode only)]" \ + "($help)--storage-opt=[Storage driver options for the container]:storage options:->storage-opt" \ + "($help -): :__docker_complete_images" \ + "($help -):command: _command_names -e" \ + "($help -)*::arguments: _normal" && ret=0 + case $state in + (link) + if compset -P "*:"; then + _wanted alias expl "Alias" compadd -E "" && ret=0 + else + __docker_complete_running_containers -qS ":" && ret=0 + fi + ;; + (storage-opt) + if compset -P "*="; then + _message "value" && ret=0 + else + opts=('size') + _describe -t filter-opts "storage options" opts -qS "=" && ret=0 + fi + ;; + esac + ;; + (start) + _arguments $(__docker_arguments) \ + $opts_help \ + $opts_attach_exec_run_start \ + "($help -a --attach)"{-a,--attach}"[Attach container's stdout/stderr and forward all signals]" \ + "($help -i --interactive)"{-i,--interactive}"[Attach container's stding]" \ + "($help -)*:containers:__docker_complete_stopped_containers" && ret=0 + ;; + (stats) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -a --all)"{-a,--all}"[Show all containers (default shows just running)]" \ + "($help)--format=[Pretty-print images using a Go template]:template: " \ + "($help)--no-stream[Disable streaming stats and only pull the first result]" \ + "($help -)*:containers:__docker_complete_running_containers" && ret=0 + ;; + (stop) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -t --time)"{-t=,--time=}"[Number of seconds to try to stop for before killing the container]:seconds to before killing:(1 5 10 30 60)" \ + "($help -)*:containers:__docker_complete_running_containers" && ret=0 + ;; + (top) + local state + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -)1:containers:__docker_complete_running_containers" \ + "($help -)*:: :->ps-arguments" && ret=0 + case $state in + (ps-arguments) + _ps && ret=0 + ;; + esac + ;; + (update) + local state + _arguments $(__docker_arguments) \ + $opts_help \ + opts_create_run_update \ + "($help -)*: :->values" && ret=0 + case $state in + (values) + if [[ ${words[(r)--kernel-memory*]} = (--kernel-memory*) ]]; then + __docker_complete_stopped_containers && ret=0 + else + __docker_complete_containers && ret=0 + fi + ;; + esac + ;; + (wait) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -)*:containers:__docker_complete_running_containers" && ret=0 + ;; + (help) + _arguments $(__docker_arguments) ":subcommand:__docker_container_commands" && ret=0 + ;; + esac + + return ret +} + +# EO container + +# BO image + +__docker_image_commands() { + local -a _docker_image_subcommands + _docker_image_subcommands=( + "build:Build an image from a Dockerfile" + "history:Show the history of an image" + "import:Import the contents from a tarball to create a filesystem image" + "inspect:Display detailed information on one or more images" + "load:Load an image from a tar archive or STDIN" + "ls:List images" + "prune:Remove unused images" + "pull:Pull an image or a repository from a registry" + "push:Push an image or a repository to a registry" + "rm:Remove one or more images" + "save:Save one or more images to a tar archive (streamed to STDOUT by default)" + "tag:Tag an image into a repository" + ) + _describe -t 
docker-image-commands "docker image command" _docker_image_subcommands +} + +__docker_image_subcommand() { + local -a _command_args opts_help + local expl help="--help" + integer ret=1 + + opts_help=("(: -)--help[Print usage]") + + case "$words[1]" in + (build) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help)*--add-host=[Add a custom host-to-IP mapping]:host\:ip mapping: " \ + "($help)*--build-arg=[Build-time variables]:<varname>=<value>: " \ + "($help)*--cache-from=[Images to consider as cache sources]: :__docker_complete_repositories_with_tags" \ + "($help -c --cpu-shares)"{-c=,--cpu-shares=}"[CPU shares (relative weight)]:CPU shares:(0 10 100 200 500 800 1000)" \ + "($help)--cgroup-parent=[Parent cgroup for the container]:cgroup: " \ + "($help)--compress[Compress the build context using gzip]" \ + "($help)--cpu-period=[Limit the CPU CFS (Completely Fair Scheduler) period]:CPU period: " \ + "($help)--cpu-quota=[Limit the CPU CFS (Completely Fair Scheduler) quota]:CPU quota: " \ + "($help)--cpu-rt-period=[Limit the CPU real-time period]:CPU real-time period in microseconds: " \ + "($help)--cpu-rt-runtime=[Limit the CPU real-time runtime]:CPU real-time runtime in microseconds: " \ + "($help)--cpuset-cpus=[CPUs in which to allow execution]:CPUs: " \ + "($help)--cpuset-mems=[MEMs in which to allow execution]:MEMs: " \ + "($help)--disable-content-trust[Skip image verification]" \ + "($help -f --file)"{-f=,--file=}"[Name of the Dockerfile]:Dockerfile:_files" \ + "($help)--force-rm[Always remove intermediate containers]" \ + "($help)--isolation=[Container isolation technology]:isolation:(default hyperv process)" \ + "($help)*--label=[Set metadata for an image]:label=value: " \ + "($help -m --memory)"{-m=,--memory=}"[Memory limit]:Memory limit: " \ + "($help)--memory-swap=[Total memory limit with swap]:Memory limit: " \ + "($help)--network=[Connect a container to a network]:network mode:(bridge none container host)" \ + "($help)--no-cache[Do not use cache when building the image]" \ + "($help)--pull[Attempt to pull a newer version of the image]" \ + "($help -q --quiet)"{-q,--quiet}"[Suppress verbose build output]" \ + "($help)--rm[Remove intermediate containers after a successful build]" \ + "($help)*--shm-size=[Size of '/dev/shm' (format is '<number><unit>')]:shm size: " \ + "($help)--squash[Squash newly built layers into a single new layer]" \ + "($help -t --tag)*"{-t=,--tag=}"[Repository, name and tag for the image]: :__docker_complete_repositories_with_tags" \ + "($help)*--ulimit=[ulimit options]:ulimit: " \ + "($help)--userns=[Container user namespace]:user namespace:(host)" \ + "($help -):path or URL:_directories" && ret=0 + ;; + (history) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -H --human)"{-H,--human}"[Print sizes and dates in human readable format]" \ + "($help)--no-trunc[Do not truncate output]" \ + "($help -q --quiet)"{-q,--quiet}"[Only show numeric IDs]" \ + "($help -)*: :__docker_complete_images" && ret=0 + ;; + (import) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help)*"{-c=,--change=}"[Apply Dockerfile instruction to the created image]:Dockerfile:_files" \ + "($help -m --message)"{-m=,--message=}"[Commit message for imported image]:message: " \ + "($help -):URL:(- http:// file://)" \ + "($help -): :__docker_complete_repositories_with_tags" && ret=0 + ;; + (inspect) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -f --format)"{-f=,--format=}"[Format the output using the given go template]:template: " \ + "($help -)*:images:__docker_complete_images"
&& ret=0 + ;; + (load) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -i --input)"{-i=,--input=}"[Read from tar archive file]:archive file:_files -g \"*.((tar|TAR)(.gz|.GZ|.Z|.bz2|.lzma|.xz|)|(tbz|tgz|txz))(-.)\"" \ + "($help -q --quiet)"{-q,--quiet}"[Suppress the load output]" && ret=0 + ;; + (ls|list) + local state + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -a --all)"{-a,--all}"[Show all images]" \ + "($help)--digests[Show digests]" \ + "($help)*"{-f=,--filter=}"[Filter values]:filter:__docker_complete_images_filters" \ + "($help)--format=[Pretty-print images using a Go template]:template: " \ + "($help)--no-trunc[Do not truncate output]" \ + "($help -q --quiet)"{-q,--quiet}"[Only show numeric IDs]" \ + "($help -): :__docker_complete_repositories" && ret=0 + ;; + (prune) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -a --all)"{-a,--all}"[Remove all unused images, not just dangling ones]" \ + "($help)*--filter=[Filter values]:filter:__docker_complete_prune_filters" \ + "($help -f --force)"{-f,--force}"[Do not prompt for confirmation]" && ret=0 + ;; + (pull) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -a --all-tags)"{-a,--all-tags}"[Download all tagged images]" \ + "($help)--disable-content-trust[Skip image verification]" \ + "($help -):name:__docker_search" && ret=0 + ;; + (push) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help)--disable-content-trust[Skip image signing]" \ + "($help -): :__docker_complete_images" && ret=0 + ;; + (rm) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -f --force)"{-f,--force}"[Force removal]" \ + "($help)--no-prune[Do not delete untagged parents]" \ + "($help -)*: :__docker_complete_images" && ret=0 + ;; + (save) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -o --output)"{-o=,--output=}"[Write to file]:file:_files" \ + "($help -)*: :__docker_complete_images" && ret=0 + ;; + (tag) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -):source:__docker_complete_images"\ + "($help -):destination:__docker_complete_repositories_with_tags" && ret=0 + ;; + (help) + _arguments $(__docker_arguments) ":subcommand:__docker_container_commands" && ret=0 + ;; + esac + + return ret +} + +# EO image + +# BO network + +__docker_network_complete_ls_filters() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + + if compset -P '*='; then + case "${${words[-1]%=*}#*=}" in + (driver) + __docker_complete_info_plugins Network && ret=0 + ;; + (id) + __docker_complete_networks_ids && ret=0 + ;; + (name) + __docker_complete_networks_names && ret=0 + ;; + (scope) + opts=('global' 'local' 'swarm') + _describe -t scope-filter-opts "Scope filter options" opts && ret=0 + ;; + (type) + opts=('builtin' 'custom') + _describe -t type-filter-opts "Type filter options" opts && ret=0 + ;; + *) + _message 'value' && ret=0 + ;; + esac + else + opts=('driver' 'id' 'label' 'name' 'scope' 'type') + _describe -t filter-opts "Filter Options" opts -qS "=" && ret=0 + fi + + return ret +} + +__docker_get_networks() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + local line s + declare -a lines networks + + type=$1; shift + + lines=(${(f)${:-"$(_call_program commands docker $docker_options network ls)"$'\n'}}) + + # Parse header line to find columns + local i=1 j=1 k header=${lines[1]} + declare -A begin end + while (( j < ${#header} - 1 )); do + i=$(( j + ${${header[$j,-1]}[(i)[^ ]]} - 1 )) + j=$(( i + ${${header[$i,-1]}[(i) ]} - 1 )) + k=$(( j + ${${header[$j,-1]}[(i)[^ ]]} - 2 )) 
+ begin[${header[$i,$((j-1))]}]=$i + end[${header[$i,$((j-1))]}]=$k + done + end[${header[$i,$((j-1))]}]=-1 + lines=(${lines[2,-1]}) + + # Network ID + if [[ $type = (ids|all) ]]; then + for line in $lines; do + s="${line[${begin[NETWORK ID]},${end[NETWORK ID]}]%% ##}" + s="$s:${(l:7:: :::)${${line[${begin[DRIVER]},${end[DRIVER]}]}%% ##}}" + s="$s, ${${line[${begin[SCOPE]},${end[SCOPE]}]}%% ##}" + networks=($networks $s) + done + fi + + # Names + if [[ $type = (names|all) ]]; then + for line in $lines; do + s="${line[${begin[NAME]},${end[NAME]}]%% ##}" + s="$s:${(l:7:: :::)${${line[${begin[DRIVER]},${end[DRIVER]}]}%% ##}}" + s="$s, ${${line[${begin[SCOPE]},${end[SCOPE]}]}%% ##}" + networks=($networks $s) + done + fi + + _describe -t networks-list "networks" networks "$@" && ret=0 + return ret +} + +__docker_complete_networks() { + [[ $PREFIX = -* ]] && return 1 + __docker_get_networks all "$@" +} + +__docker_complete_networks_ids() { + [[ $PREFIX = -* ]] && return 1 + __docker_get_networks ids "$@" +} + +__docker_complete_networks_names() { + [[ $PREFIX = -* ]] && return 1 + __docker_get_networks names "$@" +} + +__docker_network_commands() { + local -a _docker_network_subcommands + _docker_network_subcommands=( + "connect:Connect a container to a network" + "create:Creates a new network with a name specified by the user" + "disconnect:Disconnects a container from a network" + "inspect:Displays detailed information on a network" + "ls:Lists all the networks created by the user" + "prune:Remove all unused networks" + "rm:Deletes one or more networks" + ) + _describe -t docker-network-commands "docker network command" _docker_network_subcommands +} + +__docker_network_subcommand() { + local -a _command_args opts_help + local expl help="--help" + integer ret=1 + + opts_help=("(: -)--help[Print usage]") + + case "$words[1]" in + (connect) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help)*--alias=[Add network-scoped alias for the container]:alias: " \ + "($help)--ip=[IPv4 address]:IPv4: " \ + "($help)--ip6=[IPv6 address]:IPv6: " \ + "($help)*--link=[Add a link to another container]:link:->link" \ + "($help)*--link-local-ip=[Add a link-local address for the container]:IPv4/IPv6: " \ + "($help -)1:network:__docker_complete_networks" \ + "($help -)2:containers:__docker_complete_containers" && ret=0 + + case $state in + (link) + if compset -P "*:"; then + _wanted alias expl "Alias" compadd -E "" && ret=0 + else + __docker_complete_running_containers -qS ":" && ret=0 + fi + ;; + esac + ;; + (create) + _arguments $(__docker_arguments) -A '-*' \ + $opts_help \ + "($help)--attachable[Enable manual container attachment]" \ + "($help)*--aux-address[Auxiliary IPv4 or IPv6 addresses used by network driver]:key=IP: " \ + "($help -d --driver)"{-d=,--driver=}"[Driver to manage the Network]:driver:(null host bridge overlay)" \ + "($help)*--gateway=[IPv4 or IPv6 Gateway for the master subnet]:IP: " \ + "($help)--internal[Restricts external access to the network]" \ + "($help)*--ip-range=[Allocate container ip from a sub-range]:IP/mask: " \ + "($help)--ipam-driver=[IP Address Management Driver]:driver:(default)" \ + "($help)*--ipam-opt=[Custom IPAM plugin options]:opt=value: " \ + "($help)--ipv6[Enable IPv6 networking]" \ + "($help)*--label=[Set metadata on a network]:label=value: " \ + "($help)*"{-o=,--opt=}"[Driver specific options]:opt=value: " \ + "($help)*--subnet=[Subnet in CIDR format that represents a network segment]:IP/mask: " \ + "($help -)1:Network Name: " && ret=0 + ;; + (disconnect) + 
_arguments $(__docker_arguments) \ + $opts_help \ + "($help -)1:network:__docker_complete_networks" \ + "($help -)2:containers:__docker_complete_containers" && ret=0 + ;; + (inspect) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -f --format)"{-f=,--format=}"[Format the output using the given go template]:template: " \ + "($help)--verbose[Show detailed information]" \ + "($help -)*:network:__docker_complete_networks" && ret=0 + ;; + (ls) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help)--no-trunc[Do not truncate the output]" \ + "($help)*"{-f=,--filter=}"[Provide filter values]:filter:__docker_network_complete_ls_filters" \ + "($help)--format=[Pretty-print networks using a Go template]:template: " \ + "($help -q --quiet)"{-q,--quiet}"[Only display numeric IDs]" && ret=0 + ;; + (prune) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help)*--filter=[Filter values]:filter:__docker_complete_prune_filters" \ + "($help -f --force)"{-f,--force}"[Do not prompt for confirmation]" && ret=0 + ;; + (rm) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -)*:network:__docker_complete_networks" && ret=0 + ;; + (help) + _arguments $(__docker_arguments) ":subcommand:__docker_network_commands" && ret=0 + ;; + esac + + return ret +} + +# EO network + +# BO node + +__docker_node_complete_ls_filters() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + + if compset -P '*='; then + case "${${words[-1]%=*}#*=}" in + (id) + __docker_complete_nodes_ids && ret=0 + ;; + (membership) + membership_opts=('accepted' 'pending' 'rejected') + _describe -t membership-opts "membership options" membership_opts && ret=0 + ;; + (name) + __docker_complete_nodes_names && ret=0 + ;; + (role) + role_opts=('manager' 'worker') + _describe -t role-opts "role options" role_opts && ret=0 + ;; + *) + _message 'value' && ret=0 + ;; + esac + else + opts=('id' 'label' 'membership' 'name' 'role') + _describe -t filter-opts "filter options" opts -qS "=" && ret=0 + fi + + return ret +} + +__docker_node_complete_ps_filters() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + + if compset -P '*='; then + case "${${words[-1]%=*}#*=}" in + (desired-state) + state_opts=('accepted' 'running' 'shutdown') + _describe -t state-opts "desired state options" state_opts && ret=0 + ;; + *) + _message 'value' && ret=0 + ;; + esac + else + opts=('desired-state' 'id' 'label' 'name') + _describe -t filter-opts "filter options" opts -qS "=" && ret=0 + fi + + return ret +} + +__docker_nodes() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + local line s + declare -a lines nodes args + + type=$1; shift + filter=$1; shift + [[ $filter != "none" ]] && args=("-f $filter") + + lines=(${(f)${:-"$(_call_program commands docker $docker_options node ls $args)"$'\n'}}) + # Parse header line to find columns + local i=1 j=1 k header=${lines[1]} + declare -A begin end + while (( j < ${#header} - 1 )); do + i=$(( j + ${${header[$j,-1]}[(i)[^ ]]} - 1 )) + j=$(( i + ${${header[$i,-1]}[(i) ]} - 1 )) + k=$(( j + ${${header[$j,-1]}[(i)[^ ]]} - 2 )) + begin[${header[$i,$((j-1))]}]=$i + end[${header[$i,$((j-1))]}]=$k + done + end[${header[$i,$((j-1))]}]=-1 + lines=(${lines[2,-1]}) + + # Node ID + if [[ $type = (ids|all) ]]; then + for line in $lines; do + s="${line[${begin[ID]},${end[ID]}]%% ##}" + nodes=($nodes $s) + done + fi + + # Names + if [[ $type = (names|all) ]]; then + for line in $lines; do + s="${line[${begin[NAME]},${end[NAME]}]%% ##}" + nodes=($nodes $s) + done + fi + + _describe -t nodes-list "nodes" nodes "$@" && 
ret=0 + return ret +} + +__docker_complete_nodes() { + [[ $PREFIX = -* ]] && return 1 + __docker_nodes all none "$@" +} + +__docker_complete_nodes_ids() { + [[ $PREFIX = -* ]] && return 1 + __docker_nodes ids none "$@" +} + +__docker_complete_nodes_names() { + [[ $PREFIX = -* ]] && return 1 + __docker_nodes names none "$@" +} + +__docker_complete_pending_nodes() { + [[ $PREFIX = -* ]] && return 1 + __docker_nodes all "membership=pending" "$@" +} + +__docker_complete_manager_nodes() { + [[ $PREFIX = -* ]] && return 1 + __docker_nodes all "role=manager" "$@" +} + +__docker_complete_worker_nodes() { + [[ $PREFIX = -* ]] && return 1 + __docker_nodes all "role=worker" "$@" +} + +__docker_node_commands() { + local -a _docker_node_subcommands + _docker_node_subcommands=( + "demote:Demote one or more nodes from manager in the swarm" + "inspect:Display detailed information on one or more nodes" + "ls:List nodes in the swarm" + "promote:Promote one or more nodes to manager in the swarm" + "rm:Remove one or more nodes from the swarm" + "ps:List tasks running on one or more nodes, defaults to current node" + "update:Update a node" + ) + _describe -t docker-node-commands "docker node command" _docker_node_subcommands +} + +__docker_node_subcommand() { + local -a _command_args opts_help + local expl help="--help" + integer ret=1 + + opts_help=("(: -)--help[Print usage]") + + case "$words[1]" in + (rm|remove) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -f --force)"{-f,--force}"[Force remove a node from the swarm]" \ + "($help -)*:node:__docker_complete_pending_nodes" && ret=0 + ;; + (demote) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -)*:node:__docker_complete_manager_nodes" && ret=0 + ;; + (inspect) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -f --format)"{-f=,--format=}"[Format the output using the given go template]:template: " \ + "($help)--pretty[Print the information in a human friendly format]" \ + "($help -)*:node:__docker_complete_nodes" && ret=0 + ;; + (ls|list) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help)*"{-f=,--filter=}"[Provide filter values]:filter:__docker_node_complete_ls_filters" \ + "($help -q --quiet)"{-q,--quiet}"[Only display IDs]" && ret=0 + ;; + (promote) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -)*:node:__docker_complete_worker_nodes" && ret=0 + ;; + (ps) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -a --all)"{-a,--all}"[Display all instances]" \ + "($help)*"{-f=,--filter=}"[Provide filter values]:filter:__docker_node_complete_ps_filters" \ + "($help)--format=[Format the output using the given go template]:template: " \ + "($help)--no-resolve[Do not map IDs to Names]" \ + "($help)--no-trunc[Do not truncate output]" \ + "($help -q --quiet)"{-q,--quiet}"[Only display IDs]" \ + "($help -)*:node:__docker_complete_nodes" && ret=0 + ;; + (update) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help)--availability=[Availability of the node]:availability:(active pause drain)" \ + "($help)*--label-add=[Add or update a node label]:key=value: " \ + "($help)*--label-rm=[Remove a node label if exists]:label: " \ + "($help)--role=[Role of the node]:role:(manager worker)" \ + "($help -)1:node:__docker_complete_nodes" && ret=0 + ;; + (help) + _arguments $(__docker_arguments) ":subcommand:__docker_node_commands" && ret=0 + ;; + esac + + return ret +} + +# EO node + +# BO plugin + +__docker_plugin_complete_ls_filters() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + + if compset -P '*='; then +
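# compset -P '*=' moves the "name=" part the user has already typed out of + # $PREFIX (into $IPREFIX), so the cases below complete only the value side + # of the filter. +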
case "${${words[-1]%=*}#*=}" in + (capability) + opts=('authz' 'ipamdriver' 'networkdriver' 'volumedriver') + _describe -t capability-opts "capability options" opts && ret=0 + ;; + (enabled) + opts=('false' 'true') + _describe -t enabled-opts "enabled options" opts && ret=0 + ;; + *) + _message 'value' && ret=0 + ;; + esac + else + opts=('capability' 'enabled') + _describe -t filter-opts "filter options" opts -qS "=" && ret=0 + fi + + return ret +} + +__docker_plugins() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + local line s + declare -a lines plugins args + + filter=$1; shift + [[ $filter != "none" ]] && args=("-f $filter") + + lines=(${(f)${:-"$(_call_program commands docker $docker_options plugin ls $args)"$'\n'}}) + + # Parse header line to find columns + local i=1 j=1 k header=${lines[1]} + declare -A begin end + while (( j < ${#header} - 1 )); do + i=$(( j + ${${header[$j,-1]}[(i)[^ ]]} - 1 )) + j=$(( i + ${${header[$i,-1]}[(i) ]} - 1 )) + k=$(( j + ${${header[$j,-1]}[(i)[^ ]]} - 2 )) + begin[${header[$i,$((j-1))]}]=$i + end[${header[$i,$((j-1))]}]=$k + done + end[${header[$i,$((j-1))]}]=-1 + lines=(${lines[2,-1]}) + + # Name + for line in $lines; do + s="${line[${begin[NAME]},${end[NAME]}]%% ##}" + s="$s:${(l:7:: :::)${${line[${begin[TAG]},${end[TAG]}]}%% ##}}" + plugins=($plugins $s) + done + + _describe -t plugins-list "plugins" plugins "$@" && ret=0 + return ret +} + +__docker_complete_plugins() { + [[ $PREFIX = -* ]] && return 1 + __docker_plugins none "$@" +} + +__docker_complete_enabled_plugins() { + [[ $PREFIX = -* ]] && return 1 + __docker_plugins enabled=true "$@" +} + +__docker_complete_disabled_plugins() { + [[ $PREFIX = -* ]] && return 1 + __docker_plugins enabled=false "$@" +} + +__docker_plugin_commands() { + local -a _docker_plugin_subcommands + _docker_plugin_subcommands=( + "disable:Disable a plugin" + "enable:Enable a plugin" + "inspect:Return low-level information about a plugin" + "install:Install a plugin" + "ls:List plugins" + "push:Push a plugin" + "rm:Remove a plugin" + "set:Change settings for a plugin" + "upgrade:Upgrade an existing plugin" + ) + _describe -t docker-plugin-commands "docker plugin command" _docker_plugin_subcommands +} + +__docker_plugin_subcommand() { + local -a _command_args opts_help + local expl help="--help" + integer ret=1 + + opts_help=("(: -)--help[Print usage]") + + case "$words[1]" in + (disable) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -f --force)"{-f,--force}"[Force the disable of an active plugin]" \ + "($help -)1:plugin:__docker_complete_enabled_plugins" && ret=0 + ;; + (enable) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help)--timeout=[HTTP client timeout (in seconds)]:timeout: " \ + "($help -)1:plugin:__docker_complete_disabled_plugins" && ret=0 + ;; + (inspect) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -f --format)"{-f=,--format=}"[Format the output using the given Go template]:template: " \ + "($help -)*:plugin:__docker_complete_plugins" && ret=0 + ;; + (install) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help)--alias=[Local name for plugin]:alias: " \ + "($help)--disable[Do not enable the plugin on install]" \ + "($help)--disable-content-trust[Skip image verification (default true)]" \ + "($help)--grant-all-permissions[Grant all permissions necessary to run the plugin]" \ + "($help -)1:plugin:__docker_complete_plugins" \ + "($help -)*:key=value: " && ret=0 + ;; + (ls|list) + _arguments $(__docker_arguments) \ + $opts_help \ + 
"($help)*"{-f=,--filter=}"[Filter output based on conditions provided]:filter:__docker_plugin_complete_ls_filters" \ + "($help --format)--format=[Format the output using the given Go template]:template: " \ + "($help)--no-trunc[Don't truncate output]" \ + "($help -q --quiet)"{-q,--quiet}"[Only display IDs]" && ret=0 + ;; + (push) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help)--disable-content-trust[Skip image verification (default true)]" \ + "($help -)1:plugin:__docker_complete_plugins" && ret=0 + ;; + (rm|remove) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -f --force)"{-f,--force}"[Force the removal of an active plugin]" \ + "($help -)*:plugin:__docker_complete_plugins" && ret=0 + ;; + (set) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -)1:plugin:__docker_complete_plugins" \ + "($help -)*:key=value: " && ret=0 + ;; + (upgrade) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help)--disable-content-trust[Skip image verification (default true)]" \ + "($help)--grant-all-permissions[Grant all permissions necessary to run the plugin]" \ + "($help)--skip-remote-check[Do not check if specified remote plugin matches existing plugin image]" \ + "($help -)1:plugin:__docker_complete_plugins" \ + "($help -):remote: " && ret=0 + ;; + (help) + _arguments $(__docker_arguments) ":subcommand:__docker_plugin_commands" && ret=0 + ;; + esac + + return ret +} + +# EO plugin + +# BO secret + +__docker_secrets() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + local line s + declare -a lines secrets + + type=$1; shift + + lines=(${(f)${:-"$(_call_program commands docker $docker_options secret ls)"$'\n'}}) + + # Parse header line to find columns + local i=1 j=1 k header=${lines[1]} + declare -A begin end + while (( j < ${#header} - 1 )); do + i=$(( j + ${${header[$j,-1]}[(i)[^ ]]} - 1 )) + j=$(( i + ${${header[$i,-1]}[(i) ]} - 1 )) + k=$(( j + ${${header[$j,-1]}[(i)[^ ]]} - 2 )) + begin[${header[$i,$((j-1))]}]=$i + end[${header[$i,$((j-1))]}]=$k + done + end[${header[$i,$((j-1))]}]=-1 + lines=(${lines[2,-1]}) + + # ID + if [[ $type = (ids|all) ]]; then + for line in $lines; do + s="${line[${begin[ID]},${end[ID]}]%% ##}" + secrets=($secrets $s) + done + fi + + # Names + if [[ $type = (names|all) ]]; then + for line in $lines; do + s="${line[${begin[NAME]},${end[NAME]}]%% ##}" + secrets=($secrets $s) + done + fi + + _describe -t secrets-list "secrets" secrets "$@" && ret=0 + return ret +} + +__docker_complete_secrets() { + [[ $PREFIX = -* ]] && return 1 + __docker_secrets all "$@" +} + +__docker_secret_commands() { + local -a _docker_secret_subcommands + _docker_secret_subcommands=( + "create:Create a secret using stdin as content" + "inspect:Display detailed information on one or more secrets" + "ls:List secrets" + "rm:Remove one or more secrets" + ) + _describe -t docker-secret-commands "docker secret command" _docker_secret_subcommands +} + +__docker_secret_subcommand() { + local -a _command_args opts_help + local expl help="--help" + integer ret=1 + + opts_help=("(: -)--help[Print usage]") + + case "$words[1]" in + (create) + _arguments $(__docker_arguments) -A '-*' \ + $opts_help \ + "($help)*"{-l=,--label=}"[Secret labels]:label: " \ + "($help -):secret: " && ret=0 + ;; + (inspect) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -f --format)"{-f=,--format=}"[Format the output using the given Go template]:template: " \ + "($help -)*:secret:__docker_complete_secrets" && ret=0 + ;; + (ls|list) + _arguments $(__docker_arguments) \ + 
$opts_help \ + "($help)--format=[Format the output using the given go template]:template: " \ + "($help -q --quiet)"{-q,--quiet}"[Only display IDs]" && ret=0 + ;; + (rm|remove) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -)*:secret:__docker_complete_secrets" && ret=0 + ;; + (help) + _arguments $(__docker_arguments) ":subcommand:__docker_secret_commands" && ret=0 + ;; + esac + + return ret +} + +# EO secret + +# BO service + +__docker_service_complete_ls_filters() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + + if compset -P '*='; then + case "${${words[-1]%=*}#*=}" in + (id) + __docker_complete_services_ids && ret=0 + ;; + (mode) + opts=('global' 'replicated') + _describe -t mode-opts "mode options" opts && ret=0 + ;; + (name) + __docker_complete_services_names && ret=0 + ;; + *) + _message 'value' && ret=0 + ;; + esac + else + opts=('id' 'label' 'mode' 'name') + _describe -t filter-opts "filter options" opts -qS "=" && ret=0 + fi + + return ret +} + +__docker_service_complete_ps_filters() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + + if compset -P '*='; then + case "${${words[-1]%=*}#*=}" in + (desired-state) + state_opts=('accepted' 'running' 'shutdown') + _describe -t state-opts "desired state options" state_opts && ret=0 + ;; + *) + _message 'value' && ret=0 + ;; + esac + else + opts=('desired-state' 'id' 'label' 'name') + _describe -t filter-opts "filter options" opts -qS "=" && ret=0 + fi + + return ret +} + +__docker_service_complete_placement_pref() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + + if compset -P '*='; then + case "${${words[-1]%=*}#*=}" in + (spread) + opts=('engine.labels' 'node.labels') + _describe -t spread-opts "spread options" opts -qS "." && ret=0 + ;; + *) + _message 'value' && ret=0 + ;; + esac + else + opts=('spread') + _describe -t pref-opts "placement pref options" opts -qS "=" && ret=0 + fi + + return ret +} + +__docker_services() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + local line s + declare -a lines services + + type=$1; shift + + lines=(${(f)${:-"$(_call_program commands docker $docker_options service ls)"$'\n'}}) + + # Parse header line to find columns + local i=1 j=1 k header=${lines[1]} + declare -A begin end + while (( j < ${#header} - 1 )); do + i=$(( j + ${${header[$j,-1]}[(i)[^ ]]} - 1 )) + j=$(( i + ${${header[$i,-1]}[(i) ]} - 1 )) + k=$(( j + ${${header[$j,-1]}[(i)[^ ]]} - 2 )) + begin[${header[$i,$((j-1))]}]=$i + end[${header[$i,$((j-1))]}]=$k + done + end[${header[$i,$((j-1))]}]=-1 + lines=(${lines[2,-1]}) + + # Service ID + if [[ $type = (ids|all) ]]; then + for line in $lines; do + s="${line[${begin[ID]},${end[ID]}]%% ##}" + s="$s:${(l:7:: :::)${${line[${begin[IMAGE]},${end[IMAGE]}]}%% ##}}" + services=($services $s) + done + fi + + # Names + if [[ $type = (names|all) ]]; then + for line in $lines; do + s="${line[${begin[NAME]},${end[NAME]}]%% ##}" + s="$s:${(l:7:: :::)${${line[${begin[IMAGE]},${end[IMAGE]}]}%% ##}}" + services=($services $s) + done + fi + + _describe -t services-list "services" services "$@" && ret=0 + return ret +} + +__docker_complete_services() { + [[ $PREFIX = -* ]] && return 1 + __docker_services all "$@" +} + +__docker_complete_services_ids() { + [[ $PREFIX = -* ]] && return 1 + __docker_services ids "$@" +} + +__docker_complete_services_names() { + [[ $PREFIX = -* ]] && return 1 + __docker_services names "$@" +} + +__docker_service_commands() { + local -a _docker_service_subcommands + _docker_service_subcommands=( + "create:Create a new service" + 
"inspect:Display detailed information on one or more services" + "logs:Fetch the logs of a service or task" + "ls:List services" + "rm:Remove one or more services" + "scale:Scale one or multiple replicated services" + "ps:List the tasks of a service" + "update:Update a service" + ) + _describe -t docker-service-commands "docker service command" _docker_service_subcommands +} + +__docker_service_subcommand() { + local -a _command_args opts_help opts_create_update + local expl help="--help" + integer ret=1 + + opts_help=("(: -)--help[Print usage]") + opts_create_update=( + "($help)*--constraint=[Placement constraints]:constraint: " + "($help)--endpoint-mode=[Placement constraints]:mode:(dnsrr vip)" + "($help)*"{-e=,--env=}"[Set environment variables]:env: " + "($help)--health-cmd=[Command to run to check health]:command: " + "($help)--health-interval=[Time between running the check]:time: " + "($help)--health-retries=[Consecutive failures needed to report unhealthy]:retries:(1 2 3 4 5)" + "($help)--health-timeout=[Maximum time to allow one check to run]:time: " + "($help)--hostname=[Service container hostname]:hostname: " \ + "($help)*--label=[Service labels]:label: " + "($help)--limit-cpu=[Limit CPUs]:value: " + "($help)--limit-memory=[Limit Memory]:value: " + "($help)--log-driver=[Logging driver for service]:logging driver:__docker_complete_log_drivers" + "($help)*--log-opt=[Logging driver options]:log driver options:__docker_complete_log_options" + "($help)*--mount=[Attach a filesystem mount to the service]:mount: " + "($help)*--network=[Network attachments]:network: " + "($help)--no-healthcheck[Disable any container-specified HEALTHCHECK]" + "($help)*"{-p=,--publish=}"[Publish a port as a node port]:port: " + "($help)--read-only[Mount the container's root filesystem as read only]" + "($help)--replicas=[Number of tasks]:replicas: " + "($help)--reserve-cpu=[Reserve CPUs]:value: " + "($help)--reserve-memory=[Reserve Memory]:value: " + "($help)--restart-condition=[Restart when condition is met]:mode:(any none on-failure)" + "($help)--restart-delay=[Delay between restart attempts]:delay: " + "($help)--restart-max-attempts=[Maximum number of restarts before giving up]:max-attempts: " + "($help)--restart-window=[Window used to evaluate the restart policy]:duration: " + "($help)--rollback-delay=[Delay between task rollbacks]:duration: " + "($help)--rollback-failure-action=[Action on rollback failure]:action:(continue pause)" + "($help)--rollback-max-failure-ratio=[Failure rate to tolerate during a rollback]:failure rate: " + "($help)--rollback-monitor=[Duration after each task rollback to monitor for failure]:duration: " + "($help)--rollback-parallelism=[Maximum number of tasks rolled back simultaneously]:number: " + "($help)*--secret=[Specify secrets to expose to the service]:secret:__docker_complete_secrets" + "($help)--stop-grace-period=[Time to wait before force killing a container]:grace period: " + "($help)--stop-signal=[Signal to stop the container]:signal:_signals" + "($help -t --tty)"{-t,--tty}"[Allocate a pseudo-TTY]" + "($help)--update-delay=[Delay between updates]:delay: " + "($help)--update-failure-action=[Action on update failure]:mode:(continue pause rollback)" + "($help)--update-max-failure-ratio=[Failure rate to tolerate during an update]:fraction: " + "($help)--update-monitor=[Duration after each task update to monitor for failure]:window: " + "($help)--update-parallelism=[Maximum number of tasks updated simultaneously]:number: " + "($help -u --user)"{-u=,--user=}"[Username or 
UID]:user:_users" + "($help)--with-registry-auth[Send registry authentication details to swarm agents]" + "($help -w --workdir)"{-w=,--workdir=}"[Working directory inside the container]:directory:_directories" + ) + + case "$words[1]" in + (create) + _arguments $(__docker_arguments) \ + $opts_help \ + $opts_create_update \ + "($help)*--container-label=[Container labels]:label: " \ + "($help)*--dns=[Set custom DNS servers]:DNS: " \ + "($help)*--dns-option=[Set DNS options]:DNS option: " \ + "($help)*--dns-search=[Set custom DNS search domains]:DNS search: " \ + "($help)*--env-file=[Read environment variables from a file]:environment file:_files" \ + "($help)--mode=[Service Mode]:mode:(global replicated)" \ + "($help)--name=[Service name]:name: " \ + "($help)*--placement-pref=[Add a placement preference]:pref:__docker_service_complete_placement_pref" \ + "($help)*--publish=[Publish a port]:port: " \ + "($help -): :__docker_complete_images" \ + "($help -):command: _command_names -e" \ + "($help -)*::arguments: _normal" && ret=0 + ;; + (inspect) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -f --format)"{-f=,--format=}"[Format the output using the given go template]:template: " \ + "($help)--pretty[Print the information in a human friendly format]" \ + "($help -)*:service:__docker_complete_services" && ret=0 + ;; + (logs) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -f --follow)"{-f,--follow}"[Follow log output]" \ + "($help)--no-resolve[Do not map IDs to Names]" \ + "($help)--no-task-ids[Do not include task IDs]" \ + "($help)--no-trunc[Do not truncate output]" \ + "($help)--since=[Show logs since timestamp]:timestamp: " \ + "($help)--tail=[Number of lines to show from the end of the logs]:lines:(1 10 20 50 all)" \ + "($help -t --timestamps)"{-t,--timestamps}"[Show timestamps]" \ + "($help -)1:service:__docker_complete_services" && ret=0 + ;; + (ls|list) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help)*"{-f=,--filter=}"[Filter output based on conditions provided]:filter:__docker_service_complete_ls_filters" \ + "($help)--format=[Pretty-print services using a Go template]:template: " \ + "($help -q --quiet)"{-q,--quiet}"[Only display IDs]" && ret=0 + ;; + (rm|remove) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -)*:service:__docker_complete_services" && ret=0 + ;; + (scale) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -)*:service:->values" && ret=0 + case $state in + (values) + if compset -P '*='; then + _message 'replicas' && ret=0 + else + __docker_complete_services -qS "=" + fi + ;; + esac + ;; + (ps) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help)*"{-f=,--filter=}"[Provide filter values]:filter:__docker_service_complete_ps_filters" \ + "($help)--format=[Format the output using the given go template]:template: " \ + "($help)--no-resolve[Do not map IDs to Names]" \ + "($help)--no-trunc[Do not truncate output]" \ + "($help -q --quiet)"{-q,--quiet}"[Only display task IDs]" \ + "($help -)*:service:__docker_complete_services" && ret=0 + ;; + (update) + _arguments $(__docker_arguments) \ + $opts_help \ + $opts_create_update \ + "($help)--args=[Service command args]:arguments: _normal" \ + "($help)*--container-label-add=[Add or update container labels]:label: " \ + "($help)*--container-label-rm=[Remove a container label by its key]:label: " \ + "($help)*--dns-add=[Add or update custom DNS servers]:DNS: " \ + "($help)*--dns-rm=[Remove custom DNS servers]:DNS: " \ + "($help)*--dns-option-add=[Add or update 
DNS options]:DNS option: " \ + "($help)*--dns-option-rm=[Remove DNS options]:DNS option: " \ + "($help)*--dns-search-add=[Add or update custom DNS search domains]:DNS search: " \ + "($help)*--dns-search-rm=[Remove DNS search domains]:DNS search: " \ + "($help)--force[Force update]" \ + "($help)*--group-add=[Add additional supplementary user groups to the container]:group:_groups" \ + "($help)*--group-rm=[Remove previously added supplementary user groups from the container]:group:_groups" \ + "($help)--image=[Service image tag]:image:__docker_complete_repositories" \ + "($help)*--placement-pref-add=[Add a placement preference]:pref:__docker_service_complete_placement_pref" \ + "($help)*--placement-pref-rm=[Remove a placement preference]:pref:__docker_service_complete_placement_pref" \ + "($help)*--publish-add=[Add or update a port]:port: " \ + "($help)*--publish-rm=[Remove a port (target-port mandatory)]:port: " \ + "($help)--rollback[Rollback to previous specification]" \ + "($help -)1:service:__docker_complete_services" && ret=0 + ;; + (help) + _arguments $(__docker_arguments) ":subcommand:__docker_service_commands" && ret=0 + ;; + esac + + return ret +} + +# EO service + +# BO stack + +__docker_stack_complete_ps_filters() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + + if compset -P '*='; then + case "${${words[-1]%=*}#*=}" in + (desired-state) + state_opts=('accepted' 'running' 'shutdown') + _describe -t state-opts "desired state options" state_opts && ret=0 + ;; + *) + _message 'value' && ret=0 + ;; + esac + else + opts=('desired-state' 'id' 'name') + _describe -t filter-opts "filter options" opts -qS "=" && ret=0 + fi + + return ret +} + +__docker_stack_complete_services_filters() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + + if compset -P '*='; then + case "${${words[-1]%=*}#*=}" in + *) + _message 'value' && ret=0 + ;; + esac + else + opts=('id' 'label' 'name') + _describe -t filter-opts "filter options" opts -qS "=" && ret=0 + fi + + return ret +} + +__docker_stacks() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + local line s + declare -a lines stacks + + lines=(${(f)${:-"$(_call_program commands docker $docker_options stack ls)"$'\n'}}) + + # Parse header line to find columns + local i=1 j=1 k header=${lines[1]} + declare -A begin end + while (( j < ${#header} - 1 )); do + i=$(( j + ${${header[$j,-1]}[(i)[^ ]]} - 1 )) + j=$(( i + ${${header[$i,-1]}[(i) ]} - 1 )) + k=$(( j + ${${header[$j,-1]}[(i)[^ ]]} - 2 )) + begin[${header[$i,$((j-1))]}]=$i + end[${header[$i,$((j-1))]}]=$k + done + end[${header[$i,$((j-1))]}]=-1 + lines=(${lines[2,-1]}) + + # Stack ID + for line in $lines; do + s="${line[${begin[ID]},${end[ID]}]%% ##}" + stacks=($stacks $s) + done + + _describe -t stacks-list "stacks" stacks "$@" && ret=0 + return ret +} + +__docker_complete_stacks() { + [[ $PREFIX = -* ]] && return 1 + __docker_stacks "$@" +} + +__docker_stack_commands() { + local -a _docker_stack_subcommands + _docker_stack_subcommands=( + "deploy:Deploy a new stack or update an existing stack" + "ls:List stacks" + "ps:List the tasks in the stack" + "rm:Remove the stack" + "services:List the services in the stack" + ) + _describe -t docker-stack-commands "docker stack command" _docker_stack_subcommands +} + +__docker_stack_subcommand() { + local -a _command_args opts_help + local expl help="--help" + integer ret=1 + + opts_help=("(: -)--help[Print usage]") + + case "$words[1]" in + (deploy|up) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help)--bundle-file=[Path to a 
Distributed Application Bundle file]:dab:_files -g \"*.dab\"" \ + "($help -c --compose-file)"{-c=,--compose-file=}"[Path to a Compose file]:compose file:_files -g \"*.(yml|yaml)\"" \ + "($help)--with-registry-auth[Send registry authentication details to Swarm agents]" \ + "($help -):stack:__docker_complete_stacks" && ret=0 + ;; + (ls|list) + _arguments $(__docker_arguments) \ + $opts_help && ret=0 + ;; + (ps) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -a --all)"{-a,--all}"[Display all tasks]" \ + "($help)*"{-f=,--filter=}"[Filter output based on conditions provided]:filter:__docker_stack_complete_ps_filters" \ + "($help)--format=[Format the output using the given go template]:template: " \ + "($help)--no-resolve[Do not map IDs to Names]" \ + "($help)--no-trunc[Do not truncate output]" \ + "($help -q --quiet)"{-q,--quiet}"[Only display task IDs]" \ + "($help -):stack:__docker_complete_stacks" && ret=0 + ;; + (rm|remove|down) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -):stack:__docker_complete_stacks" && ret=0 + ;; + (services) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help)*"{-f=,--filter=}"[Filter output based on conditions provided]:filter:__docker_stack_complete_services_filters" \ + "($help)--format=[Pretty-print services using a Go template]:template: " \ + "($help -q --quiet)"{-q,--quiet}"[Only display IDs]" \ + "($help -):stack:__docker_complete_stacks" && ret=0 + ;; + (help) + _arguments $(__docker_arguments) ":subcommand:__docker_stack_commands" && ret=0 + ;; + esac + + return ret +} + +# EO stack + +# BO swarm + +__docker_swarm_commands() { + local -a _docker_swarm_subcommands + _docker_swarm_subcommands=( + "init:Initialize a swarm" + "join:Join a swarm as a node and/or manager" + "join-token:Manage join tokens" + "leave:Leave a swarm" + "unlock:Unlock swarm" + "unlock-key:Manage the unlock key" + "update:Update the swarm" + ) + _describe -t docker-swarm-commands "docker swarm command" _docker_swarm_subcommands +} + +__docker_swarm_subcommand() { + local -a _command_args opts_help + local expl help="--help" + integer ret=1 + + opts_help=("(: -)--help[Print usage]") + + case "$words[1]" in + (init) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help)--advertise-addr=[Advertised address]:ip\:port: " \ + "($help)--data-path-addr=[Data path IP or interface]:ip " \ + "($help)--autolock[Enable manager autolocking]" \ + "($help)--availability=[Availability of the node]:availability:(active drain pause)" \ + "($help)--cert-expiry=[Validity period for node certificates]:duration: " \ + "($help)--dispatcher-heartbeat=[Dispatcher heartbeat period]:duration: " \ + "($help)*--external-ca=[Specifications of one or more certificate signing endpoints]:endpoint: " \ + "($help)--force-new-cluster[Force create a new cluster from current state]" \ + "($help)--listen-addr=[Listen address]:ip\:port: " \ + "($help)--max-snapshots[Number of additional Raft snapshots to retain]" \ + "($help)--snapshot-interval[Number of log entries between Raft snapshots]" \ + "($help)--task-history-limit=[Task history retention limit]:limit: " && ret=0 + ;; + (join) + _arguments $(__docker_arguments) -A '-*' \ + $opts_help \ + "($help)--advertise-addr=[Advertised address]:ip\:port: " \ + "($help)--data-path-addr=[Data path IP or interface]:ip " \ + "($help)--availability=[Availability of the node]:availability:(active drain pause)" \ + "($help)--listen-addr=[Listen address]:ip\:port: " \ + "($help)--token=[Token for entry into the swarm]:secret: " \ + 
"($help -):host\:port: " && ret=0 + ;; + (join-token) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -q --quiet)"{-q,--quiet}"[Only display token]" \ + "($help)--rotate[Rotate join token]" \ + "($help -):role:(manager worker)" && ret=0 + ;; + (leave) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -f --force)"{-f,--force}"[Force this node to leave the swarm, ignoring warnings]" && ret=0 + ;; + (unlock) + _arguments $(__docker_arguments) \ + $opts_help && ret=0 + ;; + (unlock-key) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -q --quiet)"{-q,--quiet}"[Only display token]" \ + "($help)--rotate[Rotate unlock token]" && ret=0 + ;; + (update) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help)--autolock[Enable manager autolocking]" \ + "($help)--cert-expiry=[Validity period for node certificates]:duration: " \ + "($help)--dispatcher-heartbeat=[Dispatcher heartbeat period]:duration: " \ + "($help)*--external-ca=[Specifications of one or more certificate signing endpoints]:endpoint: " \ + "($help)--max-snapshots[Number of additional Raft snapshots to retain]" \ + "($help)--snapshot-interval[Number of log entries between Raft snapshots]" \ + "($help)--task-history-limit=[Task history retention limit]:limit: " && ret=0 + ;; + (help) + _arguments $(__docker_arguments) ":subcommand:__docker_network_commands" && ret=0 + ;; + esac + + return ret +} + +# EO swarm + +# BO system + +__docker_system_commands() { + local -a _docker_system_subcommands + _docker_system_subcommands=( + "df:Show docker filesystem usage" + "events:Get real time events from the server" + "info:Display system-wide information" + "prune:Remove unused data" + ) + _describe -t docker-system-commands "docker system command" _docker_system_subcommands +} + +__docker_system_subcommand() { + local -a _command_args opts_help + local expl help="--help" + integer ret=1 + + opts_help=("(: -)--help[Print usage]") + + case "$words[1]" in + (df) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -v --verbose)"{-v,--verbose}"[Show detailed information on space usage]" && ret=0 + ;; + (events) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help)*"{-f=,--filter=}"[Filter values]:filter:__docker_complete_events_filter" \ + "($help)--since=[Events created since this timestamp]:timestamp: " \ + "($help)--until=[Events created until this timestamp]:timestamp: " \ + "($help)--format=[Format the output using the given go template]:template: " && ret=0 + ;; + (info) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -f --format)"{-f=,--format=}"[Format the output using the given go template]:template: " && ret=0 + ;; + (prune) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -a --all)"{-a,--all}"[Remove all unused data, not just dangling ones]" \ + "($help)*--filter=[Filter values]:filter:__docker_complete_prune_filters" \ + "($help -f --force)"{-f,--force}"[Do not prompt for confirmation]" && ret=0 + ;; + (help) + _arguments $(__docker_arguments) ":subcommand:__docker_volume_commands" && ret=0 + ;; + esac + + return ret +} + +# EO system + +# BO volume + +__docker_volume_complete_ls_filters() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + + if compset -P '*='; then + case "${${words[-1]%=*}#*=}" in + (dangling) + dangling_opts=('true' 'false') + _describe -t dangling-filter-opts "Dangling Filter Options" dangling_opts && ret=0 + ;; + (driver) + __docker_complete_info_plugins Volume && ret=0 + ;; + (name) + __docker_complete_volumes && ret=0 + ;; + 
*) + _message 'value' && ret=0 + ;; + esac + else + opts=('dangling' 'driver' 'label' 'name') + _describe -t filter-opts "Filter Options" opts -qS "=" && ret=0 + fi + + return ret +} + +__docker_complete_volumes() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + declare -a lines volumes + + lines=(${(f)${:-"$(_call_program commands docker $docker_options volume ls)"$'\n'}}) + + # Parse header line to find columns + local i=1 j=1 k header=${lines[1]} + declare -A begin end + while (( j < ${#header} - 1 )); do + i=$(( j + ${${header[$j,-1]}[(i)[^ ]]} - 1 )) + j=$(( i + ${${header[$i,-1]}[(i) ]} - 1 )) + k=$(( j + ${${header[$j,-1]}[(i)[^ ]]} - 2 )) + begin[${header[$i,$((j-1))]}]=$i + end[${header[$i,$((j-1))]}]=$k + done + end[${header[$i,$((j-1))]}]=-1 + lines=(${lines[2,-1]}) + + # Names + local line s + for line in $lines; do + s="${line[${begin[VOLUME NAME]},${end[VOLUME NAME]}]%% ##}" + s="$s:${(l:7:: :::)${${line[${begin[DRIVER]},${end[DRIVER]}]}%% ##}}" + volumes=($volumes $s) + done + + _describe -t volumes-list "volumes" volumes && ret=0 + return ret +} + +__docker_volume_commands() { + local -a _docker_volume_subcommands + _docker_volume_subcommands=( + "create:Create a volume" + "inspect:Display detailed information on one or more volumes" + "ls:List volumes" + "prune:Remove all unused volumes" + "rm:Remove one or more volumes" + ) + _describe -t docker-volume-commands "docker volume command" _docker_volume_subcommands +} + +__docker_volume_subcommand() { + local -a _command_args opts_help + local expl help="--help" + integer ret=1 + + opts_help=("(: -)--help[Print usage]") + + case "$words[1]" in + (create) + _arguments $(__docker_arguments) -A '-*' \ + $opts_help \ + "($help -d --driver)"{-d=,--driver=}"[Volume driver name]:Driver name:(local)" \ + "($help)*--label=[Set metadata for a volume]:label=value: " \ + "($help)*"{-o=,--opt=}"[Driver specific options]:Driver option: " \ + "($help -)1:Volume name: " && ret=0 + ;; + (inspect) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -f --format)"{-f=,--format=}"[Format the output using the given go template]:template: " \ + "($help -)1:volume:__docker_complete_volumes" && ret=0 + ;; + (ls) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help)*"{-f=,--filter=}"[Provide filter values]:filter:__docker_volume_complete_ls_filters" \ + "($help)--format=[Pretty-print volumes using a Go template]:template: " \ + "($help -q --quiet)"{-q,--quiet}"[Only display volume names]" && ret=0 + ;; + (prune) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -f --force)"{-f,--force}"[Do not prompt for confirmation]" && ret=0 + ;; + (rm) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -f --force)"{-f,--force}"[Force the removal of one or more volumes]" \ + "($help -):volume:__docker_complete_volumes" && ret=0 + ;; + (help) + _arguments $(__docker_arguments) ":subcommand:__docker_volume_commands" && ret=0 + ;; + esac + + return ret +} + +# EO volume + +__docker_caching_policy() { + oldp=( "$1"(Nmh+1) ) # 1 hour + (( $#oldp )) +} + +__docker_commands() { + local cache_policy + integer force_invalidation=0 + + zstyle -s ":completion:${curcontext}:" cache-policy cache_policy + if [[ -z "$cache_policy" ]]; then + zstyle ":completion:${curcontext}:" cache-policy __docker_caching_policy + fi + + if ( (( ! ${+_docker_hide_legacy_commands} )) || _cache_invalid docker_hide_legacy_commands ) \ + && ! 
_retrieve_cache docker_hide_legacy_commands; + then + _docker_hide_legacy_commands="${DOCKER_HIDE_LEGACY_COMMANDS}" + _store_cache docker_hide_legacy_commands _docker_hide_legacy_commands + fi + + if [[ "${_docker_hide_legacy_commands}" != "${DOCKER_HIDE_LEGACY_COMMANDS}" ]]; then + force_invalidation=1 + _docker_hide_legacy_commands="${DOCKER_HIDE_LEGACY_COMMANDS}" + _store_cache docker_hide_legacy_commands _docker_hide_legacy_commands + fi + + if ( [[ ${+_docker_subcommands} -eq 0 ]] || _cache_invalid docker_subcommands ) \ + && ! _retrieve_cache docker_subcommands || [[ ${force_invalidation} -eq 1 ]]; + then + local -a lines + lines=(${(f)"$(_call_program commands docker 2>&1)"}) + _docker_subcommands=(${${${(M)${lines[$((${lines[(i)*Commands:]} + 1)),-1]}:# *}## #}/ ##/:}) + _docker_subcommands=($_docker_subcommands 'daemon:Enable daemon mode' 'help:Show help for a command') + (( $#_docker_subcommands > 2 )) && _store_cache docker_subcommands _docker_subcommands + fi + _describe -t docker-commands "docker command" _docker_subcommands +} + +__docker_subcommand() { + local -a _command_args opts_help + local expl help="--help" + integer ret=1 + + opts_help=("(: -)--help[Print usage]") + + case "$words[1]" in + (attach|commit|cp|create|diff|exec|export|kill|logs|pause|unpause|port|rename|restart|rm|run|start|stats|stop|top|update|wait) + __docker_container_subcommand && ret=0 + ;; + (build|history|import|load|pull|push|save|tag) + __docker_image_subcommand && ret=0 + ;; + (checkpoint) + local curcontext="$curcontext" state + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -): :->command" \ + "($help -)*:: :->option-or-argument" && ret=0 + + case $state in + (command) + __docker_checkpoint_commands && ret=0 + ;; + (option-or-argument) + curcontext=${curcontext%:*:*}:docker-${words[-1]}: + __docker_checkpoint_subcommand && ret=0 + ;; + esac + ;; + (container) + local curcontext="$curcontext" state + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -): :->command" \ + "($help -)*:: :->option-or-argument" && ret=0 + + case $state in + (command) + __docker_container_commands && ret=0 + ;; + (option-or-argument) + curcontext=${curcontext%:*:*}:docker-${words[-1]}: + __docker_container_subcommand && ret=0 + ;; + esac + ;; + (daemon) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help)*--add-runtime=[Register an additional OCI compatible runtime]:runtime:__docker_complete_runtimes" \ + "($help)*--allow-nondistributable-artifacts=[Push nondistributable artifacts to specified registries]:registry: " \ + "($help)--api-cors-header=[CORS headers in the Engine API]:CORS headers: " \ + "($help)*--authorization-plugin=[Authorization plugins to load]" \ + "($help -b --bridge)"{-b=,--bridge=}"[Attach containers to a network bridge]:bridge:_net_interfaces" \ + "($help)--bip=[Network bridge IP]:IP address: " \ + "($help)--cgroup-parent=[Parent cgroup for all containers]:cgroup: " \ + "($help)--cluster-advertise=[Address or interface name to advertise]:Instance to advertise (host\:port): " \ + "($help)--cluster-store=[URL of the distributed storage backend]:Cluster Store:->cluster-store" \ + "($help)*--cluster-store-opt=[Cluster store options]:Cluster options:->cluster-store-options" \ + "($help)--config-file=[Path to daemon configuration file]:Config File:_files" \ + "($help)--containerd=[Path to containerd socket]:socket:_files -g \"*.sock\"" \ + "($help)--data-root=[Root directory of persisted Docker data]:path:_directories" \ + "($help -D --debug)"{-D,--debug}"[Enable debug 
mode]" \ + "($help)--default-gateway[Container default gateway IPv4 address]:IPv4 address: " \ + "($help)--default-gateway-v6[Container default gateway IPv6 address]:IPv6 address: " \ + "($help)--default-shm-size=[Default shm size for containers]:size:" \ + "($help)*--default-ulimit=[Default ulimits for containers]:ulimit: " \ + "($help)--disable-legacy-registry[Disable contacting legacy registries]" \ + "($help)*--dns=[DNS server to use]:DNS: " \ + "($help)*--dns-opt=[DNS options to use]:DNS option: " \ + "($help)*--dns-search=[DNS search domains to use]:DNS search: " \ + "($help)*--exec-opt=[Runtime execution options]:runtime execution options: " \ + "($help)--exec-root=[Root directory for execution state files]:path:_directories" \ + "($help)--experimental[Enable experimental features]" \ + "($help)--fixed-cidr=[IPv4 subnet for fixed IPs]:IPv4 subnet: " \ + "($help)--fixed-cidr-v6=[IPv6 subnet for fixed IPs]:IPv6 subnet: " \ + "($help -G --group)"{-G=,--group=}"[Group for the unix socket]:group:_groups" \ + "($help -H --host)"{-H=,--host=}"[tcp://host:port to bind/connect to]:host: " \ + "($help)--icc[Enable inter-container communication]" \ + "($help)--init[Run an init inside containers to forward signals and reap processes]" \ + "($help)--init-path=[Path to the docker-init binary]:docker-init binary:_files" \ + "($help)*--insecure-registry=[Enable insecure registry communication]:registry: " \ + "($help)--ip=[Default IP when binding container ports]" \ + "($help)--ip-forward[Enable net.ipv4.ip_forward]" \ + "($help)--ip-masq[Enable IP masquerading]" \ + "($help)--iptables[Enable addition of iptables rules]" \ + "($help)--ipv6[Enable IPv6 networking]" \ + "($help -l --log-level)"{-l=,--log-level=}"[Logging level]:level:(debug info warn error fatal)" \ + "($help)*--label=[Key=value labels]:label: " \ + "($help)--live-restore[Enable live restore of docker when containers are still running]" \ + "($help)--log-driver=[Default driver for container logs]:logging driver:__docker_complete_log_drivers" \ + "($help)*--log-opt=[Default log driver options for containers]:log driver options:__docker_complete_log_options" \ + "($help)--max-concurrent-downloads[Set the max concurrent downloads for each pull]" \ + "($help)--max-concurrent-uploads[Set the max concurrent uploads for each push]" \ + "($help)--mtu=[Network MTU]:mtu:(0 576 1420 1500 9000)" \ + "($help)--oom-score-adjust=[Set the oom_score_adj for the daemon]:oom-score:(-500)" \ + "($help -p --pidfile)"{-p=,--pidfile=}"[Path to use for daemon PID file]:PID file:_files" \ + "($help)--raw-logs[Full timestamps without ANSI coloring]" \ + "($help)*--registry-mirror=[Preferred Docker registry mirror]:registry mirror: " \ + "($help)--seccomp-profile=[Path to seccomp profile]:path:_files -g \"*.json\"" \ + "($help -s --storage-driver)"{-s=,--storage-driver=}"[Storage driver to use]:driver:(aufs btrfs devicemapper overlay overlay2 vfs zfs)" \ + "($help)--selinux-enabled[Enable selinux support]" \ + "($help)--shutdown-timeout=[Set the shutdown timeout value in seconds]:time: " \ + "($help)*--storage-opt=[Storage driver options]:storage driver options: " \ + "($help)--tls[Use TLS]" \ + "($help)--tlscacert=[Trust certs signed only by this CA]:PEM file:_files -g \"*.(pem|crt)\"" \ + "($help)--tlscert=[Path to TLS certificate file]:PEM file:_files -g \"*.(pem|crt)\"" \ + "($help)--tlskey=[Path to TLS key file]:Key file:_files -g \"*.(pem|key)\"" \ + "($help)--tlsverify[Use TLS and verify the remote]" \ + "($help)--userns-remap=[User/Group setting for 
user namespaces]:user\:group:->users-groups" \ + "($help)--userland-proxy[Use userland proxy for loopback traffic]" \ + "($help)--userland-proxy-path=[Path to the userland proxy binary]:binary:_files" && ret=0 + + case $state in + (cluster-store) + if compset -P '*://'; then + _message 'host:port' && ret=0 + else + store=('consul' 'etcd' 'zk') + _describe -t cluster-store "Cluster Store" store -qS "://" && ret=0 + fi + ;; + (cluster-store-options) + if compset -P '*='; then + _files && ret=0 + else + opts=('discovery.heartbeat' 'discovery.ttl' 'kv.cacertfile' 'kv.certfile' 'kv.keyfile' 'kv.path') + _describe -t cluster-store-opts "Cluster Store Options" opts -qS "=" && ret=0 + fi + ;; + (users-groups) + if compset -P '*:'; then + _groups && ret=0 + else + _describe -t userns-default "default Docker user management" '(default)' && ret=0 + _users && ret=0 + fi + ;; + esac + ;; + (events|info) + __docker_system_subcommand && ret=0 + ;; + (image) + local curcontext="$curcontext" state + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -): :->command" \ + "($help -)*:: :->option-or-argument" && ret=0 + + case $state in + (command) + __docker_image_commands && ret=0 + ;; + (option-or-argument) + curcontext=${curcontext%:*:*}:docker-${words[-1]}: + __docker_image_subcommand && ret=0 + ;; + esac + ;; + (images) + words[1]='ls' + __docker_image_subcommand && ret=0 + ;; + (inspect) + local state + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -f --format)"{-f=,--format=}"[Format the output using the given go template]:template: " \ + "($help -s --size)"{-s,--size}"[Display total file sizes if the type is container]" \ + "($help)--type=[Return JSON for specified type]:type:(container image network node plugin secret service volume)" \ + "($help -)*: :->values" && ret=0 + + case $state in + (values) + if [[ ${words[(r)--type=container]} == --type=container ]]; then + __docker_complete_containers && ret=0 + elif [[ ${words[(r)--type=image]} == --type=image ]]; then + __docker_complete_images && ret=0 + elif [[ ${words[(r)--type=network]} == --type=network ]]; then + __docker_complete_networks && ret=0 + elif [[ ${words[(r)--type=node]} == --type=node ]]; then + __docker_complete_nodes && ret=0 + elif [[ ${words[(r)--type=plugin]} == --type=plugin ]]; then + __docker_complete_plugins && ret=0 + elif [[ ${words[(r)--type=secret]} == --type=secret ]]; then + __docker_complete_secrets && ret=0 + elif [[ ${words[(r)--type=service]} == --type=service ]]; then + __docker_complete_services && ret=0 + elif [[ ${words[(r)--type=volume]} == --type=volume ]]; then + __docker_complete_volumes && ret=0 + else + __docker_complete_containers + __docker_complete_images + __docker_complete_networks + __docker_complete_nodes + __docker_complete_plugins + __docker_complete_secrets + __docker_complete_services + __docker_complete_volumes && ret=0 + fi + ;; + esac + ;; + (login) + _arguments $(__docker_arguments) -A '-*' \ + $opts_help \ + "($help -p --password)"{-p=,--password=}"[Password]:password: " \ + "($help -u --user)"{-u=,--user=}"[Username]:username: " \ + "($help -)1:server: " && ret=0 + ;; + (logout) + _arguments $(__docker_arguments) -A '-*' \ + $opts_help \ + "($help -)1:server: " && ret=0 + ;; + (network) + local curcontext="$curcontext" state + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -): :->command" \ + "($help -)*:: :->option-or-argument" && ret=0 + + case $state in + (command) + __docker_network_commands && ret=0 + ;; + (option-or-argument) + 
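# Rebuild $curcontext so the nested completion runs in its own + # docker-<subcommand> context, letting zstyle settings target the + # subcommand specifically. + 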
curcontext=${curcontext%:*:*}:docker-${words[-1]}: + __docker_network_subcommand && ret=0 + ;; + esac + ;; + (node) + local curcontext="$curcontext" state + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -): :->command" \ + "($help -)*:: :->option-or-argument" && ret=0 + + case $state in + (command) + __docker_node_commands && ret=0 + ;; + (option-or-argument) + curcontext=${curcontext%:*:*}:docker-${words[-1]}: + __docker_node_subcommand && ret=0 + ;; + esac + ;; + (plugin) + local curcontext="$curcontext" state + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -): :->command" \ + "($help -)*:: :->option-or-argument" && ret=0 + + case $state in + (command) + __docker_plugin_commands && ret=0 + ;; + (option-or-argument) + curcontext=${curcontext%:*:*}:docker-${words[-1]}: + __docker_plugin_subcommand && ret=0 + ;; + esac + ;; + (ps) + words[1]='ls' + __docker_container_subcommand && ret=0 + ;; + (rmi) + words[1]='rm' + __docker_image_subcommand && ret=0 + ;; + (search) + _arguments $(__docker_arguments) -A '-*' \ + $opts_help \ + "($help)*"{-f=,--filter=}"[Filter values]:filter:__docker_complete_search_filters" \ + "($help)--limit=[Maximum returned search results]:limit:(1 5 10 25 50)" \ + "($help)--no-trunc[Do not truncate output]" \ + "($help -):term: " && ret=0 + ;; + (secret) + local curcontext="$curcontext" state + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -): :->command" \ + "($help -)*:: :->option-or-argument" && ret=0 + + case $state in + (command) + __docker_secret_commands && ret=0 + ;; + (option-or-argument) + curcontext=${curcontext%:*:*}:docker-${words[-1]}: + __docker_secret_subcommand && ret=0 + ;; + esac + ;; + (service) + local curcontext="$curcontext" state + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -): :->command" \ + "($help -)*:: :->option-or-argument" && ret=0 + + case $state in + (command) + __docker_service_commands && ret=0 + ;; + (option-or-argument) + curcontext=${curcontext%:*:*}:docker-${words[-1]}: + __docker_service_subcommand && ret=0 + ;; + esac + ;; + (stack) + local curcontext="$curcontext" state + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -): :->command" \ + "($help -)*:: :->option-or-argument" && ret=0 + + case $state in + (command) + __docker_stack_commands && ret=0 + ;; + (option-or-argument) + curcontext=${curcontext%:*:*}:docker-${words[-1]}: + __docker_stack_subcommand && ret=0 + ;; + esac + ;; + (swarm) + local curcontext="$curcontext" state + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -): :->command" \ + "($help -)*:: :->option-or-argument" && ret=0 + + case $state in + (command) + __docker_swarm_commands && ret=0 + ;; + (option-or-argument) + curcontext=${curcontext%:*:*}:docker-${words[-1]}: + __docker_swarm_subcommand && ret=0 + ;; + esac + ;; + (system) + local curcontext="$curcontext" state + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -): :->command" \ + "($help -)*:: :->option-or-argument" && ret=0 + + case $state in + (command) + __docker_system_commands && ret=0 + ;; + (option-or-argument) + curcontext=${curcontext%:*:*}:docker-${words[-1]}: + __docker_system_subcommand && ret=0 + ;; + esac + ;; + (version) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -f --format)"{-f=,--format=}"[Format the output using the given go template]:template: " && ret=0 + ;; + (volume) + local curcontext="$curcontext" state + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -): :->command" \ + "($help -)*:: 
:->option-or-argument" && ret=0 + + case $state in + (command) + __docker_volume_commands && ret=0 + ;; + (option-or-argument) + curcontext=${curcontext%:*:*}:docker-${words[-1]}: + __docker_volume_subcommand && ret=0 + ;; + esac + ;; + (help) + _arguments $(__docker_arguments) ":subcommand:__docker_commands" && ret=0 + ;; + esac + + return ret +} + +_docker() { + # Support for subservices, which allows for `compdef _docker docker-shell=_docker_containers`. + # Based on /usr/share/zsh/functions/Completion/Unix/_git without support for `ret`. + if [[ $service != docker ]]; then + _call_function - _$service + return + fi + + local curcontext="$curcontext" state line help="-h --help" + integer ret=1 + typeset -A opt_args + + _arguments $(__docker_arguments) -C \ + "(: -)"{-h,--help}"[Print usage]" \ + "($help)--config[Location of client config files]:path:_directories" \ + "($help -D --debug)"{-D,--debug}"[Enable debug mode]" \ + "($help -H --host)"{-H=,--host=}"[tcp://host:port to bind/connect to]:host: " \ + "($help -l --log-level)"{-l=,--log-level=}"[Logging level]:level:(debug info warn error fatal)" \ + "($help)--tls[Use TLS]" \ + "($help)--tlscacert=[Trust certs signed only by this CA]:PEM file:_files -g \"*.(pem|crt)\"" \ + "($help)--tlscert=[Path to TLS certificate file]:PEM file:_files -g \"*.(pem|crt)\"" \ + "($help)--tlskey=[Path to TLS key file]:Key file:_files -g \"*.(pem|key)\"" \ + "($help)--tlsverify[Use TLS and verify the remote]" \ + "($help)--userland-proxy[Use userland proxy for loopback traffic]" \ + "($help -v --version)"{-v,--version}"[Print version information and quit]" \ + "($help -): :->command" \ + "($help -)*:: :->option-or-argument" && ret=0 + + local host=${opt_args[-H]}${opt_args[--host]} + local config=${opt_args[--config]} + local docker_options="${host:+--host $host} ${config:+--config $config}" + + case $state in + (command) + __docker_commands && ret=0 + ;; + (option-or-argument) + curcontext=${curcontext%:*:*}:docker-$words[1]: + __docker_subcommand && ret=0 + ;; + esac + + return ret +} + +_dockerd() { + integer ret=1 + words[1]='daemon' + __docker_subcommand && ret=0 + return ret +} + +_docker "$@" + +# Local Variables: +# mode: Shell-Script +# sh-indentation: 4 +# indent-tabs-mode: nil +# sh-basic-offset: 4 +# End: +# vim: ft=zsh sw=4 ts=4 et diff --git a/components/engine/contrib/desktop-integration/README.md b/components/engine/contrib/desktop-integration/README.md new file mode 100644 index 0000000000..85a01b9ee9 --- /dev/null +++ b/components/engine/contrib/desktop-integration/README.md @@ -0,0 +1,11 @@ +Desktop Integration +=================== + +The ./contrib/desktop-integration directory contains examples of typical dockerized +desktop applications. + +Examples +======== + +* Chromium: ./chromium/Dockerfile shows a way to dockerize a common application +* Gparted: ./gparted/Dockerfile shows a way to dockerize a common application with devices diff --git a/components/engine/contrib/desktop-integration/chromium/Dockerfile b/components/engine/contrib/desktop-integration/chromium/Dockerfile new file mode 100644 index 0000000000..187281644f --- /dev/null +++ b/components/engine/contrib/desktop-integration/chromium/Dockerfile @@ -0,0 +1,36 @@ +# VERSION: 0.1 +# DESCRIPTION: Create chromium container with its dependencies +# AUTHOR: Jessica Frazelle +# COMMENTS: +# This file describes how to build a Chromium container with all +# dependencies installed. It uses the native X11 unix socket.
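+# The container can draw on the host display because /tmp/.X11-unix is +# bind-mounted into it and DISPLAY is passed through (see USAGE below).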
+# Tested on Debian Jessie
+# USAGE:
+# # Download Chromium Dockerfile
+# wget http://raw.githubusercontent.com/docker/docker/master/contrib/desktop-integration/chromium/Dockerfile
+#
+# # Build chromium image
+# docker build -t chromium .
+#
+# # Run stateful data-on-host chromium. For ephemeral, remove -v /data/chromium:/data
+# docker run -v /data/chromium:/data -v /tmp/.X11-unix:/tmp/.X11-unix \
+#    -e DISPLAY=unix$DISPLAY chromium
+
+# # To run stateful dockerized data containers
+# docker run --volumes-from chromium-data -v /tmp/.X11-unix:/tmp/.X11-unix \
+#    -e DISPLAY=unix$DISPLAY chromium
+
+# Base docker image
+FROM debian:jessie
+LABEL maintainer Jessica Frazelle
+
+# Install Chromium
+RUN apt-get update && apt-get install -y \
+    chromium \
+    chromium-l10n \
+    libcanberra-gtk-module \
+    libexif-dev \
+    --no-install-recommends
+
+# Autorun chromium
+CMD ["/usr/bin/chromium", "--no-sandbox", "--user-data-dir=/data"]
diff --git a/components/engine/contrib/desktop-integration/gparted/Dockerfile b/components/engine/contrib/desktop-integration/gparted/Dockerfile
new file mode 100644
index 0000000000..8a9b646ee4
--- /dev/null
+++ b/components/engine/contrib/desktop-integration/gparted/Dockerfile
@@ -0,0 +1,31 @@
+# VERSION: 0.1
+# DESCRIPTION: Create gparted container with its dependencies
+# AUTHOR: Jessica Frazelle
+# COMMENTS:
+# This file describes how to build a gparted container with all
+# dependencies installed. It uses native X11 unix socket.
+# Tested on Debian Jessie
+# USAGE:
+# # Download gparted Dockerfile
+# wget http://raw.githubusercontent.com/docker/docker/master/contrib/desktop-integration/gparted/Dockerfile
+#
+# # Build gparted image
+# docker build -t gparted .
+#
+# docker run -v /tmp/.X11-unix:/tmp/.X11-unix \
+#    --device=/dev/sda:/dev/sda \
+#    -e DISPLAY=unix$DISPLAY gparted
+#
+
+# Base docker image
+FROM debian:jessie
+LABEL maintainer Jessica Frazelle
+
+# Install Gparted and its dependencies
+RUN apt-get update && apt-get install -y \
+    gparted \
+    libcanberra-gtk-module \
+    --no-install-recommends
+
+# Autorun gparted
+CMD ["/usr/sbin/gparted"]
diff --git a/components/engine/contrib/docker-device-tool/README.md b/components/engine/contrib/docker-device-tool/README.md
new file mode 100644
index 0000000000..6c54d5995f
--- /dev/null
+++ b/components/engine/contrib/docker-device-tool/README.md
@@ -0,0 +1,14 @@
+Docker device tool for devicemapper storage driver backend
+===========================================================
+
+The ./contrib/docker-device-tool directory contains a tool to manipulate the devicemapper thin pool.
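+
+Once compiled (see Compile below), the subcommands mirror the usage string the
+binary prints itself: `status`, `list`, `device <id>`, `resize <new-pool-size>`,
+`snap <new-id> <base-id>`, `remove <id>`, and `mount <id> <mountpoint>`.
+A sketch of a typical session (device ID and size are placeholders; run as
+root against the default /var/lib/docker, or pass `-r` for another root):
+
+    $ ./device_tool status        # pool name plus data/metadata usage
+    $ ./device_tool list          # device IDs known to the pool
+    $ ./device_tool device 24     # status of a single thin device
+    $ ./device_tool resize 100GB  # grow the thin pool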
+ +Compile +======== + + $ make shell + ## inside build container + $ go build contrib/docker-device-tool/device_tool.go + + # if devicemapper version is old and compilation fails, compile with `libdm_no_deferred_remove` tag + $ go build -tags libdm_no_deferred_remove contrib/docker-device-tool/device_tool.go diff --git a/components/engine/contrib/docker-device-tool/device_tool.go b/components/engine/contrib/docker-device-tool/device_tool.go new file mode 100644 index 0000000000..fc171666fa --- /dev/null +++ b/components/engine/contrib/docker-device-tool/device_tool.go @@ -0,0 +1,176 @@ +// +build !windows,!solaris + +package main + +import ( + "flag" + "fmt" + "os" + "path" + "sort" + "strconv" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/daemon/graphdriver/devmapper" + "github.com/docker/docker/pkg/devicemapper" +) + +func usage() { + fmt.Fprintf(os.Stderr, "Usage: %s [status] | [list] | [device id] | [resize new-pool-size] | [snap new-id base-id] | [remove id] | [mount id mountpoint]\n", os.Args[0]) + flag.PrintDefaults() + os.Exit(1) +} + +func byteSizeFromString(arg string) (int64, error) { + digits := "" + rest := "" + last := strings.LastIndexAny(arg, "0123456789") + if last >= 0 { + digits = arg[:last+1] + rest = arg[last+1:] + } + + val, err := strconv.ParseInt(digits, 10, 64) + if err != nil { + return val, err + } + + rest = strings.ToLower(strings.TrimSpace(rest)) + + var multiplier int64 = 1 + switch rest { + case "": + multiplier = 1 + case "k", "kb": + multiplier = 1024 + case "m", "mb": + multiplier = 1024 * 1024 + case "g", "gb": + multiplier = 1024 * 1024 * 1024 + case "t", "tb": + multiplier = 1024 * 1024 * 1024 * 1024 + default: + return 0, fmt.Errorf("Unknown size unit: %s", rest) + } + + return val * multiplier, nil +} + +func main() { + root := flag.String("r", "/var/lib/docker", "Docker root dir") + flDebug := flag.Bool("D", false, "Debug mode") + + flag.Parse() + + if *flDebug { + os.Setenv("DEBUG", "1") + logrus.SetLevel(logrus.DebugLevel) + } + + if flag.NArg() < 1 { + usage() + } + + args := flag.Args() + + home := path.Join(*root, "devicemapper") + devices, err := devmapper.NewDeviceSet(home, false, nil, nil, nil) + if err != nil { + fmt.Println("Can't initialize device mapper: ", err) + os.Exit(1) + } + + switch args[0] { + case "status": + status := devices.Status() + fmt.Printf("Pool name: %s\n", status.PoolName) + fmt.Printf("Data Loopback file: %s\n", status.DataLoopback) + fmt.Printf("Metadata Loopback file: %s\n", status.MetadataLoopback) + fmt.Printf("Sector size: %d\n", status.SectorSize) + fmt.Printf("Data use: %d of %d (%.1f %%)\n", status.Data.Used, status.Data.Total, 100.0*float64(status.Data.Used)/float64(status.Data.Total)) + fmt.Printf("Metadata use: %d of %d (%.1f %%)\n", status.Metadata.Used, status.Metadata.Total, 100.0*float64(status.Metadata.Used)/float64(status.Metadata.Total)) + break + case "list": + ids := devices.List() + sort.Strings(ids) + for _, id := range ids { + fmt.Println(id) + } + break + case "device": + if flag.NArg() < 2 { + usage() + } + status, err := devices.GetDeviceStatus(args[1]) + if err != nil { + fmt.Println("Can't get device info: ", err) + os.Exit(1) + } + fmt.Printf("Id: %d\n", status.DeviceID) + fmt.Printf("Size: %d\n", status.Size) + fmt.Printf("Transaction Id: %d\n", status.TransactionID) + fmt.Printf("Size in Sectors: %d\n", status.SizeInSectors) + fmt.Printf("Mapped Sectors: %d\n", status.MappedSectors) + fmt.Printf("Highest Mapped Sector: %d\n", status.HighestMappedSector) + 
break + case "resize": + if flag.NArg() < 2 { + usage() + } + + size, err := byteSizeFromString(args[1]) + if err != nil { + fmt.Println("Invalid size: ", err) + os.Exit(1) + } + + err = devices.ResizePool(size) + if err != nil { + fmt.Println("Error resizing pool: ", err) + os.Exit(1) + } + + break + case "snap": + if flag.NArg() < 3 { + usage() + } + + err := devices.AddDevice(args[1], args[2], nil) + if err != nil { + fmt.Println("Can't create snap device: ", err) + os.Exit(1) + } + break + case "remove": + if flag.NArg() < 2 { + usage() + } + + err := devicemapper.RemoveDevice(args[1]) + if err != nil { + fmt.Println("Can't remove device: ", err) + os.Exit(1) + } + break + case "mount": + if flag.NArg() < 3 { + usage() + } + + err := devices.MountDevice(args[1], args[2], "") + if err != nil { + fmt.Println("Can't mount device: ", err) + os.Exit(1) + } + break + default: + fmt.Printf("Unknown command %s\n", args[0]) + usage() + + os.Exit(1) + } + + return +} diff --git a/components/engine/contrib/docker-device-tool/device_tool_windows.go b/components/engine/contrib/docker-device-tool/device_tool_windows.go new file mode 100644 index 0000000000..da29a2cadf --- /dev/null +++ b/components/engine/contrib/docker-device-tool/device_tool_windows.go @@ -0,0 +1,4 @@ +package main + +func main() { +} diff --git a/components/engine/contrib/docker-machine-install-bundle.sh b/components/engine/contrib/docker-machine-install-bundle.sh new file mode 100755 index 0000000000..860598943b --- /dev/null +++ b/components/engine/contrib/docker-machine-install-bundle.sh @@ -0,0 +1,111 @@ +#!/usr/bin/env bash +# +# This script installs the bundle to Docker Machine instances, for the purpose +# of testing the latest Docker with Swarm mode enabled. +# Do not use in production. 
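+#
+# Usage example (run from the repository root after building binaries;
+# machine names are placeholders for existing Docker Machine instances):
+#   $ ./contrib/docker-machine-install-bundle.sh install machine0 machine1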
+#
+# Requirements (on host to run this script)
+# - bash is installed
+# - Docker Machine is installed
+# - GNU tar is installed
+#
+# Requirements (on Docker machine instances)
+# - Docker can be managed via one of `systemctl`, `service`, or `/etc/init.d/docker`
+#
+set -e
+set -o pipefail
+
+errexit() {
+    echo "$1"
+    exit 1
+}
+
+BUNDLE="bundles/$(cat VERSION)"
+
+bundle_files(){
+    # prefer dynbinary if exists
+    for f in dockerd docker-proxy; do
+        if [ -d $BUNDLE/dynbinary-daemon ]; then
+            echo $BUNDLE/dynbinary-daemon/$f
+        else
+            echo $BUNDLE/binary-daemon/$f
+        fi
+    done
+    for f in docker-containerd docker-containerd-ctr docker-containerd-shim docker-init docker-runc; do
+        echo $BUNDLE/binary-daemon/$f
+    done
+    if [ -d $BUNDLE/dynbinary-client ]; then
+        echo $BUNDLE/dynbinary-client/docker
+    else
+        echo $BUNDLE/binary-client/docker
+    fi
+}
+
+control_docker(){
+    m=$1; op=$2
+    # NOTE: `docker-machine ssh $m sh -c "foo bar"` does not work
+    #       (but `docker-machine ssh $m sh -c "foo\ bar"` works)
+    #       Anyway, we avoid `sh -c` here to avoid confusion
+    cat <<EOF | docker-machine ssh $m sudo sh
+if command -v systemctl > /dev/null; then
+    systemctl $op docker
+elif command -v service > /dev/null; then
+    service docker $op
+elif [ -x /etc/init.d/docker ]; then
+    /etc/init.d/docker $op
+else
+    echo "not sure how to control the docker daemon"
+    exit 1
+fi
+EOF
+}
+
+detect_prefix(){
+    m=$1
+    script='dirname $(dirname $(which dockerd))'
+    echo $script | docker-machine ssh $m sh
+}
+
+install_to(){
+    m=$1; shift; files=$@
+    echo "$m: detecting docker"
+    prefix=$(detect_prefix $m)
+    echo "$m: detected docker on $prefix"
+    echo "$m: stopping docker"
+    control_docker $m stop
+    echo "$m: installing docker"
+    # NOTE: GNU tar is required because we use --transform here
+    # TODO: compression (should not be default)
+    tar ch --transform 's/.*\///' $files | docker-machine ssh $m sudo tar Cx $prefix/bin
+    echo "$m: starting docker"
+    control_docker $m start
+    echo "$m: done"
+}
+
+check_prereq(){
+    command -v docker-machine > /dev/null || errexit "docker-machine not installed"
+    ( tar --version | grep GNU > /dev/null ) || errexit "GNU tar not installed"
+}
+
+case "$1" in
+    "install")
+        shift; machines=$@
+        check_prereq
+        files=$(bundle_files)
+        echo "Files to be installed:"
+        for f in $files; do echo $f; done
+        pids=()
+        for m in $machines; do
+            install_to $m $files &
+            pids+=($!)
+        done
+        status=0
+        for pid in ${pids[@]}; do
+            wait $pid || { status=$?; echo "background process $pid failed with exit status $status"; }
+        done
+        exit $status
+        ;;
+    *)
+        errexit "Usage: $0 install MACHINES"
+        ;;
+esac
diff --git a/components/engine/contrib/dockerize-disk.sh b/components/engine/contrib/dockerize-disk.sh
new file mode 100755
index 0000000000..444e243abe
--- /dev/null
+++ b/components/engine/contrib/dockerize-disk.sh
@@ -0,0 +1,118 @@
+#!/usr/bin/env bash
+set -e
+
+if ! command -v qemu-nbd &> /dev/null; then
+    echo >&2 'error: "qemu-nbd" not found!'
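+    # hint (distro-specific assumption): on Debian/Ubuntu, qemu-nbd ships in
+    # the qemu-utils package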
+ exit 1 +fi + +usage() { + echo "Convert disk image to docker image" + echo "" + echo "usage: $0 image-name disk-image-file [ base-image ]" + echo " ie: $0 cirros:0.3.3 cirros-0.3.3-x86_64-disk.img" + echo " $0 ubuntu:cloud ubuntu-14.04-server-cloudimg-amd64-disk1.img ubuntu:14.04" +} + +if [ "$#" -lt 2 ]; then + usage + exit 1 +fi + +CURDIR=$(pwd) + +image_name="${1%:*}" +image_tag="${1#*:}" +if [ "$image_tag" == "$1" ]; then + image_tag="latest" +fi + +disk_image_file="$2" +docker_base_image="$3" + +block_device=/dev/nbd0 + +builddir=$(mktemp -d) + +cleanup() { + umount "$builddir/disk_image" || true + umount "$builddir/workdir" || true + qemu-nbd -d $block_device &> /dev/null || true + rm -rf $builddir +} +trap cleanup EXIT + +# Mount disk image +modprobe nbd max_part=63 +qemu-nbd -rc ${block_device} -P 1 "$disk_image_file" +mkdir "$builddir/disk_image" +mount -o ro ${block_device} "$builddir/disk_image" + +mkdir "$builddir/workdir" +mkdir "$builddir/diff" + +base_image_mounts="" + +# Unpack base image +if [ -n "$docker_base_image" ]; then + mkdir -p "$builddir/base" + docker pull "$docker_base_image" + docker save "$docker_base_image" | tar -xC "$builddir/base" + + image_id=$(docker inspect -f "{{.Id}}" "$docker_base_image") + while [ -n "$image_id" ]; do + mkdir -p "$builddir/base/$image_id/layer" + tar -xf "$builddir/base/$image_id/layer.tar" -C "$builddir/base/$image_id/layer" + + base_image_mounts="${base_image_mounts}:$builddir/base/$image_id/layer=ro+wh" + image_id=$(docker inspect -f "{{.Parent}}" "$image_id") + done +fi + +# Mount work directory +mount -t aufs -o "br=$builddir/diff=rw${base_image_mounts},dio,xino=/dev/shm/aufs.xino" none "$builddir/workdir" + +# Update files +cd $builddir +LC_ALL=C diff -rq disk_image workdir \ + | sed -re "s|Only in workdir(.*?): |DEL \1/|g;s|Only in disk_image(.*?): |ADD \1/|g;s|Files disk_image/(.+) and workdir/(.+) differ|UPDATE /\1|g" \ + | while read action entry; do + case "$action" in + ADD|UPDATE) + cp -a "disk_image$entry" "workdir$entry" + ;; + DEL) + rm -rf "workdir$entry" + ;; + *) + echo "Error: unknown diff line: $action $entry" >&2 + ;; + esac + done + +# Pack new image +new_image_id="$(for i in $(seq 1 32); do printf "%02x" $(($RANDOM % 256)); done)" +mkdir -p $builddir/result/$new_image_id +cd diff +tar -cf $builddir/result/$new_image_id/layer.tar * +echo "1.0" > $builddir/result/$new_image_id/VERSION +cat > $builddir/result/$new_image_id/json <<-EOS +{ "docker_version": "1.4.1" +, "id": "$new_image_id" +, "created": "$(date -u +%Y-%m-%dT%H:%M:%S.%NZ)" +EOS + +if [ -n "$docker_base_image" ]; then + image_id=$(docker inspect -f "{{.Id}}" "$docker_base_image") + echo ", \"parent\": \"$image_id\"" >> $builddir/result/$new_image_id/json +fi + +echo "}" >> $builddir/result/$new_image_id/json + +echo "{\"$image_name\":{\"$image_tag\":\"$new_image_id\"}}" > $builddir/result/repositories + +cd $builddir/result + +# mkdir -p $CURDIR/$image_name +# cp -r * $CURDIR/$image_name +tar -c * | docker load diff --git a/components/engine/contrib/download-frozen-image-v1.sh b/components/engine/contrib/download-frozen-image-v1.sh new file mode 100755 index 0000000000..77c91d1f1b --- /dev/null +++ b/components/engine/contrib/download-frozen-image-v1.sh @@ -0,0 +1,108 @@ +#!/usr/bin/env bash +set -e + +# hello-world latest ef872312fe1b 3 months ago 910 B +# hello-world latest ef872312fe1bbc5e05aae626791a47ee9b032efa8f3bda39cc0be7b56bfe59b9 3 months ago 910 B + +# debian latest f6fab3b798be 10 weeks ago 85.1 MB +# debian latest 
f6fab3b798be3174f45aa1eb731f8182705555f89c9026d8c1ef230cbf8301dd 10 weeks ago 85.1 MB + +if ! command -v curl &> /dev/null; then + echo >&2 'error: "curl" not found!' + exit 1 +fi + +usage() { + echo "usage: $0 dir image[:tag][@image-id] ..." + echo " ie: $0 /tmp/hello-world hello-world" + echo " $0 /tmp/debian-jessie debian:jessie" + echo " $0 /tmp/old-hello-world hello-world@ef872312fe1bbc5e05aae626791a47ee9b032efa8f3bda39cc0be7b56bfe59b9" + echo " $0 /tmp/old-debian debian:latest@f6fab3b798be3174f45aa1eb731f8182705555f89c9026d8c1ef230cbf8301dd" + [ -z "$1" ] || exit "$1" +} + +dir="$1" # dir for building tar in +shift || usage 1 >&2 + +[ $# -gt 0 -a "$dir" ] || usage 2 >&2 +mkdir -p "$dir" + +# hacky workarounds for Bash 3 support (no associative arrays) +images=() +rm -f "$dir"/tags-*.tmp +# repositories[busybox]='"latest": "...", "ubuntu-14.04": "..."' + +while [ $# -gt 0 ]; do + imageTag="$1" + shift + image="${imageTag%%[:@]*}" + tag="${imageTag#*:}" + imageId="${tag##*@}" + [ "$imageId" != "$tag" ] || imageId= + [ "$tag" != "$imageTag" ] || tag='latest' + tag="${tag%@*}" + + imageFile="${image//\//_}" # "/" can't be in filenames :) + + token="$(curl -sSL -o /dev/null -D- -H 'X-Docker-Token: true' "https://index.docker.io/v1/repositories/$image/images" | tr -d '\r' | awk -F ': *' '$1 == "X-Docker-Token" { print $2 }')" + + if [ -z "$imageId" ]; then + imageId="$(curl -sSL -H "Authorization: Token $token" "https://registry-1.docker.io/v1/repositories/$image/tags/$tag")" + imageId="${imageId//\"/}" + fi + + ancestryJson="$(curl -sSL -H "Authorization: Token $token" "https://registry-1.docker.io/v1/images/$imageId/ancestry")" + if [ "${ancestryJson:0:1}" != '[' ]; then + echo >&2 "error: /v1/images/$imageId/ancestry returned something unexpected:" + echo >&2 " $ancestryJson" + exit 1 + fi + + IFS=',' + ancestry=( ${ancestryJson//[\[\] \"]/} ) + unset IFS + + if [ -s "$dir/tags-$imageFile.tmp" ]; then + echo -n ', ' >> "$dir/tags-$imageFile.tmp" + else + images=( "${images[@]}" "$image" ) + fi + echo -n '"'"$tag"'": "'"$imageId"'"' >> "$dir/tags-$imageFile.tmp" + + echo "Downloading '$imageTag' (${#ancestry[@]} layers)..." + for imageId in "${ancestry[@]}"; do + mkdir -p "$dir/$imageId" + echo '1.0' > "$dir/$imageId/VERSION" + + curl -sSL -H "Authorization: Token $token" "https://registry-1.docker.io/v1/images/$imageId/json" -o "$dir/$imageId/json" + + # TODO figure out why "-C -" doesn't work here + # "curl: (33) HTTP server doesn't seem to support byte ranges. Cannot resume." + # "HTTP/1.1 416 Requested Range Not Satisfiable" + if [ -f "$dir/$imageId/layer.tar" ]; then + # TODO hackpatch for no -C support :'( + echo "skipping existing ${imageId:0:12}" + continue + fi + curl -SL --progress -H "Authorization: Token $token" "https://registry-1.docker.io/v1/images/$imageId/layer" -o "$dir/$imageId/layer.tar" # -C - + done + echo +done + +echo -n '{' > "$dir/repositories" +firstImage=1 +for image in "${images[@]}"; do + imageFile="${image//\//_}" # "/" can't be in filenames :) + + [ "$firstImage" ] || echo -n ',' >> "$dir/repositories" + firstImage= + echo -n $'\n\t' >> "$dir/repositories" + echo -n '"'"$image"'": { '"$(cat "$dir/tags-$imageFile.tmp")"' }' >> "$dir/repositories" +done +echo -n $'\n}\n' >> "$dir/repositories" + +rm -f "$dir"/tags-*.tmp + +echo "Download of images into '$dir' complete." +echo "Use something like the following to load the result into a Docker daemon:" +echo " tar -cC '$dir' . 
| docker load" diff --git a/components/engine/contrib/download-frozen-image-v2.sh b/components/engine/contrib/download-frozen-image-v2.sh new file mode 100755 index 0000000000..37b5038938 --- /dev/null +++ b/components/engine/contrib/download-frozen-image-v2.sh @@ -0,0 +1,275 @@ +#!/usr/bin/env bash +set -eo pipefail + +# hello-world latest ef872312fe1b 3 months ago 910 B +# hello-world latest ef872312fe1bbc5e05aae626791a47ee9b032efa8f3bda39cc0be7b56bfe59b9 3 months ago 910 B + +# debian latest f6fab3b798be 10 weeks ago 85.1 MB +# debian latest f6fab3b798be3174f45aa1eb731f8182705555f89c9026d8c1ef230cbf8301dd 10 weeks ago 85.1 MB + +if ! command -v curl &> /dev/null; then + echo >&2 'error: "curl" not found!' + exit 1 +fi +if ! command -v jq &> /dev/null; then + echo >&2 'error: "jq" not found!' + exit 1 +fi + +usage() { + echo "usage: $0 dir image[:tag][@digest] ..." + echo " $0 /tmp/old-hello-world hello-world:latest@sha256:8be990ef2aeb16dbcb9271ddfe2610fa6658d13f6dfb8bc72074cc1ca36966a7" + [ -z "$1" ] || exit "$1" +} + +dir="$1" # dir for building tar in +shift || usage 1 >&2 + +[ $# -gt 0 -a "$dir" ] || usage 2 >&2 +mkdir -p "$dir" + +# hacky workarounds for Bash 3 support (no associative arrays) +images=() +rm -f "$dir"/tags-*.tmp +manifestJsonEntries=() +doNotGenerateManifestJson= +# repositories[busybox]='"latest": "...", "ubuntu-14.04": "..."' + +# bash v4 on Windows CI requires CRLF separator +newlineIFS=$'\n' +if [ "$(go env GOHOSTOS)" = 'windows' ]; then + major=$(echo ${BASH_VERSION%%[^0.9]} | cut -d. -f1) + if [ "$major" -ge 4 ]; then + newlineIFS=$'\r\n' + fi +fi + +while [ $# -gt 0 ]; do + imageTag="$1" + shift + image="${imageTag%%[:@]*}" + imageTag="${imageTag#*:}" + digest="${imageTag##*@}" + tag="${imageTag%%@*}" + + # add prefix library if passed official image + if [[ "$image" != *"/"* ]]; then + image="library/$image" + fi + + imageFile="${image//\//_}" # "/" can't be in filenames :) + + token="$(curl -fsSL "https://auth.docker.io/token?service=registry.docker.io&scope=repository:$image:pull" | jq --raw-output '.token')" + + manifestJson="$( + curl -fsSL \ + -H "Authorization: Bearer $token" \ + -H 'Accept: application/vnd.docker.distribution.manifest.v2+json' \ + -H 'Accept: application/vnd.docker.distribution.manifest.v1+json' \ + "https://registry-1.docker.io/v2/$image/manifests/$digest" + )" + if [ "${manifestJson:0:1}" != '{' ]; then + echo >&2 "error: /v2/$image/manifests/$digest returned something unexpected:" + echo >&2 " $manifestJson" + exit 1 + fi + + imageIdentifier="$image:$tag@$digest" + + schemaVersion="$(echo "$manifestJson" | jq --raw-output '.schemaVersion')" + case "$schemaVersion" in + 2) + mediaType="$(echo "$manifestJson" | jq --raw-output '.mediaType')" + + case "$mediaType" in + application/vnd.docker.distribution.manifest.v2+json) + configDigest="$(echo "$manifestJson" | jq --raw-output '.config.digest')" + imageId="${configDigest#*:}" # strip off "sha256:" + + configFile="$imageId.json" + curl -fsSL \ + -H "Authorization: Bearer $token" \ + "https://registry-1.docker.io/v2/$image/blobs/$configDigest" \ + -o "$dir/$configFile" + + layersFs="$(echo "$manifestJson" | jq --raw-output --compact-output '.layers[]')" + IFS="$newlineIFS" + layers=( $layersFs ) + unset IFS + + echo "Downloading '$imageIdentifier' (${#layers[@]} layers)..." 
+ layerId= + layerFiles=() + for i in "${!layers[@]}"; do + layerMeta="${layers[$i]}" + + layerMediaType="$(echo "$layerMeta" | jq --raw-output '.mediaType')" + layerDigest="$(echo "$layerMeta" | jq --raw-output '.digest')" + + # save the previous layer's ID + parentId="$layerId" + # create a new fake layer ID based on this layer's digest and the previous layer's fake ID + layerId="$(echo "$parentId"$'\n'"$layerDigest" | sha256sum | cut -d' ' -f1)" + # this accounts for the possibility that an image contains the same layer twice (and thus has a duplicate digest value) + + mkdir -p "$dir/$layerId" + echo '1.0' > "$dir/$layerId/VERSION" + + if [ ! -s "$dir/$layerId/json" ]; then + parentJson="$(printf ', parent: "%s"' "$parentId")" + addJson="$(printf '{ id: "%s"%s }' "$layerId" "${parentId:+$parentJson}")" + # this starter JSON is taken directly from Docker's own "docker save" output for unimportant layers + jq "$addJson + ." > "$dir/$layerId/json" <<-'EOJSON' + { + "created": "0001-01-01T00:00:00Z", + "container_config": { + "Hostname": "", + "Domainname": "", + "User": "", + "AttachStdin": false, + "AttachStdout": false, + "AttachStderr": false, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": null, + "Cmd": null, + "Image": "", + "Volumes": null, + "WorkingDir": "", + "Entrypoint": null, + "OnBuild": null, + "Labels": null + } + } + EOJSON + fi + + case "$layerMediaType" in + application/vnd.docker.image.rootfs.diff.tar.gzip) + layerTar="$layerId/layer.tar" + layerFiles=( "${layerFiles[@]}" "$layerTar" ) + # TODO figure out why "-C -" doesn't work here + # "curl: (33) HTTP server doesn't seem to support byte ranges. Cannot resume." + # "HTTP/1.1 416 Requested Range Not Satisfiable" + if [ -f "$dir/$layerTar" ]; then + # TODO hackpatch for no -C support :'( + echo "skipping existing ${layerId:0:12}" + continue + fi + curl -fSL --progress \ + -H "Authorization: Bearer $token" \ + "https://registry-1.docker.io/v2/$image/blobs/$layerDigest" \ + -o "$dir/$layerTar" + ;; + + *) + echo >&2 "error: unknown layer mediaType ($imageIdentifier, $layerDigest): '$layerMediaType'" + exit 1 + ;; + esac + done + + # change "$imageId" to be the ID of the last layer we added (needed for old-style "repositories" file which is created later -- specifically for older Docker daemons) + imageId="$layerId" + + # munge the top layer image manifest to have the appropriate image configuration for older daemons + imageOldConfig="$(jq --raw-output --compact-output '{ id: .id } + if .parent then { parent: .parent } else {} end' "$dir/$imageId/json")" + jq --raw-output "$imageOldConfig + del(.history, .rootfs)" "$dir/$configFile" > "$dir/$imageId/json" + + manifestJsonEntry="$( + echo '{}' | jq --raw-output '. 
+ { + Config: "'"$configFile"'", + RepoTags: ["'"${image#library\/}:$tag"'"], + Layers: '"$(echo '[]' | jq --raw-output ".$(for layerFile in "${layerFiles[@]}"; do echo " + [ \"$layerFile\" ]"; done)")"' + }' + )" + manifestJsonEntries=( "${manifestJsonEntries[@]}" "$manifestJsonEntry" ) + ;; + + *) + echo >&2 "error: unknown manifest mediaType ($imageIdentifier): '$mediaType'" + exit 1 + ;; + esac + ;; + + 1) + if [ -z "$doNotGenerateManifestJson" ]; then + echo >&2 "warning: '$imageIdentifier' uses schemaVersion '$schemaVersion'" + echo >&2 " this script cannot (currently) recreate the 'image config' to put in a 'manifest.json' (thus any schemaVersion 2+ images will be imported in the old way, and their 'docker history' will suffer)" + echo >&2 + doNotGenerateManifestJson=1 + fi + + layersFs="$(echo "$manifestJson" | jq --raw-output '.fsLayers | .[] | .blobSum')" + IFS="$newlineIFS" + layers=( $layersFs ) + unset IFS + + history="$(echo "$manifestJson" | jq '.history | [.[] | .v1Compatibility]')" + imageId="$(echo "$history" | jq --raw-output '.[0]' | jq --raw-output '.id')" + + echo "Downloading '$imageIdentifier' (${#layers[@]} layers)..." + for i in "${!layers[@]}"; do + imageJson="$(echo "$history" | jq --raw-output ".[${i}]")" + layerId="$(echo "$imageJson" | jq --raw-output '.id')" + imageLayer="${layers[$i]}" + + mkdir -p "$dir/$layerId" + echo '1.0' > "$dir/$layerId/VERSION" + + echo "$imageJson" > "$dir/$layerId/json" + + # TODO figure out why "-C -" doesn't work here + # "curl: (33) HTTP server doesn't seem to support byte ranges. Cannot resume." + # "HTTP/1.1 416 Requested Range Not Satisfiable" + if [ -f "$dir/$layerId/layer.tar" ]; then + # TODO hackpatch for no -C support :'( + echo "skipping existing ${layerId:0:12}" + continue + fi + curl -fSL --progress -H "Authorization: Bearer $token" "https://registry-1.docker.io/v2/$image/blobs/$imageLayer" -o "$dir/$layerId/layer.tar" # -C - + done + ;; + + *) + echo >&2 "error: unknown manifest schemaVersion ($imageIdentifier): '$schemaVersion'" + exit 1 + ;; + esac + + echo + + if [ -s "$dir/tags-$imageFile.tmp" ]; then + echo -n ', ' >> "$dir/tags-$imageFile.tmp" + else + images=( "${images[@]}" "$image" ) + fi + echo -n '"'"$tag"'": "'"$imageId"'"' >> "$dir/tags-$imageFile.tmp" +done + +echo -n '{' > "$dir/repositories" +firstImage=1 +for image in "${images[@]}"; do + imageFile="${image//\//_}" # "/" can't be in filenames :) + image="${image#library\/}" + + [ "$firstImage" ] || echo -n ',' >> "$dir/repositories" + firstImage= + echo -n $'\n\t' >> "$dir/repositories" + echo -n '"'"$image"'": { '"$(cat "$dir/tags-$imageFile.tmp")"' }' >> "$dir/repositories" +done +echo -n $'\n}\n' >> "$dir/repositories" + +rm -f "$dir"/tags-*.tmp + +if [ -z "$doNotGenerateManifestJson" ] && [ "${#manifestJsonEntries[@]}" -gt 0 ]; then + echo '[]' | jq --raw-output ".$(for entry in "${manifestJsonEntries[@]}"; do echo " + [ $entry ]"; done)" > "$dir/manifest.json" +else + rm -f "$dir/manifest.json" +fi + +echo "Download of images into '$dir' complete." +echo "Use something like the following to load the result into a Docker daemon:" +echo " tar -cC '$dir' . 
| docker load" diff --git a/components/engine/contrib/editorconfig b/components/engine/contrib/editorconfig new file mode 100644 index 0000000000..97eda89a4b --- /dev/null +++ b/components/engine/contrib/editorconfig @@ -0,0 +1,13 @@ +root = true + +[*] +end_of_line = lf +insert_final_newline = true +charset = utf-8 +indent_style = tab +indent_size = 4 +trim_trailing_whitespace = true + +[*.md] +indent_size = 2 +indent_style = space diff --git a/components/engine/contrib/gitdm/aliases b/components/engine/contrib/gitdm/aliases new file mode 100644 index 0000000000..dd5dd34335 --- /dev/null +++ b/components/engine/contrib/gitdm/aliases @@ -0,0 +1,148 @@ +Danny.Yates@mailonline.co.uk danny@codeaholics.org +KenCochrane@gmail.com kencochrane@gmail.com +LÉVEIL thomasleveil@gmail.com +Vincent.Bernat@exoscale.ch bernat@luffy.cx +acidburn@docker.com jess@docker.com +admin@jtlebi.fr jt@yadutaf.fr +ahmetalpbalkan@gmail.com ahmetb@microsoft.com +aj@gandi.net aj@gandi.net +albers@users.noreply.github.com github@albersweb.de +alexander.larsson@gmail.com alexl@redhat.com +amurdaca@redhat.com antonio.murdaca@gmail.com +amy@gandi.net aj@gandi.net +andrew.weiss@microsoft.com andrew.weiss@outlook.com +angt@users.noreply.github.com adrien@gallouet.fr +ankushagarwal@users.noreply.github.com ankushagarwal11@gmail.com +anonymouse2048@gmail.com lheckemann@twig-world.com +anusha@docker.com anusha.ragunathan@docker.com +asarai@suse.com asarai@suse.de +avi.miller@gmail.com avi.miller@oracle.com +bernat@luffy.cx Vincent.Bernat@exoscale.ch +bgoff@cpuguy83-mbp.home cpuguy83@gmail.com +brandon@ifup.co brandon@ifup.org +brent@docker.com brent.salisbury@docker.com +charmes.guillaume@gmail.com guillaume.charmes@docker.com +chenchun.feed@gmail.com ramichen@tencent.com +chooper@plumata.com charles.hooper@dotcloud.com +crosby.michael@gmail.com michael@docker.com +crosbymichael@gmail.com michael@docker.com +cyphar@cyphar.com asarai@suse.de +daehyeok@daehyeok-ui-MacBook-Air.local daehyeok@gmail.com +daehyeok@daehyeokui-MacBook-Air.local daehyeok@gmail.com +daniel.norberg@gmail.com dano@spotify.com +daniel@dotcloud.com daniel.mizyrycki@dotcloud.com +darren@rancher.com darren.s.shepherd@gmail.com +dave@dtucker.co.uk dt@docker.com +dev@vvieux.com victor.vieux@docker.com +dgasienica@zynga.com daniel@gasienica.ch +dnephin@gmail.com dnephin@docker.com +dominikh@fork-bomb.org dominik@honnef.co +dqminh89@gmail.com dqminh@cloudflare.com +dsxiao@dataman-inc.com dxiao@redhat.com +duglin@users.noreply.github.com dug@us.ibm.com +eric.hanchrow@gmail.com ehanchrow@ine.com +erik+github@hollensbe.org github@hollensbe.org +estesp@gmail.com estesp@linux.vnet.ibm.com +ewindisch@docker.com eric@windisch.us +f.joffrey@gmail.com joffrey@docker.com +fkautz@alumni.cmu.edu fkautz@redhat.com +frank.rosquin@gmail.com frank.rosquin+github@gmail.com +gh@mattyw.net mattyw@me.com +git@julienbordellier.com julienbordellier@gmail.com +github@metaliveblog.com github@developersupport.net +github@srid.name sridharr@activestate.com +guillaume.charmes@dotcloud.com guillaume.charmes@docker.com +guillaume@charmes.net guillaume.charmes@docker.com +guillaume@docker.com guillaume.charmes@docker.com +guillaume@dotcloud.com guillaume.charmes@docker.com +haoshuwei24@gmail.com haosw@cn.ibm.com +hollie.teal@docker.com hollie@docker.com +hollietealok@users.noreply.github.com hollie@docker.com +hsinko@users.noreply.github.com 21551195@zju.edu.cn +iamironbob@gmail.com altsysrq@gmail.com +icecrime@gmail.com arnaud.porterie@docker.com +jatzen@gmail.com jacob@jacobatzen.dk 
+jeff@allingeek.com jeff.nickoloff@gmail.com +jefferya@programmerq.net jeff@docker.com +jerome.petazzoni@dotcloud.com jerome.petazzoni@dotcloud.com +jfrazelle@users.noreply.github.com jess@docker.com +jhoward@microsoft.com John.Howard@microsoft.com +jlhawn@berkeley.edu josh.hawn@docker.com +joffrey@dotcloud.com joffrey@docker.com +john.howard@microsoft.com John.Howard@microsoft.com +jp@enix.org jerome.petazzoni@dotcloud.com +justin.cormack@unikernel.com justin.cormack@docker.com +justin.simonelis@PTS-JSIMON2.toronto.exclamation.com justin.p.simonelis@gmail.com +justin@specialbusservice.com justin.cormack@docker.com +katsuta_soshi@cyberagent.co.jp soshi.katsuta@gmail.com +kuehnle@online.de git.nivoc@neverbox.com +kwk@users.noreply.github.com konrad.wilhelm.kleine@gmail.com +leijitang@gmail.com leijitang@huawei.com +liubin0329@gmail.com liubin0329@users.noreply.github.com +lk4d4math@gmail.com lk4d4@docker.com +louis@dotcloud.com kalessin@kalessin.fr +lsm5@redhat.com lsm5@fedoraproject.org +lyndaoleary@hotmail.com lyndaoleary29@gmail.com +madhu@socketplane.io madhu@docker.com +martins@noironetworks.com aanm90@gmail.com +mary@docker.com mary.anthony@docker.com +mastahyeti@users.noreply.github.com mastahyeti@gmail.com +maztaim@users.noreply.github.com taim@bosboot.org +me@runcom.ninja antonio.murdaca@gmail.com +mheon@mheonlaptop.redhat.com mheon@redhat.com +michael@crosbymichael.com michael@docker.com +mohitsoni1989@gmail.com mosoni@ebay.com +moxieandmore@gmail.com mary.anthony@docker.com +moyses.furtado@wplex.com.br moysesb@gmail.com +msabramo@gmail.com marc@marc-abramowitz.com +mzdaniel@glidelink.net daniel.mizyrycki@dotcloud.com +nathan.leclaire@gmail.com nathan.leclaire@docker.com +nathanleclaire@gmail.com nathan.leclaire@docker.com +ostezer@users.noreply.github.com ostezer@gmail.com +peter@scraperwiki.com p@pwaller.net +princess@docker.com jess@docker.com +proppy@aminche.com proppy@google.com +qhuang@10.0.2.15 h.huangqiang@huawei.com +resouer@gmail.com resouer@163.com +roberto_hashioka@hotmail.com roberto.hashioka@docker.com +root@vagrant-ubuntu-12.10.vagrantup.com daniel.mizyrycki@dotcloud.com +runcom@linux.com antonio.murdaca@gmail.com +runcom@redhat.com antonio.murdaca@gmail.com +runcom@users.noreply.github.com antonio.murdaca@gmail.com +s@docker.com solomon@docker.com +shawnlandden@gmail.com shawn@churchofgit.com +singh.gurjeet@gmail.com gurjeet@singh.im +sjoerd@byte.nl sjoerd-github@linuxonly.nl +smahajan@redhat.com shishir.mahajan@redhat.com +solomon.hykes@dotcloud.com solomon@docker.com +solomon@dotcloud.com solomon@docker.com +stefanb@us.ibm.com stefanb@linux.vnet.ibm.com +stevvooe@users.noreply.github.com stephen.day@docker.com +superbaloo+registrations.github@superbaloo.net baloo@gandi.net +tangicolin@gmail.com tangicolin@gmail.com +thaJeztah@users.noreply.github.com github@gone.nl +thatcher@dotcloud.com thatcher@docker.com +thatcher@gmx.net thatcher@docker.com +tibor@docker.com teabee89@gmail.com +tiborvass@users.noreply.github.com teabee89@gmail.com +timruffles@googlemail.com oi@truffles.me.uk +tintypemolly@Ohui-MacBook-Pro.local tintypemolly@gmail.com +tj@init.me tejesh.mehta@gmail.com +tristan.carel@gmail.com tristan@cogniteev.com +unclejack@users.noreply.github.com cristian.staretu@gmail.com +unclejacksons@gmail.com cristian.staretu@gmail.com +vbatts@hashbangbash.com vbatts@redhat.com +victor.vieux@dotcloud.com victor.vieux@docker.com +victor@docker.com victor.vieux@docker.com +victor@dotcloud.com victor.vieux@docker.com +victorvieux@gmail.com victor.vieux@docker.com 
+vieux@docker.com victor.vieux@docker.com
+vincent+github@demeester.fr vincent@sbr.pm
+vincent@bernat.im bernat@luffy.cx
+vojnovski@gmail.com viktor.vojnovski@amadeus.com
+whoshuu@gmail.com huu@prismskylabs.com
+xiaods@gmail.com dxiao@redhat.com
+xlgao@zju.edu.cn xlgao@zju.edu.cn
+yestin.sun@polyera.com sunyi0804@gmail.com
+yuchangchun1@huawei.com yuchangchun1@huawei.com
+zjaffee@us.ibm.com zij@case.edu
diff --git a/components/engine/contrib/gitdm/domain-map b/components/engine/contrib/gitdm/domain-map
new file mode 100644
index 0000000000..17a287e97a
--- /dev/null
+++ b/components/engine/contrib/gitdm/domain-map
@@ -0,0 +1,47 @@
+#
+# Docker
+#
+
+docker.com Docker
+dotcloud.com Docker
+
+aluzzardi@gmail.com Docker
+cpuguy83@gmail.com Docker
+derek@mcgstyle.net Docker
+github@gone.nl Docker
+kencochrane@gmail.com Docker
+mickael.laventure@gmail.com Docker
+sam.alba@gmail.com Docker
+svendowideit@fosiki.com Docker
+svendowideit@home.org.au Docker
+tonistiigi@gmail.com Docker
+
+cristian.staretu@gmail.com Docker < 2015-01-01
+cristian.staretu@gmail.com Cisco
+
+github@hollensbe.org Docker < 2015-01-01
+github@hollensbe.org Cisco
+
+david.calavera@gmail.com Docker < 2016-04-01
+david.calavera@gmail.com (Unknown)
+
+madhu@socketplane.io Docker
+ejhazlett@gmail.com Docker
+ben@firshman.co.uk Docker
+
+vincent@sbr.pm (Unknown) < 2016-10-24
+vincent@sbr.pm Docker
+
+#
+# Others
+#
+
+cisco.com Cisco
+google.com Google
+ibm.com IBM
+huawei.com Huawei
+microsoft.com Microsoft
+
+redhat.com Red Hat
+mrunalp@gmail.com Red Hat
+antonio.murdaca@gmail.com Red Hat
diff --git a/components/engine/contrib/gitdm/generate_aliases.sh b/components/engine/contrib/gitdm/generate_aliases.sh
new file mode 100755
index 0000000000..dfff5ff204
--- /dev/null
+++ b/components/engine/contrib/gitdm/generate_aliases.sh
@@ -0,0 +1,16 @@
+#!/usr/bin/env bash
+
+#
+# This script generates a gitdm compatible email aliases file from a git
+# formatted .mailmap file.
+#
+# Usage:
+#    $> ./generate_aliases <mailmap_file> > aliases
+#
+
+cat "$1" | \
+    grep -v '^#' | \
+    sed 's/^[^<]*<\([^>]*\)>/\1/' | \
+    grep '<.*>' | sed -e 's/[<>]/ /g' | \
+    awk '{if ($3 != "") { print $3" "$1 } else {print $2" "$1}}' | \
+    sort | uniq
diff --git a/components/engine/contrib/gitdm/gitdm.config b/components/engine/contrib/gitdm/gitdm.config
new file mode 100644
index 0000000000..d9b62b0b43
--- /dev/null
+++ b/components/engine/contrib/gitdm/gitdm.config
@@ -0,0 +1,17 @@
+#
+# EmailAliases lets us cope with developers who use more
+# than one address.
+#
+EmailAliases aliases
+
+#
+# EmailMap does the main work of mapping addresses onto
+# employers.
+#
+EmailMap domain-map
+
+#
+# Use GroupMap to map a file full of addresses to the
+# same employer
+#
+# GroupMap company-Docker Docker
diff --git a/components/engine/contrib/httpserver/Dockerfile b/components/engine/contrib/httpserver/Dockerfile
new file mode 100644
index 0000000000..747dc91bcf
--- /dev/null
+++ b/components/engine/contrib/httpserver/Dockerfile
@@ -0,0 +1,4 @@
+FROM busybox
+EXPOSE 80/tcp
+COPY httpserver .
+CMD ["./httpserver"]
diff --git a/components/engine/contrib/httpserver/Dockerfile.solaris b/components/engine/contrib/httpserver/Dockerfile.solaris
new file mode 100644
index 0000000000..3d0d691c17
--- /dev/null
+++ b/components/engine/contrib/httpserver/Dockerfile.solaris
@@ -0,0 +1,4 @@
+FROM solaris
+EXPOSE 80/tcp
+COPY httpserver .
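+# NOTE: httpserver is a prebuilt binary compiled from server.go below; one
+# plausible build command (an assumption, not mandated by this diff) is:
+#   CGO_ENABLED=0 go build -o httpserver server.go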
+CMD ["./httpserver"] diff --git a/components/engine/contrib/httpserver/server.go b/components/engine/contrib/httpserver/server.go new file mode 100644 index 0000000000..a75d5abb3d --- /dev/null +++ b/components/engine/contrib/httpserver/server.go @@ -0,0 +1,12 @@ +package main + +import ( + "log" + "net/http" +) + +func main() { + fs := http.FileServer(http.Dir("/static")) + http.Handle("/", fs) + log.Panic(http.ListenAndServe(":80", nil)) +} diff --git a/components/engine/contrib/init/openrc/docker.confd b/components/engine/contrib/init/openrc/docker.confd new file mode 100644 index 0000000000..89183de46b --- /dev/null +++ b/components/engine/contrib/init/openrc/docker.confd @@ -0,0 +1,23 @@ +# /etc/conf.d/docker: config file for /etc/init.d/docker + +# where the docker daemon output gets piped +# this contains both stdout and stderr. If you need to separate them, +# see the settings below +#DOCKER_LOGFILE="/var/log/docker.log" + +# where the docker daemon stdout gets piped +# if this is not set, DOCKER_LOGFILE is used +#DOCKER_OUTFILE="/var/log/docker-out.log" + +# where the docker daemon stderr gets piped +# if this is not set, DOCKER_LOGFILE is used +#DOCKER_ERRFILE="/var/log/docker-err.log" + +# where docker's pid get stored +#DOCKER_PIDFILE="/run/docker.pid" + +# where the docker daemon itself is run from +#DOCKERD_BINARY="/usr/bin/dockerd" + +# any other random options you want to pass to docker +DOCKER_OPTS="" diff --git a/components/engine/contrib/init/openrc/docker.initd b/components/engine/contrib/init/openrc/docker.initd new file mode 100644 index 0000000000..6c968f607e --- /dev/null +++ b/components/engine/contrib/init/openrc/docker.initd @@ -0,0 +1,24 @@ +#!/sbin/openrc-run +# Copyright 1999-2013 Gentoo Foundation +# Distributed under the terms of the GNU General Public License v2 + +command="${DOCKERD_BINARY:-/usr/bin/dockerd}" +pidfile="${DOCKER_PIDFILE:-/run/${RC_SVCNAME}.pid}" +command_args="-p \"${pidfile}\" ${DOCKER_OPTS}" +DOCKER_LOGFILE="${DOCKER_LOGFILE:-/var/log/${RC_SVCNAME}.log}" +DOCKER_ERRFILE="${DOCKER_ERRFILE:-${DOCKER_LOGFILE}}" +DOCKER_OUTFILE="${DOCKER_OUTFILE:-${DOCKER_LOGFILE}}" +start_stop_daemon_args="--background \ + --stderr \"${DOCKER_ERRFILE}\" --stdout \"${DOCKER_OUTFILE}\"" + +start_pre() { + checkpath -f -m 0644 -o root:docker "$DOCKER_LOGFILE" + + ulimit -n 1048576 + + # Having non-zero limits causes performance problems due to accounting overhead + # in the kernel. We recommend using cgroups to do container-local accounting. 
+	ulimit -u unlimited
+
+	return 0
+}
diff --git a/components/engine/contrib/init/systemd/REVIEWERS b/components/engine/contrib/init/systemd/REVIEWERS
new file mode 100644
index 0000000000..b9ba55b3fb
--- /dev/null
+++ b/components/engine/contrib/init/systemd/REVIEWERS
@@ -0,0 +1,3 @@
+Lokesh Mandvekar (@lsm5)
+Brandon Philips (@philips)
+Jessie Frazelle (@jfrazelle)
diff --git a/components/engine/contrib/init/systemd/docker.service b/components/engine/contrib/init/systemd/docker.service
new file mode 100644
index 0000000000..517463172b
--- /dev/null
+++ b/components/engine/contrib/init/systemd/docker.service
@@ -0,0 +1,34 @@
+[Unit]
+Description=Docker Application Container Engine
+Documentation=https://docs.docker.com
+After=network-online.target docker.socket firewalld.service
+Wants=network-online.target
+Requires=docker.socket
+
+[Service]
+Type=notify
+# the default is not to use systemd for cgroups because the delegate issue still
+# exists and systemd currently does not support the cgroup feature set required
+# for containers run by docker
+ExecStart=/usr/bin/dockerd -H fd://
+ExecReload=/bin/kill -s HUP $MAINPID
+LimitNOFILE=1048576
+# Having non-zero Limit*s causes performance problems due to accounting overhead
+# in the kernel. We recommend using cgroups to do container-local accounting.
+LimitNPROC=infinity
+LimitCORE=infinity
+# Uncomment TasksMax if your systemd version supports it.
+# Only systemd 226 and above support this option.
+#TasksMax=infinity
+TimeoutStartSec=0
+# set delegate yes so that systemd does not reset the cgroups of docker containers
+Delegate=yes
+# kill only the docker process, not all processes in the cgroup
+KillMode=process
+# restart the docker process if it exits prematurely
+Restart=on-failure
+StartLimitBurst=3
+StartLimitInterval=60s
+
+[Install]
+WantedBy=multi-user.target
diff --git a/components/engine/contrib/init/systemd/docker.service.rpm b/components/engine/contrib/init/systemd/docker.service.rpm
new file mode 100644
index 0000000000..6c60646b56
--- /dev/null
+++ b/components/engine/contrib/init/systemd/docker.service.rpm
@@ -0,0 +1,33 @@
+[Unit]
+Description=Docker Application Container Engine
+Documentation=https://docs.docker.com
+After=network-online.target firewalld.service
+Wants=network-online.target
+
+[Service]
+Type=notify
+# the default is not to use systemd for cgroups because the delegate issue still
+# exists and systemd currently does not support the cgroup feature set required
+# for containers run by docker
+ExecStart=/usr/bin/dockerd
+ExecReload=/bin/kill -s HUP $MAINPID
+# Having non-zero Limit*s causes performance problems due to accounting overhead
+# in the kernel. We recommend using cgroups to do container-local accounting.
+LimitNOFILE=infinity
+LimitNPROC=infinity
+LimitCORE=infinity
+# Uncomment TasksMax if your systemd version supports it.
+# Only systemd 226 and above support this option.
+#TasksMax=infinity +TimeoutStartSec=0 +# set delegate yes so that systemd does not reset the cgroups of docker containers +Delegate=yes +# kill only the docker process, not all processes in the cgroup +KillMode=process +# restart the docker process if it exits prematurely +Restart=on-failure +StartLimitBurst=3 +StartLimitInterval=60s + +[Install] +WantedBy=multi-user.target diff --git a/components/engine/contrib/init/systemd/docker.socket b/components/engine/contrib/init/systemd/docker.socket new file mode 100644 index 0000000000..7dd95098e4 --- /dev/null +++ b/components/engine/contrib/init/systemd/docker.socket @@ -0,0 +1,12 @@ +[Unit] +Description=Docker Socket for the API +PartOf=docker.service + +[Socket] +ListenStream=/var/run/docker.sock +SocketMode=0660 +SocketUser=root +SocketGroup=docker + +[Install] +WantedBy=sockets.target diff --git a/components/engine/contrib/init/sysvinit-debian/docker b/components/engine/contrib/init/sysvinit-debian/docker new file mode 100755 index 0000000000..9c8fa6be73 --- /dev/null +++ b/components/engine/contrib/init/sysvinit-debian/docker @@ -0,0 +1,156 @@ +#!/bin/sh +set -e + +### BEGIN INIT INFO +# Provides: docker +# Required-Start: $syslog $remote_fs +# Required-Stop: $syslog $remote_fs +# Should-Start: cgroupfs-mount cgroup-lite +# Should-Stop: cgroupfs-mount cgroup-lite +# Default-Start: 2 3 4 5 +# Default-Stop: 0 1 6 +# Short-Description: Create lightweight, portable, self-sufficient containers. +# Description: +# Docker is an open-source project to easily create lightweight, portable, +# self-sufficient containers from any application. The same container that a +# developer builds and tests on a laptop can run at scale, in production, on +# VMs, bare metal, OpenStack clusters, public clouds and more. +### END INIT INFO + +export PATH=/sbin:/bin:/usr/sbin:/usr/bin:/usr/local/sbin:/usr/local/bin + +BASE=docker + +# modify these in /etc/default/$BASE (/etc/default/docker) +DOCKERD=/usr/bin/dockerd +# This is the pid file managed by docker itself +DOCKER_PIDFILE=/var/run/$BASE.pid +# This is the pid file created/managed by start-stop-daemon +DOCKER_SSD_PIDFILE=/var/run/$BASE-ssd.pid +DOCKER_LOGFILE=/var/log/$BASE.log +DOCKER_OPTS= +DOCKER_DESC="Docker" + +# Get lsb functions +. /lib/lsb/init-functions + +if [ -f /etc/default/$BASE ]; then + . /etc/default/$BASE +fi + +# Check docker is present +if [ ! -x $DOCKERD ]; then + log_failure_msg "$DOCKERD not present or not executable" + exit 1 +fi + +check_init() { + # see also init_is_upstart in /lib/lsb/init-functions (which isn't available in Ubuntu 12.04, or we'd use it directly) + if [ -x /sbin/initctl ] && /sbin/initctl version 2>/dev/null | grep -q upstart; then + log_failure_msg "$DOCKER_DESC is managed via upstart, try using service $BASE $1" + exit 1 + fi +} + +fail_unless_root() { + if [ "$(id -u)" != '0' ]; then + log_failure_msg "$DOCKER_DESC must be run as root" + exit 1 + fi +} + +cgroupfs_mount() { + # see also https://github.com/tianon/cgroupfs-mount/blob/master/cgroupfs-mount + if grep -v '^#' /etc/fstab | grep -q cgroup \ + || [ ! -e /proc/cgroups ] \ + || [ ! -d /sys/fs/cgroup ]; then + return + fi + if ! mountpoint -q /sys/fs/cgroup; then + mount -t tmpfs -o uid=0,gid=0,mode=0755 cgroup /sys/fs/cgroup + fi + ( + cd /sys/fs/cgroup + for sys in $(awk '!/^#/ { if ($4 == 1) print $1 }' /proc/cgroups); do + mkdir -p $sys + if ! mountpoint -q $sys; then + if ! 
mount -n -t cgroup -o $sys cgroup $sys; then + rmdir $sys || true + fi + fi + done + ) +} + +case "$1" in + start) + check_init + + fail_unless_root + + cgroupfs_mount + + touch "$DOCKER_LOGFILE" + chgrp docker "$DOCKER_LOGFILE" + + ulimit -n 1048576 + + # Having non-zero limits causes performance problems due to accounting overhead + # in the kernel. We recommend using cgroups to do container-local accounting. + if [ "$BASH" ]; then + ulimit -u unlimited + else + ulimit -p unlimited + fi + + log_begin_msg "Starting $DOCKER_DESC: $BASE" + start-stop-daemon --start --background \ + --no-close \ + --exec "$DOCKERD" \ + --pidfile "$DOCKER_SSD_PIDFILE" \ + --make-pidfile \ + -- \ + -p "$DOCKER_PIDFILE" \ + $DOCKER_OPTS \ + >> "$DOCKER_LOGFILE" 2>&1 + log_end_msg $? + ;; + + stop) + check_init + fail_unless_root + if [ -f "$DOCKER_SSD_PIDFILE" ]; then + log_begin_msg "Stopping $DOCKER_DESC: $BASE" + start-stop-daemon --stop --pidfile "$DOCKER_SSD_PIDFILE" --retry 10 + log_end_msg $? + else + log_warning_msg "Docker already stopped - file $DOCKER_SSD_PIDFILE not found." + fi + ;; + + restart) + check_init + fail_unless_root + docker_pid=`cat "$DOCKER_SSD_PIDFILE" 2>/dev/null` + [ -n "$docker_pid" ] \ + && ps -p $docker_pid > /dev/null 2>&1 \ + && $0 stop + $0 start + ;; + + force-reload) + check_init + fail_unless_root + $0 restart + ;; + + status) + check_init + status_of_proc -p "$DOCKER_SSD_PIDFILE" "$DOCKERD" "$DOCKER_DESC" + ;; + + *) + echo "Usage: service docker {start|stop|restart|status}" + exit 1 + ;; +esac diff --git a/components/engine/contrib/init/sysvinit-debian/docker.default b/components/engine/contrib/init/sysvinit-debian/docker.default new file mode 100644 index 0000000000..c4e93199b4 --- /dev/null +++ b/components/engine/contrib/init/sysvinit-debian/docker.default @@ -0,0 +1,20 @@ +# Docker Upstart and SysVinit configuration file + +# +# THIS FILE DOES NOT APPLY TO SYSTEMD +# +# Please see the documentation for "systemd drop-ins": +# https://docs.docker.com/engine/admin/systemd/ +# + +# Customize location of Docker binary (especially for development testing). +#DOCKERD="/usr/local/bin/dockerd" + +# Use DOCKER_OPTS to modify the daemon startup options. +#DOCKER_OPTS="--dns 8.8.8.8 --dns 8.8.4.4" + +# If you need Docker to use an HTTP proxy, it can also be specified here. +#export http_proxy="http://127.0.0.1:3128/" + +# This is also a handy place to tweak where Docker's temporary files go. +#export DOCKER_TMPDIR="/mnt/bigdrive/docker-tmp" diff --git a/components/engine/contrib/init/sysvinit-redhat/docker b/components/engine/contrib/init/sysvinit-redhat/docker new file mode 100755 index 0000000000..df9b02a2a4 --- /dev/null +++ b/components/engine/contrib/init/sysvinit-redhat/docker @@ -0,0 +1,153 @@ +#!/bin/sh +# +# /etc/rc.d/init.d/docker +# +# Daemon for docker.com +# +# chkconfig: 2345 95 95 +# description: Daemon for docker.com + +### BEGIN INIT INFO +# Provides: docker +# Required-Start: $network cgconfig +# Required-Stop: +# Should-Start: +# Should-Stop: +# Default-Start: 2 3 4 5 +# Default-Stop: 0 1 6 +# Short-Description: start and stop docker +# Description: Daemon for docker.com +### END INIT INFO + +# Source function library. +. /etc/rc.d/init.d/functions + +prog="docker" +unshare=/usr/bin/unshare +exec="/usr/bin/dockerd" +pidfile="/var/run/$prog.pid" +lockfile="/var/lock/subsys/$prog" +logfile="/var/log/$prog" + +[ -e /etc/sysconfig/$prog ] && . /etc/sysconfig/$prog + +prestart() { + service cgconfig status > /dev/null + + if [[ $? 
!= 0 ]]; then + service cgconfig start + fi + +} + +start() { + if [ ! -x $exec ]; then + if [ ! -e $exec ]; then + echo "Docker executable $exec not found" + else + echo "You do not have permission to execute the Docker executable $exec" + fi + exit 5 + fi + + check_for_cleanup + + if ! [ -f $pidfile ]; then + prestart + printf "Starting $prog:\t" + echo "\n$(date)\n" >> $logfile + "$unshare" -m -- $exec $other_args >> $logfile 2>&1 & + pid=$! + touch $lockfile + # wait up to 10 seconds for the pidfile to exist. see + # https://github.com/docker/docker/issues/5359 + tries=0 + while [ ! -f $pidfile -a $tries -lt 10 ]; do + sleep 1 + tries=$((tries + 1)) + echo -n '.' + done + if [ ! -f $pidfile ]; then + failure + echo + exit 1 + fi + success + echo + else + failure + echo + printf "$pidfile still exists...\n" + exit 7 + fi +} + +stop() { + echo -n $"Stopping $prog: " + killproc -p $pidfile -d 300 $prog + retval=$? + echo + [ $retval -eq 0 ] && rm -f $lockfile + return $retval +} + +restart() { + stop + start +} + +reload() { + restart +} + +force_reload() { + restart +} + +rh_status() { + status -p $pidfile $prog +} + +rh_status_q() { + rh_status >/dev/null 2>&1 +} + + +check_for_cleanup() { + if [ -f ${pidfile} ]; then + /bin/ps -fp $(cat ${pidfile}) > /dev/null || rm ${pidfile} + fi +} + +case "$1" in + start) + rh_status_q && exit 0 + $1 + ;; + stop) + rh_status_q || exit 0 + $1 + ;; + restart) + $1 + ;; + reload) + rh_status_q || exit 7 + $1 + ;; + force-reload) + force_reload + ;; + status) + rh_status + ;; + condrestart|try-restart) + rh_status_q || exit 0 + restart + ;; + *) + echo $"Usage: $0 {start|stop|status|restart|condrestart|try-restart|reload|force-reload}" + exit 2 +esac + +exit $? diff --git a/components/engine/contrib/init/sysvinit-redhat/docker.sysconfig b/components/engine/contrib/init/sysvinit-redhat/docker.sysconfig new file mode 100644 index 0000000000..0864b3d77f --- /dev/null +++ b/components/engine/contrib/init/sysvinit-redhat/docker.sysconfig @@ -0,0 +1,7 @@ +# /etc/sysconfig/docker +# +# Other arguments to pass to the docker daemon process +# These will be parsed by the sysv initscript and appended +# to the arguments list passed to docker daemon + +other_args="" diff --git a/components/engine/contrib/init/upstart/REVIEWERS b/components/engine/contrib/init/upstart/REVIEWERS new file mode 100644 index 0000000000..03ee2dde3d --- /dev/null +++ b/components/engine/contrib/init/upstart/REVIEWERS @@ -0,0 +1,2 @@ +Tianon Gravi (@tianon) +Jessie Frazelle (@jfrazelle) diff --git a/components/engine/contrib/init/upstart/docker.conf b/components/engine/contrib/init/upstart/docker.conf new file mode 100644 index 0000000000..d58f7d6ac8 --- /dev/null +++ b/components/engine/contrib/init/upstart/docker.conf @@ -0,0 +1,72 @@ +description "Docker daemon" + +start on (filesystem and net-device-up IFACE!=lo) +stop on runlevel [!2345] + +limit nofile 524288 1048576 + +# Having non-zero limits causes performance problems due to accounting overhead +# in the kernel. We recommend using cgroups to do container-local accounting. +limit nproc unlimited unlimited + +respawn + +kill timeout 20 + +pre-start script + # see also https://github.com/tianon/cgroupfs-mount/blob/master/cgroupfs-mount + if grep -v '^#' /etc/fstab | grep -q cgroup \ + || [ ! -e /proc/cgroups ] \ + || [ ! -d /sys/fs/cgroup ]; then + exit 0 + fi + if ! 
mountpoint -q /sys/fs/cgroup; then + mount -t tmpfs -o uid=0,gid=0,mode=0755 cgroup /sys/fs/cgroup + fi + ( + cd /sys/fs/cgroup + for sys in $(awk '!/^#/ { if ($4 == 1) print $1 }' /proc/cgroups); do + mkdir -p $sys + if ! mountpoint -q $sys; then + if ! mount -n -t cgroup -o $sys cgroup $sys; then + rmdir $sys || true + fi + fi + done + ) +end script + +script + # modify these in /etc/default/$UPSTART_JOB (/etc/default/docker) + DOCKERD=/usr/bin/dockerd + DOCKER_OPTS= + if [ -f /etc/default/$UPSTART_JOB ]; then + . /etc/default/$UPSTART_JOB + fi + exec "$DOCKERD" $DOCKER_OPTS --raw-logs +end script + +# Don't emit "started" event until docker.sock is ready. +# See https://github.com/docker/docker/issues/6647 +post-start script + DOCKER_OPTS= + DOCKER_SOCKET= + if [ -f /etc/default/$UPSTART_JOB ]; then + . /etc/default/$UPSTART_JOB + fi + + if ! printf "%s" "$DOCKER_OPTS" | grep -qE -e '-H|--host'; then + DOCKER_SOCKET=/var/run/docker.sock + else + DOCKER_SOCKET=$(printf "%s" "$DOCKER_OPTS" | grep -oP -e '(-H|--host)\W*unix://\K(\S+)' | sed 1q) + fi + + if [ -n "$DOCKER_SOCKET" ]; then + while ! [ -e "$DOCKER_SOCKET" ]; do + initctl status $UPSTART_JOB | grep -qE "(stop|respawn)/" && exit 1 + echo "Waiting for $DOCKER_SOCKET" + sleep 0.1 + done + echo "$DOCKER_SOCKET is up" + fi +end script diff --git a/components/engine/contrib/mac-install-bundle.sh b/components/engine/contrib/mac-install-bundle.sh new file mode 100755 index 0000000000..2110d044d0 --- /dev/null +++ b/components/engine/contrib/mac-install-bundle.sh @@ -0,0 +1,45 @@ +#!/bin/sh + +set -e + +errexit() { + echo "$1" + exit 1 +} + +[ "$(uname -s)" == "Darwin" ] || errexit "This script can only be used on a Mac" + +[ $# -eq 1 ] || errexit "Usage: $0 install|undo" + +BUNDLE="bundles/$(cat VERSION)" +BUNDLE_PATH="$PWD/$BUNDLE" +CLIENT_PATH="$BUNDLE_PATH/cross/darwin/amd64/docker" +DATABASE="$HOME/Library/Containers/com.docker.docker/Data/database" +DATABASE_KEY="$DATABASE/com.docker.driver.amd64-linux/bundle" + +[ -d "$DATABASE" ] || errexit "Docker for Mac must be installed for this script" + +case "$1" in +"install") + [ -d "$BUNDLE" ] || errexit "cannot find bundle $BUNDLE" + [ -e "$CLIENT_PATH" ] || errexit "you need to run make cross first" + [ -e "$BUNDLE/binary-daemon/dockerd" ] || errexit "you need to build binaries first" + [ -f "$BUNDLE/binary-client/docker" ] || errexit "you need to build binaries first" + git -C "$DATABASE" reset --hard >/dev/null + echo "$BUNDLE_PATH" > "$DATABASE_KEY" + git -C "$DATABASE" add "$DATABASE_KEY" + git -C "$DATABASE" commit -m "update bundle to $BUNDLE_PATH" + rm -f /usr/local/bin/docker + cp "$CLIENT_PATH" /usr/local/bin + echo "Bundle installed. Restart Docker to use. To uninstall, reset Docker to factory defaults." + ;; +"undo") + git -C "$DATABASE" reset --hard >/dev/null + [ -f "$DATABASE_KEY" ] || errexit "bundle not set" + git -C "$DATABASE" rm "$DATABASE_KEY" + git -C "$DATABASE" commit -m "remove bundle" + rm -f /usr/local/bin/docker + ln -s "$HOME/Library/Group Containers/group.com.docker/bin/docker" /usr/local/bin + echo "Bundle removed. Using dev versions may cause issues, a reset to factory defaults is recommended." 
+	;;
+esac
diff --git a/components/engine/contrib/mkimage-alpine.sh b/components/engine/contrib/mkimage-alpine.sh
new file mode 100755
index 0000000000..47cd35ce62
--- /dev/null
+++ b/components/engine/contrib/mkimage-alpine.sh
@@ -0,0 +1,87 @@
+#!/bin/sh
+
+set -e
+
+[ $(id -u) -eq 0 ] || {
+	printf >&2 '%s requires root\n' "$0"
+	exit 1
+}
+
+usage() {
+	printf >&2 '%s: [-r release] [-m mirror] [-s] [-c additional repository]\n' "$0"
+	exit 1
+}
+
+tmp() {
+	TMP=$(mktemp -d ${TMPDIR:-/var/tmp}/alpine-docker-XXXXXXXXXX)
+	ROOTFS=$(mktemp -d ${TMPDIR:-/var/tmp}/alpine-docker-rootfs-XXXXXXXXXX)
+	trap "rm -rf $TMP $ROOTFS" EXIT TERM INT
+}
+
+apkv() {
+	curl -sSL $MAINREPO/$ARCH/APKINDEX.tar.gz | tar -Oxz |
+		grep --text '^P:apk-tools-static$' -A1 | tail -n1 | cut -d: -f2
+}
+
+getapk() {
+	curl -sSL $MAINREPO/$ARCH/apk-tools-static-$(apkv).apk |
+		tar -xz -C $TMP sbin/apk.static
+}
+
+mkbase() {
+	$TMP/sbin/apk.static --repository $MAINREPO --update-cache --allow-untrusted \
+		--root $ROOTFS --initdb add alpine-base
+}
+
+conf() {
+	printf '%s\n' $MAINREPO > $ROOTFS/etc/apk/repositories
+	printf '%s\n' $ADDITIONALREPO >> $ROOTFS/etc/apk/repositories
+}
+
+pack() {
+	local id
+	id=$(tar --numeric-owner -C $ROOTFS -c . | docker import - alpine:$REL)
+
+	docker tag $id alpine:latest
+	docker run -i -t --rm alpine printf 'alpine:%s with id=%s created!\n' $REL $id
+}
+
+save() {
+	[ $SAVE -eq 1 ] || return
+
+	tar --numeric-owner -C $ROOTFS -c . | xz > rootfs.tar.xz
+}
+
+while getopts "hr:m:sc:" opt; do
+	case $opt in
+		r)
+			REL=$OPTARG
+			;;
+		m)
+			MIRROR=$OPTARG
+			;;
+		s)
+			SAVE=1
+			;;
+		c)
+			ADDITIONALREPO=$OPTARG
+			;;
+		*)
+			usage
+			;;
+	esac
+done
+
+REL=${REL:-edge}
+MIRROR=${MIRROR:-http://nl.alpinelinux.org/alpine}
+SAVE=${SAVE:-0}
+MAINREPO=$MIRROR/$REL/main
+ADDITIONALREPO=$MIRROR/$REL/${ADDITIONALREPO:-community}
+ARCH=${ARCH:-$(uname -m)}
+
+tmp
+getapk
+mkbase
+conf
+pack
+save
diff --git a/components/engine/contrib/mkimage-arch-pacman.conf b/components/engine/contrib/mkimage-arch-pacman.conf
new file mode 100644
index 0000000000..45fe03dc96
--- /dev/null
+++ b/components/engine/contrib/mkimage-arch-pacman.conf
@@ -0,0 +1,92 @@
+#
+# /etc/pacman.conf
+#
+# See the pacman.conf(5) manpage for option and repository directives
+
+#
+# GENERAL OPTIONS
+#
+[options]
+# The following paths are commented out with their default values listed.
+# If you wish to use different paths, uncomment and update the paths.
+#RootDir = /
+#DBPath = /var/lib/pacman/
+#CacheDir = /var/cache/pacman/pkg/
+#LogFile = /var/log/pacman.log
+#GPGDir = /etc/pacman.d/gnupg/
+HoldPkg = pacman glibc
+#XferCommand = /usr/bin/curl -C - -f %u > %o
+#XferCommand = /usr/bin/wget --passive-ftp -c -O %o %u
+#CleanMethod = KeepInstalled
+#UseDelta = 0.7
+Architecture = auto
+
+# Pacman won't upgrade packages listed in IgnorePkg and members of IgnoreGroup
+#IgnorePkg =
+#IgnoreGroup =
+
+#NoUpgrade =
+#NoExtract =
+
+# Misc options
+#UseSyslog
+#Color
+#TotalDownload
+# We cannot check disk space from within a chroot environment
+#CheckSpace
+#VerbosePkgLists
+
+# By default, pacman accepts packages signed by keys that its local keyring
+# trusts (see pacman-key and its man page), as well as unsigned packages.
+SigLevel = Required DatabaseOptional
+LocalFileSigLevel = Optional
+#RemoteFileSigLevel = Required
+
+# NOTE: You must run `pacman-key --init` before first using pacman; the local
+# keyring can then be populated with the keys of all official Arch Linux
+# packagers with `pacman-key --populate archlinux`.
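+
+# (usage note) This file is read by mkimage-arch.sh in the same directory as
+# its default pacman configuration. The script must run as root from within
+# contrib/ on a host with arch-install-scripts (pacstrap) and expect
+# installed; a typical invocation is simply:
+#   # ./mkimage-arch.sh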
+ +# +# REPOSITORIES +# - can be defined here or included from another file +# - pacman will search repositories in the order defined here +# - local/custom mirrors can be added here or in separate files +# - repositories listed first will take precedence when packages +# have identical names, regardless of version number +# - URLs will have $repo replaced by the name of the current repo +# - URLs will have $arch replaced by the name of the architecture +# +# Repository entries are of the format: +# [repo-name] +# Server = ServerName +# Include = IncludePath +# +# The header [repo-name] is crucial - it must be present and +# uncommented to enable the repo. +# + +# The testing repositories are disabled by default. To enable, uncomment the +# repo name header and Include lines. You can add preferred servers immediately +# after the header, and they will be used before the default mirrors. + +#[testing] +#Include = /etc/pacman.d/mirrorlist + +[core] +Include = /etc/pacman.d/mirrorlist + +[extra] +Include = /etc/pacman.d/mirrorlist + +#[community-testing] +#Include = /etc/pacman.d/mirrorlist + +[community] +Include = /etc/pacman.d/mirrorlist + +# An example of a custom package repository. See the pacman manpage for +# tips on creating your own repositories. +#[custom] +#SigLevel = Optional TrustAll +#Server = file:///home/custompkgs + diff --git a/components/engine/contrib/mkimage-arch.sh b/components/engine/contrib/mkimage-arch.sh new file mode 100755 index 0000000000..f941177122 --- /dev/null +++ b/components/engine/contrib/mkimage-arch.sh @@ -0,0 +1,126 @@ +#!/usr/bin/env bash +# Generate a minimal filesystem for archlinux and load it into the local +# docker as "archlinux" +# requires root +set -e + +hash pacstrap &>/dev/null || { + echo "Could not find pacstrap. Run pacman -S arch-install-scripts" + exit 1 +} + +hash expect &>/dev/null || { + echo "Could not find expect. Run pacman -S expect" + exit 1 +} + + +export LANG="C.UTF-8" + +ROOTFS=$(mktemp -d ${TMPDIR:-/var/tmp}/rootfs-archlinux-XXXXXXXXXX) +chmod 755 $ROOTFS + +# packages to ignore for space savings +PKGIGNORE=( + cryptsetup + device-mapper + dhcpcd + iproute2 + jfsutils + linux + lvm2 + man-db + man-pages + mdadm + nano + netctl + openresolv + pciutils + pcmciautils + reiserfsprogs + s-nail + systemd-sysvcompat + usbutils + vi + xfsprogs +) +IFS=',' +PKGIGNORE="${PKGIGNORE[*]}" +unset IFS + +arch="$(uname -m)" +case "$arch" in + armv*) + if pacman -Q archlinuxarm-keyring >/dev/null 2>&1; then + pacman-key --init + pacman-key --populate archlinuxarm + else + echo "Could not find archlinuxarm-keyring. Please, install it and run pacman-key --populate archlinuxarm" + exit 1 + fi + PACMAN_CONF=$(mktemp ${TMPDIR:-/var/tmp}/pacman-conf-archlinux-XXXXXXXXX) + version="$(echo $arch | cut -c 5)" + sed "s/Architecture = armv/Architecture = armv${version}h/g" './mkimage-archarm-pacman.conf' > "${PACMAN_CONF}" + PACMAN_MIRRORLIST='Server = http://mirror.archlinuxarm.org/$arch/$repo' + PACMAN_EXTRA_PKGS='archlinuxarm-keyring' + EXPECT_TIMEOUT=1800 # Most armv* based devices can be very slow (e.g. 
RPiv1)
+		ARCH_KEYRING=archlinuxarm
+		DOCKER_IMAGE_NAME="armv${version}h/archlinux"
+		;;
+	*)
+		PACMAN_CONF='./mkimage-arch-pacman.conf'
+		PACMAN_MIRRORLIST='Server = https://mirrors.kernel.org/archlinux/$repo/os/$arch'
+		PACMAN_EXTRA_PKGS=''
+		EXPECT_TIMEOUT=60
+		ARCH_KEYRING=archlinux
+		DOCKER_IMAGE_NAME=archlinux
+		;;
+esac
+
+export PACMAN_MIRRORLIST
+
+expect <<EOF
+	set send_slow {1 .1}
+	proc send {ignore arg} {
+		sleep .1
+		exp_send -s -- \$arg
+	}
+	set timeout $EXPECT_TIMEOUT
+
+	spawn pacstrap -C $PACMAN_CONF -c -d -G -i $ROOTFS base haveged $PACMAN_EXTRA_PKGS --ignore $PKGIGNORE
+	expect {
+		-exact "anyway? \[Y/n\] " { send -- "n\r"; exp_continue }
+		-exact "(default=all): " { send -- "\r"; exp_continue }
+		-exact "installation? \[Y/n\]" { send -- "y\r"; exp_continue }
+		-exact "delete it? \[Y/n\]" { send -- "y\r"; exp_continue }
+	}
+EOF
+
+arch-chroot $ROOTFS /bin/sh -c "haveged -w 1024; pacman-key --init; pkill haveged; pacman -Rs --noconfirm haveged; pacman-key --populate $ARCH_KEYRING; pkill gpg-agent"
+arch-chroot $ROOTFS /bin/sh -c "ln -s /usr/share/zoneinfo/UTC /etc/localtime"
+echo 'en_US.UTF-8 UTF-8' > $ROOTFS/etc/locale.gen
+arch-chroot $ROOTFS locale-gen
+arch-chroot $ROOTFS /bin/sh -c 'echo $PACMAN_MIRRORLIST > /etc/pacman.d/mirrorlist'
+
+# udev doesn't work in containers, rebuild /dev
+DEV=$ROOTFS/dev
+rm -rf $DEV
+mkdir -p $DEV
+mknod -m 666 $DEV/null c 1 3
+mknod -m 666 $DEV/zero c 1 5
+mknod -m 666 $DEV/random c 1 8
+mknod -m 666 $DEV/urandom c 1 9
+mkdir -m 755 $DEV/pts
+mkdir -m 1777 $DEV/shm
+mknod -m 666 $DEV/tty c 5 0
+mknod -m 600 $DEV/console c 5 1
+mknod -m 666 $DEV/tty0 c 4 0
+mknod -m 666 $DEV/full c 1 7
+mknod -m 600 $DEV/initctl p
+mknod -m 666 $DEV/ptmx c 5 2
+ln -sf /proc/self/fd $DEV/fd
+
+tar --numeric-owner --xattrs --acls -C $ROOTFS -c . | docker import - $DOCKER_IMAGE_NAME
+docker run --rm -t $DOCKER_IMAGE_NAME echo Success.
+rm -rf $ROOTFS
diff --git a/components/engine/contrib/mkimage-archarm-pacman.conf b/components/engine/contrib/mkimage-archarm-pacman.conf
new file mode 100644
index 0000000000..f4b45f54d7
--- /dev/null
+++ b/components/engine/contrib/mkimage-archarm-pacman.conf
@@ -0,0 +1,98 @@
+#
+# /etc/pacman.conf
+#
+# See the pacman.conf(5) manpage for option and repository directives
+
+#
+# GENERAL OPTIONS
+#
+[options]
+# The following paths are commented out with their default values listed.
+# If you wish to use different paths, uncomment and update the paths.
+#RootDir = /
+#DBPath = /var/lib/pacman/
+#CacheDir = /var/cache/pacman/pkg/
+#LogFile = /var/log/pacman.log
+#GPGDir = /etc/pacman.d/gnupg/
+HoldPkg = pacman glibc
+#XferCommand = /usr/bin/curl -C - -f %u > %o
+#XferCommand = /usr/bin/wget --passive-ftp -c -O %o %u
+#CleanMethod = KeepInstalled
+#UseDelta = 0.7
+Architecture = armv
+
+# Pacman won't upgrade packages listed in IgnorePkg and members of IgnoreGroup
+#IgnorePkg =
+#IgnoreGroup =
+
+#NoUpgrade =
+#NoExtract =
+
+# Misc options
+#UseSyslog
+#Color
+#TotalDownload
+# We cannot check disk space from within a chroot environment
+#CheckSpace
+#VerbosePkgLists
+
+# By default, pacman accepts packages signed by keys that its local keyring
+# trusts (see pacman-key and its man page), as well as unsigned packages.
+SigLevel = Required DatabaseOptional
+LocalFileSigLevel = Optional
+#RemoteFileSigLevel = Required
+
+# NOTE: You must run `pacman-key --init` before first using pacman; the local
+# keyring can then be populated with the keys of all official Arch Linux
+# packagers with `pacman-key --populate archlinux`.
+
+#
+# REPOSITORIES
+#   - can be defined here or included from another file
+#   - pacman will search repositories in the order defined here
+#   - local/custom mirrors can be added here or in separate files
+#   - repositories listed first will take precedence when packages
+#     have identical names, regardless of version number
+#   - URLs will have $repo replaced by the name of the current repo
+#   - URLs will have $arch replaced by the name of the architecture
+#
+# Repository entries are of the format:
+#   [repo-name]
+#   Server = ServerName
+#   Include = IncludePath
+#
+# The header [repo-name] is crucial - it must be present and
+# uncommented to enable the repo.
+#
+
+# The testing repositories are disabled by default.
To enable, uncomment the +# repo name header and Include lines. You can add preferred servers immediately +# after the header, and they will be used before the default mirrors. + +#[testing] +#Include = /etc/pacman.d/mirrorlist + +[core] +Include = /etc/pacman.d/mirrorlist + +[extra] +Include = /etc/pacman.d/mirrorlist + +#[community-testing] +#Include = /etc/pacman.d/mirrorlist + +[community] +Include = /etc/pacman.d/mirrorlist + +[alarm] +Include = /etc/pacman.d/mirrorlist + +[aur] +Include = /etc/pacman.d/mirrorlist + +# An example of a custom package repository. See the pacman manpage for +# tips on creating your own repositories. +#[custom] +#SigLevel = Optional TrustAll +#Server = file:///home/custompkgs + diff --git a/components/engine/contrib/mkimage-busybox.sh b/components/engine/contrib/mkimage-busybox.sh new file mode 100755 index 0000000000..b11a6bb265 --- /dev/null +++ b/components/engine/contrib/mkimage-busybox.sh @@ -0,0 +1,43 @@ +#!/usr/bin/env bash +# Generate a very minimal filesystem based on busybox-static, +# and load it into the local docker under the name "busybox". + +echo >&2 +echo >&2 'warning: this script is deprecated - see mkimage.sh and mkimage/busybox-static' +echo >&2 + +BUSYBOX=$(which busybox) +[ "$BUSYBOX" ] || { + echo "Sorry, I could not locate busybox." + echo "Try 'apt-get install busybox-static'?" + exit 1 +} + +set -e +ROOTFS=${TMPDIR:-/var/tmp}/rootfs-busybox-$$-$RANDOM +mkdir $ROOTFS +cd $ROOTFS + +mkdir bin etc dev dev/pts lib proc sys tmp +touch etc/resolv.conf +cp /etc/nsswitch.conf etc/nsswitch.conf +echo root:x:0:0:root:/:/bin/sh > etc/passwd +echo root:x:0: > etc/group +ln -s lib lib64 +ln -s bin sbin +cp $BUSYBOX bin +for X in $(busybox --list) +do + ln -s busybox bin/$X +done +rm bin/init +ln bin/busybox bin/init +cp /lib/x86_64-linux-gnu/lib{pthread,c,dl,nsl,nss_*}.so.* lib +cp /lib/x86_64-linux-gnu/ld-linux-x86-64.so.2 lib +for X in console null ptmx random stdin stdout stderr tty urandom zero +do + cp -a /dev/$X dev +done + +tar --numeric-owner -cf- . | docker import - busybox +docker run -i -u root busybox /bin/echo Success. diff --git a/components/engine/contrib/mkimage-crux.sh b/components/engine/contrib/mkimage-crux.sh new file mode 100755 index 0000000000..3f0bdcae3c --- /dev/null +++ b/components/engine/contrib/mkimage-crux.sh @@ -0,0 +1,75 @@ +#!/usr/bin/env bash +# Generate a minimal filesystem for CRUX/Linux and load it into the local +# docker as "cruxlinux" +# requires root and the crux iso (http://crux.nu) + +set -e + +die () { + echo >&2 "$@" + exit 1 +} + +[ "$#" -eq 1 ] || die "1 argument(s) required, $# provided. Usage: ./mkimage-crux.sh /path/to/iso" + +ISO=${1} + +ROOTFS=$(mktemp -d ${TMPDIR:-/var/tmp}/rootfs-crux-XXXXXXXXXX) +CRUX=$(mktemp -d ${TMPDIR:-/var/tmp}/crux-XXXXXXXXXX) +TMP=$(mktemp -d ${TMPDIR:-/var/tmp}/XXXXXXXXXX) + +VERSION=$(basename --suffix=.iso $ISO | sed 's/[^0-9.]*\([0-9.]*\).*/\1/') + +# Mount the ISO +mount -o ro,loop $ISO $CRUX + +# Extract pkgutils +tar -C $TMP -xf $CRUX/tools/pkgutils#*.pkg.tar.gz + +# Put pkgadd in the $PATH +export PATH="$TMP/usr/bin:$PATH" + +# Install core packages +mkdir -p $ROOTFS/var/lib/pkg +touch $ROOTFS/var/lib/pkg/db +for pkg in $CRUX/crux/core/*; do + pkgadd -r $ROOTFS $pkg +done + +# Remove agetty and inittab config +if (grep agetty ${ROOTFS}/etc/inittab 2>&1 > /dev/null); then + echo "Removing agetty from /etc/inittab ..." 
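+	# (a container has no real ttys for agetty to manage and no shutdown
+	# handling, so the sed calls below strip those inittab entries)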
+ chroot ${ROOTFS} sed -i -e "/agetty/d" /etc/inittab + chroot ${ROOTFS} sed -i -e "/shutdown/d" /etc/inittab + chroot ${ROOTFS} sed -i -e "/^$/N;/^\n$/d" /etc/inittab +fi + +# Remove kernel source +rm -rf $ROOTFS/usr/src/* + +# udev doesn't work in containers, rebuild /dev +DEV=$ROOTFS/dev +rm -rf $DEV +mkdir -p $DEV +mknod -m 666 $DEV/null c 1 3 +mknod -m 666 $DEV/zero c 1 5 +mknod -m 666 $DEV/random c 1 8 +mknod -m 666 $DEV/urandom c 1 9 +mkdir -m 755 $DEV/pts +mkdir -m 1777 $DEV/shm +mknod -m 666 $DEV/tty c 5 0 +mknod -m 600 $DEV/console c 5 1 +mknod -m 666 $DEV/tty0 c 4 0 +mknod -m 666 $DEV/full c 1 7 +mknod -m 600 $DEV/initctl p +mknod -m 666 $DEV/ptmx c 5 2 + +IMAGE_ID=$(tar --numeric-owner -C $ROOTFS -c . | docker import - crux:$VERSION) +docker tag $IMAGE_ID crux:latest +docker run -i -t crux echo Success. + +# Cleanup +umount $CRUX +rm -rf $ROOTFS +rm -rf $CRUX +rm -rf $TMP diff --git a/components/engine/contrib/mkimage-debootstrap.sh b/components/engine/contrib/mkimage-debootstrap.sh new file mode 100755 index 0000000000..412a5ce0a7 --- /dev/null +++ b/components/engine/contrib/mkimage-debootstrap.sh @@ -0,0 +1,297 @@ +#!/usr/bin/env bash +set -e + +echo >&2 +echo >&2 'warning: this script is deprecated - see mkimage.sh and mkimage/debootstrap' +echo >&2 + +variant='minbase' +include='iproute,iputils-ping' +arch='amd64' # intentionally undocumented for now +skipDetection= +strictDebootstrap= +justTar= + +usage() { + echo >&2 + + echo >&2 "usage: $0 [options] repo suite [mirror]" + + echo >&2 + echo >&2 'options: (not recommended)' + echo >&2 " -p set an http_proxy for debootstrap" + echo >&2 " -v $variant # change default debootstrap variant" + echo >&2 " -i $include # change default package includes" + echo >&2 " -d # strict debootstrap (do not apply any docker-specific tweaks)" + echo >&2 " -s # skip version detection and tagging (ie, precise also tagged as 12.04)" + echo >&2 " # note that this will also skip adding universe and/or security/updates to sources.list" + echo >&2 " -t # just create a tarball, especially for dockerbrew (uses repo as tarball name)" + + echo >&2 + echo >&2 " ie: $0 username/debian squeeze" + echo >&2 " $0 username/debian squeeze http://ftp.uk.debian.org/debian/" + + echo >&2 + echo >&2 " ie: $0 username/ubuntu precise" + echo >&2 " $0 username/ubuntu precise http://mirrors.melbourne.co.uk/ubuntu/" + + echo >&2 + echo >&2 " ie: $0 -t precise.tar.bz2 precise" + echo >&2 " $0 -t wheezy.tgz wheezy" + echo >&2 " $0 -t wheezy-uk.tar.xz wheezy http://ftp.uk.debian.org/debian/" + + echo >&2 +} + +# these should match the names found at http://www.debian.org/releases/ +debianStable=wheezy +debianUnstable=sid +# this should match the name found at http://releases.ubuntu.com/ +ubuntuLatestLTS=trusty +# this should match the name found at http://releases.tanglu.org/ +tangluLatest=aequorea + +while getopts v:i:a:p:dst name; do + case "$name" in + p) + http_proxy="$OPTARG" + ;; + v) + variant="$OPTARG" + ;; + i) + include="$OPTARG" + ;; + a) + arch="$OPTARG" + ;; + d) + strictDebootstrap=1 + ;; + s) + skipDetection=1 + ;; + t) + justTar=1 + ;; + ?) + usage + exit 0 + ;; + esac +done +shift $(($OPTIND - 1)) + +repo="$1" +suite="$2" +mirror="${3:-}" # stick to the default debootstrap mirror if one is not provided + +if [ ! "$repo" ] || [ ! 
"$suite" ]; then + usage + exit 1 +fi + +# some rudimentary detection for whether we need to "sudo" our docker calls +docker='' +if docker version > /dev/null 2>&1; then + docker='docker' +elif sudo docker version > /dev/null 2>&1; then + docker='sudo docker' +elif command -v docker > /dev/null 2>&1; then + docker='docker' +else + echo >&2 "warning: either docker isn't installed, or your current user cannot run it;" + echo >&2 " this script is not likely to work as expected" + sleep 3 + docker='docker' # give us a command-not-found later +fi + +# make sure we have an absolute path to our final tarball so we can still reference it properly after we change directory +if [ "$justTar" ]; then + if [ ! -d "$(dirname "$repo")" ]; then + echo >&2 "error: $(dirname "$repo") does not exist" + exit 1 + fi + repo="$(cd "$(dirname "$repo")" && pwd -P)/$(basename "$repo")" +fi + +# will be filled in later, if [ -z "$skipDetection" ] +lsbDist='' + +target="${TMPDIR:-/var/tmp}/docker-rootfs-debootstrap-$suite-$$-$RANDOM" + +cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" +returnTo="$(pwd -P)" + +if [ "$suite" = 'lucid' ]; then + # lucid fails and doesn't include gpgv in minbase; "apt-get update" fails + include+=',gpgv' +fi + +set -x + +# bootstrap +mkdir -p "$target" +sudo http_proxy=$http_proxy debootstrap --verbose --variant="$variant" --include="$include" --arch="$arch" "$suite" "$target" "$mirror" + +cd "$target" + +if [ -z "$strictDebootstrap" ]; then + # prevent init scripts from running during install/update + # policy-rc.d (for most scripts) + echo $'#!/bin/sh\nexit 101' | sudo tee usr/sbin/policy-rc.d > /dev/null + sudo chmod +x usr/sbin/policy-rc.d + # initctl (for some pesky upstart scripts) + sudo chroot . dpkg-divert --local --rename --add /sbin/initctl + sudo ln -sf /bin/true sbin/initctl + # see https://github.com/docker/docker/issues/446#issuecomment-16953173 + + # shrink the image, since apt makes us fat (wheezy: ~157.5MB vs ~120MB) + sudo chroot . 
apt-get clean + + if strings usr/bin/dpkg | grep -q unsafe-io; then + # while we're at it, apt is unnecessarily slow inside containers + # this forces dpkg not to call sync() after package extraction and speeds up install + # the benefit is huge on spinning disks, and the penalty is nonexistent on SSD or decent server virtualization + echo 'force-unsafe-io' | sudo tee etc/dpkg/dpkg.cfg.d/02apt-speedup > /dev/null + # we have this wrapped up in an "if" because the "force-unsafe-io" + # option was added in dpkg 1.15.8.6 + # (see http://bugs.debian.org/cgi-bin/bugreport.cgi?bug=584254#82), + # and ubuntu lucid/10.04 only has 1.15.5.6 + fi + + # we want to effectively run "apt-get clean" after every install to keep images small (see output of "apt-get clean -s" for context) + { + aptGetClean='"rm -f /var/cache/apt/archives/*.deb /var/cache/apt/archives/partial/*.deb /var/cache/apt/*.bin || true";' + echo "DPkg::Post-Invoke { ${aptGetClean} };" + echo "APT::Update::Post-Invoke { ${aptGetClean} };" + echo 'Dir::Cache::pkgcache ""; Dir::Cache::srcpkgcache "";' + } | sudo tee etc/apt/apt.conf.d/no-cache > /dev/null + + # and remove the translations, too + echo 'Acquire::Languages "none";' | sudo tee etc/apt/apt.conf.d/no-languages > /dev/null + + # helpful undo lines for each the above tweaks (for lack of a better home to keep track of them): + # rm /usr/sbin/policy-rc.d + # rm /sbin/initctl; dpkg-divert --rename --remove /sbin/initctl + # rm /etc/dpkg/dpkg.cfg.d/02apt-speedup + # rm /etc/apt/apt.conf.d/no-cache + # rm /etc/apt/apt.conf.d/no-languages + + if [ -z "$skipDetection" ]; then + # see also rudimentary platform detection in hack/install.sh + lsbDist='' + if [ -r etc/lsb-release ]; then + lsbDist="$(. etc/lsb-release && echo "$DISTRIB_ID")" + fi + if [ -z "$lsbDist" ] && [ -r etc/debian_version ]; then + lsbDist='Debian' + fi + + case "$lsbDist" in + Debian) + # add the updates and security repositories + if [ "$suite" != "$debianUnstable" -a "$suite" != 'unstable' ]; then + # ${suite}-updates only applies to non-unstable + sudo sed -i "p; s/ $suite main$/ ${suite}-updates main/" etc/apt/sources.list + + # same for security updates + echo "deb http://security.debian.org/ $suite/updates main" | sudo tee -a etc/apt/sources.list > /dev/null + fi + ;; + Ubuntu) + # add the universe, updates, and security repositories + sudo sed -i " + s/ $suite main$/ $suite main universe/; p; + s/ $suite main/ ${suite}-updates main/; p; + s/ $suite-updates main/ ${suite}-security main/ + " etc/apt/sources.list + ;; + Tanglu) + # add the updates repository + if [ "$suite" = "$tangluLatest" ]; then + # ${suite}-updates only applies to stable Tanglu versions + sudo sed -i "p; s/ $suite main$/ ${suite}-updates main/" etc/apt/sources.list + fi + ;; + SteamOS) + # add contrib and non-free + sudo sed -i "s/ $suite main$/ $suite main contrib non-free/" etc/apt/sources.list + ;; + esac + fi + + # make sure our packages lists are as up to date as we can get them + sudo chroot . apt-get update + sudo chroot . apt-get dist-upgrade -y +fi + +if [ "$justTar" ]; then + # create the tarball file so it has the right permissions (ie, not root) + touch "$repo" + + # fill the tarball + sudo tar --numeric-owner -caf "$repo" . +else + # create the image (and tag $repo:$suite) + sudo tar --numeric-owner -c . 
| $docker import - $repo:$suite + + # test the image + $docker run -i -t $repo:$suite echo success + + if [ -z "$skipDetection" ]; then + case "$lsbDist" in + Debian) + if [ "$suite" = "$debianStable" -o "$suite" = 'stable' ] && [ -r etc/debian_version ]; then + # tag latest + $docker tag $repo:$suite $repo:latest + + if [ -r etc/debian_version ]; then + # tag the specific debian release version (which is only reasonable to tag on debian stable) + ver=$(cat etc/debian_version) + $docker tag $repo:$suite $repo:$ver + fi + fi + ;; + Ubuntu) + if [ "$suite" = "$ubuntuLatestLTS" ]; then + # tag latest + $docker tag $repo:$suite $repo:latest + fi + if [ -r etc/lsb-release ]; then + lsbRelease="$(. etc/lsb-release && echo "$DISTRIB_RELEASE")" + if [ "$lsbRelease" ]; then + # tag specific Ubuntu version number, if available (12.04, etc.) + $docker tag $repo:$suite $repo:$lsbRelease + fi + fi + ;; + Tanglu) + if [ "$suite" = "$tangluLatest" ]; then + # tag latest + $docker tag $repo:$suite $repo:latest + fi + if [ -r etc/lsb-release ]; then + lsbRelease="$(. etc/lsb-release && echo "$DISTRIB_RELEASE")" + if [ "$lsbRelease" ]; then + # tag specific Tanglu version number, if available (1.0, 2.0, etc.) + $docker tag $repo:$suite $repo:$lsbRelease + fi + fi + ;; + SteamOS) + if [ -r etc/lsb-release ]; then + lsbRelease="$(. etc/lsb-release && echo "$DISTRIB_RELEASE")" + if [ "$lsbRelease" ]; then + # tag specific SteamOS version number, if available (1.0, 2.0, etc.) + $docker tag $repo:$suite $repo:$lsbRelease + fi + fi + ;; + esac + fi +fi + +# cleanup +cd "$returnTo" +sudo rm -rf "$target" diff --git a/components/engine/contrib/mkimage-pld.sh b/components/engine/contrib/mkimage-pld.sh new file mode 100755 index 0000000000..615c2030a3 --- /dev/null +++ b/components/engine/contrib/mkimage-pld.sh @@ -0,0 +1,73 @@ +#!/bin/sh +# +# Generate a minimal filesystem for PLD Linux and load it into the local docker as "pld". 
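+# See the PLD page on its docker packaging: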
+# https://www.pld-linux.org/packages/docker +# +set -e + +if [ "$(id -u)" != "0" ]; then + echo >&2 "$0: requires root" + exit 1 +fi + +image_name=pld + +tmpdir=$(mktemp -d ${TMPDIR:-/var/tmp}/pld-docker-XXXXXX) +root=$tmpdir/rootfs +install -d -m 755 $root + +# to clean up: +docker rmi $image_name || : + +# build +rpm -r $root --initdb + +set +e +install -d $root/dev/pts +mknod $root/dev/random c 1 8 -m 644 +mknod $root/dev/urandom c 1 9 -m 644 +mknod $root/dev/full c 1 7 -m 666 +mknod $root/dev/null c 1 3 -m 666 +mknod $root/dev/zero c 1 5 -m 666 +mknod $root/dev/console c 5 1 -m 660 +set -e + +poldek -r $root --up --noask -u \ + --noignore \ + -O 'rpmdef=_install_langs C' \ + -O 'rpmdef=_excludedocs 1' \ + vserver-packages \ + bash iproute2 coreutils grep poldek + +# fix netsharedpath, so containers would be able to install when some paths are mounted +sed -i -e 's;^#%_netsharedpath.*;%_netsharedpath /dev/shm:/sys:/proc:/dev:/etc/hostname;' $root/etc/rpm/macros + +# no need for alternatives +poldek-config -c $root/etc/poldek/poldek.conf ignore systemd-init + +# this makes initscripts to believe network is up +touch $root/var/lock/subsys/network + +# cleanup large optional packages +remove_packages="ca-certificates" +for pkg in $remove_packages; do + rpm -r $root -q $pkg && rpm -r $root -e $pkg --nodeps +done + +# cleanup more +rm -v $root/etc/ld.so.cache +rm -rfv $root/var/cache/hrmib/* +rm -rfv $root/usr/share/man/man?/* +rm -rfv $root/usr/share/locale/*/ +rm -rfv $root/usr/share/help/*/ +rm -rfv $root/usr/share/doc/* +rm -rfv $root/usr/src/examples/* +rm -rfv $root/usr/share/pixmaps/* + +# and import +tar --numeric-owner --xattrs --acls -C $root -c . | docker import - $image_name + +# and test +docker run -i -u root $image_name /bin/echo Success. + +rm -r $tmpdir diff --git a/components/engine/contrib/mkimage-rinse.sh b/components/engine/contrib/mkimage-rinse.sh new file mode 100755 index 0000000000..7e0935062f --- /dev/null +++ b/components/engine/contrib/mkimage-rinse.sh @@ -0,0 +1,123 @@ +#!/usr/bin/env bash +# +# Create a base CentOS Docker image. + +# This script is useful on systems with rinse available (e.g., +# building a CentOS image on Debian). See contrib/mkimage-yum.sh for +# a way to build CentOS images on systems with yum installed. + +set -e + +echo >&2 +echo >&2 'warning: this script is deprecated - see mkimage.sh and mkimage/rinse' +echo >&2 + +repo="$1" +distro="$2" +mirror="$3" + +if [ ! "$repo" ] || [ ! "$distro" ]; then + self="$(basename $0)" + echo >&2 "usage: $self repo distro [mirror]" + echo >&2 + echo >&2 " ie: $self username/centos centos-5" + echo >&2 " $self username/centos centos-6" + echo >&2 + echo >&2 " ie: $self username/slc slc-5" + echo >&2 " $self username/slc slc-6" + echo >&2 + echo >&2 " ie: $self username/centos centos-5 http://vault.centos.org/5.8/os/x86_64/CentOS/" + echo >&2 " $self username/centos centos-6 http://vault.centos.org/6.3/os/x86_64/Packages/" + echo >&2 + echo >&2 'See /etc/rinse for supported values of "distro" and for examples of' + echo >&2 ' expected values of "mirror".' + echo >&2 + echo >&2 'This script is tested to work with the original upstream version of rinse,' + echo >&2 ' found at http://www.steve.org.uk/Software/rinse/ and also in Debian at' + echo >&2 ' http://packages.debian.org/wheezy/rinse -- as always, YMMV.' 
+	echo >&2
+	exit 1
+fi
+
+target="${TMPDIR:-/var/tmp}/docker-rootfs-rinse-$distro-$$-$RANDOM"
+
+cd "$(dirname "$(readlink -f "$BASH_SOURCE")")"
+returnTo="$(pwd -P)"
+
+rinseArgs=( --arch amd64 --distribution "$distro" --directory "$target" )
+if [ "$mirror" ]; then
+	rinseArgs+=( --mirror "$mirror" )
+fi
+
+set -x
+
+mkdir -p "$target"
+
+sudo rinse "${rinseArgs[@]}"
+
+cd "$target"
+
+# rinse fails a little at setting up /dev, so we'll just wipe it out and create our own
+sudo rm -rf dev
+sudo mkdir -m 755 dev
+(
+	cd dev
+	sudo ln -sf /proc/self/fd ./
+	sudo mkdir -m 755 pts
+	sudo mkdir -m 1777 shm
+	sudo mknod -m 600 console c 5 1
+	sudo mknod -m 600 initctl p
+	sudo mknod -m 666 full c 1 7
+	sudo mknod -m 666 null c 1 3
+	sudo mknod -m 666 ptmx c 5 2
+	sudo mknod -m 666 random c 1 8
+	sudo mknod -m 666 tty c 5 0
+	sudo mknod -m 666 tty0 c 4 0
+	sudo mknod -m 666 urandom c 1 9
+	sudo mknod -m 666 zero c 1 5
+)
+
+# effectively: febootstrap-minimize --keep-zoneinfo --keep-rpmdb --keep-services "$target"
+# locales
+sudo rm -rf usr/{{lib,share}/locale,{lib,lib64}/gconv,bin/localedef,sbin/build-locale-archive}
+# docs and man pages
+sudo rm -rf usr/share/{man,doc,info,gnome/help}
+# cracklib
+sudo rm -rf usr/share/cracklib
+# i18n
+sudo rm -rf usr/share/i18n
+# yum cache
+sudo rm -rf var/cache/yum
+sudo mkdir -p --mode=0755 var/cache/yum
+# sln
+sudo rm -rf sbin/sln
+# ldconfig
+#sudo rm -rf sbin/ldconfig
+sudo rm -rf etc/ld.so.cache var/cache/ldconfig
+sudo mkdir -p --mode=0755 var/cache/ldconfig
+
+# allow networking init scripts inside the container to work without extra steps
+echo 'NETWORKING=yes' | sudo tee etc/sysconfig/network > /dev/null
+
+# to restore locales later:
+#   yum reinstall glibc-common
+
+version=
+if [ -r etc/redhat-release ]; then
+	version="$(sed -E 's/^[^0-9.]*([0-9.]+).*$/\1/' etc/redhat-release)"
+elif [ -r etc/SuSE-release ]; then
+	version="$(awk '/^VERSION/ { print $3 }' etc/SuSE-release)"
+fi
+
+if [ -z "$version" ]; then
+	echo >&2 "warning: cannot autodetect OS version, using $distro as tag"
+	sleep 20
+	version="$distro"
+fi
+
+sudo tar --numeric-owner -c . | docker import - $repo:$version
+
+docker run -i -t $repo:$version echo success
+
+cd "$returnTo"
+sudo rm -rf "$target"
diff --git a/components/engine/contrib/mkimage-yum.sh b/components/engine/contrib/mkimage-yum.sh
new file mode 100755
index 0000000000..29da170480
--- /dev/null
+++ b/components/engine/contrib/mkimage-yum.sh
@@ -0,0 +1,136 @@
+#!/usr/bin/env bash
+#
+# Create a base CentOS Docker image.
+#
+# This script is useful on systems with yum installed (e.g., building
+# a CentOS image on CentOS). See contrib/mkimage-rinse.sh for a way
+# to build CentOS images on other systems.
+
+set -e
+
+usage() {
+	cat <<EOOPTS
+$(basename $0) [OPTIONS] <name>
+OPTIONS:
+  -p "<packages>"  The list of packages to install in the container.
+                   The default is blank.
+  -g "<groups>"    The groups of packages to install in the container.
+                   The default is "Core".
+  -y <yumconf>     The path to the yum config to install packages from. The
+                   default is /etc/yum.conf for Centos/RHEL and /etc/dnf/dnf.conf for Fedora
+EOOPTS
+	exit 1
+}
+
+# option defaults
+yum_config=/etc/yum.conf
+if [ -f /etc/dnf/dnf.conf ] && command -v dnf &> /dev/null; then
+	yum_config=/etc/dnf/dnf.conf
+	shopt -s expand_aliases # aliases are ignored in non-interactive bash without this
+	alias yum=dnf
+fi
+install_groups="Core"
+while getopts ":y:p:g:h" opt; do
+	case $opt in
+		y)
+			yum_config=$OPTARG
+			;;
+		h)
+			usage
+			;;
+		p)
+			install_packages="$OPTARG"
+			;;
+		g)
+			install_groups="$OPTARG"
+			;;
+		\?)
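+			# getopts reports any flag missing from the option string as "?"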
+ echo "Invalid option: -$OPTARG" + usage + ;; + esac +done +shift $((OPTIND - 1)) +name=$1 + +if [[ -z $name ]]; then + usage +fi + +target=$(mktemp -d --tmpdir $(basename $0).XXXXXX) + +set -x + +mkdir -m 755 "$target"/dev +mknod -m 600 "$target"/dev/console c 5 1 +mknod -m 600 "$target"/dev/initctl p +mknod -m 666 "$target"/dev/full c 1 7 +mknod -m 666 "$target"/dev/null c 1 3 +mknod -m 666 "$target"/dev/ptmx c 5 2 +mknod -m 666 "$target"/dev/random c 1 8 +mknod -m 666 "$target"/dev/tty c 5 0 +mknod -m 666 "$target"/dev/tty0 c 4 0 +mknod -m 666 "$target"/dev/urandom c 1 9 +mknod -m 666 "$target"/dev/zero c 1 5 + +# amazon linux yum will fail without vars set +if [ -d /etc/yum/vars ]; then + mkdir -p -m 755 "$target"/etc/yum + cp -a /etc/yum/vars "$target"/etc/yum/ +fi + +if [[ -n "$install_groups" ]]; +then + yum -c "$yum_config" --installroot="$target" --releasever=/ --setopt=tsflags=nodocs \ + --setopt=group_package_types=mandatory -y groupinstall $install_groups +fi + +if [[ -n "$install_packages" ]]; +then + yum -c "$yum_config" --installroot="$target" --releasever=/ --setopt=tsflags=nodocs \ + --setopt=group_package_types=mandatory -y install $install_packages +fi + +yum -c "$yum_config" --installroot="$target" -y clean all + +cat > "$target"/etc/sysconfig/network <&2 "warning: cannot autodetect OS version, using '$name' as tag" + version=$name +fi + +tar --numeric-owner -c -C "$target" . | docker import - $name:$version + +docker run -i -t --rm $name:$version /bin/bash -c 'echo success' + +rm -rf "$target" diff --git a/components/engine/contrib/mkimage.sh b/components/engine/contrib/mkimage.sh new file mode 100755 index 0000000000..13298c8036 --- /dev/null +++ b/components/engine/contrib/mkimage.sh @@ -0,0 +1,128 @@ +#!/usr/bin/env bash +set -e + +mkimg="$(basename "$0")" + +usage() { + echo >&2 "usage: $mkimg [-d dir] [-t tag] [--compression algo| --no-compression] script [script-args]" + echo >&2 " ie: $mkimg -t someuser/debian debootstrap --variant=minbase jessie" + echo >&2 " $mkimg -t someuser/ubuntu debootstrap --include=ubuntu-minimal --components=main,universe trusty" + echo >&2 " $mkimg -t someuser/busybox busybox-static" + echo >&2 " $mkimg -t someuser/centos:5 rinse --distribution centos-5" + echo >&2 " $mkimg -t someuser/mageia:4 mageia-urpmi --version=4" + echo >&2 " $mkimg -t someuser/mageia:4 mageia-urpmi --version=4 --mirror=http://somemirror/" + echo >&2 " $mkimg -t someuser/solaris solaris" + exit 1 +} + +scriptDir="$(dirname "$(readlink -f "$BASH_SOURCE")")/mkimage" + +os= +os=$(uname -o) + +# set up path to gnu tools if solaris +[[ $os == "Solaris" ]] && export PATH=/usr/gnu/bin:$PATH +# TODO check for gnu-tar, gnu-getopt + +# TODO requires root/sudo due to some pkg operations. sigh. +[[ $os == "Solaris" && $EUID != "0" ]] && echo >&2 "image create on Solaris requires superuser privilege" + +optTemp=$(getopt --options '+d:t:c:hC' --longoptions 'dir:,tag:,compression:,no-compression,help' --name "$mkimg" -- "$@") +eval set -- "$optTemp" +unset optTemp + +dir= +tag= +compression="auto" +while true; do + case "$1" in + -d|--dir) dir="$2" ; shift 2 ;; + -t|--tag) tag="$2" ; shift 2 ;; + --compression) compression="$2" ; shift 2 ;; + --no-compression) compression="none" ; shift 1 ;; + -h|--help) usage ;; + --) shift ; break ;; + esac +done + +script="$1" +[ "$script" ] || usage +shift + +if [ "$compression" == 'auto' ] || [ -z "$compression" ] +then + compression='xz' +fi + +[ "$compression" == 'none' ] && compression='' + +if [ ! 
-x "$scriptDir/$script" ]; then + echo >&2 "error: $script does not exist or is not executable" + echo >&2 " see $scriptDir for possible scripts" + exit 1 +fi + +# don't mistake common scripts like .febootstrap-minimize as image-creators +if [[ "$script" == .* ]]; then + echo >&2 "error: $script is a script helper, not a script" + echo >&2 " see $scriptDir for possible scripts" + exit 1 +fi + +delDir= +if [ -z "$dir" ]; then + dir="$(mktemp -d ${TMPDIR:-/var/tmp}/docker-mkimage.XXXXXXXXXX)" + delDir=1 +fi + +rootfsDir="$dir/rootfs" +( set -x; mkdir -p "$rootfsDir" ) + +# pass all remaining arguments to $script +"$scriptDir/$script" "$rootfsDir" "$@" + +# Docker mounts tmpfs at /dev and procfs at /proc so we can remove them +rm -rf "$rootfsDir/dev" "$rootfsDir/proc" +mkdir -p "$rootfsDir/dev" "$rootfsDir/proc" + +# make sure /etc/resolv.conf has something useful in it +mkdir -p "$rootfsDir/etc" +cat > "$rootfsDir/etc/resolv.conf" <<'EOF' +nameserver 8.8.8.8 +nameserver 8.8.4.4 +EOF + +tarFile="$dir/rootfs.tar${compression:+.$compression}" +touch "$tarFile" + +( + set -x + tar --numeric-owner --create --auto-compress --file "$tarFile" --directory "$rootfsDir" --transform='s,^./,,' . +) + +echo >&2 "+ cat > '$dir/Dockerfile'" +cat > "$dir/Dockerfile" <> "$dir/Dockerfile" ) + break + fi +done + +( set -x; rm -rf "$rootfsDir" ) + +if [ "$tag" ]; then + ( set -x; docker build -t "$tag" "$dir" ) +elif [ "$delDir" ]; then + # if we didn't specify a tag and we're going to delete our dir, let's just build an untagged image so that we did _something_ + ( set -x; docker build "$dir" ) +fi + +if [ "$delDir" ]; then + ( set -x; rm -rf "$dir" ) +fi diff --git a/components/engine/contrib/mkimage/.febootstrap-minimize b/components/engine/contrib/mkimage/.febootstrap-minimize new file mode 100755 index 0000000000..7749e63fb0 --- /dev/null +++ b/components/engine/contrib/mkimage/.febootstrap-minimize @@ -0,0 +1,28 @@ +#!/usr/bin/env bash +set -e + +rootfsDir="$1" +shift + +( + cd "$rootfsDir" + + # effectively: febootstrap-minimize --keep-zoneinfo --keep-rpmdb --keep-services "$target" + # locales + rm -rf usr/{{lib,share}/locale,{lib,lib64}/gconv,bin/localedef,sbin/build-locale-archive} + # docs and man pages + rm -rf usr/share/{man,doc,info,gnome/help} + # cracklib + rm -rf usr/share/cracklib + # i18n + rm -rf usr/share/i18n + # yum cache + rm -rf var/cache/yum + mkdir -p --mode=0755 var/cache/yum + # sln + rm -rf sbin/sln + # ldconfig + #rm -rf sbin/ldconfig + rm -rf etc/ld.so.cache var/cache/ldconfig + mkdir -p --mode=0755 var/cache/ldconfig +) diff --git a/components/engine/contrib/mkimage/busybox-static b/components/engine/contrib/mkimage/busybox-static new file mode 100755 index 0000000000..e15322b49d --- /dev/null +++ b/components/engine/contrib/mkimage/busybox-static @@ -0,0 +1,34 @@ +#!/usr/bin/env bash +set -e + +rootfsDir="$1" +shift + +busybox="$(which busybox 2>/dev/null || true)" +if [ -z "$busybox" ]; then + echo >&2 'error: busybox: not found' + echo >&2 ' install it with your distribution "busybox-static" package' + exit 1 +fi +if ! 
ldd "$busybox" 2>&1 | grep -q 'not a dynamic executable'; then + echo >&2 "error: '$busybox' appears to be a dynamic executable" + echo >&2 ' you should install your distribution "busybox-static" package instead' + exit 1 +fi + +mkdir -p "$rootfsDir/bin" +rm -f "$rootfsDir/bin/busybox" # just in case +cp "$busybox" "$rootfsDir/bin/busybox" + +( + cd "$rootfsDir" + + IFS=$'\n' + modules=( $(bin/busybox --list-modules) ) + unset IFS + + for module in "${modules[@]}"; do + mkdir -p "$(dirname "$module")" + ln -sf /bin/busybox "$module" + done +) diff --git a/components/engine/contrib/mkimage/debootstrap b/components/engine/contrib/mkimage/debootstrap new file mode 100755 index 0000000000..9f7d8987ad --- /dev/null +++ b/components/engine/contrib/mkimage/debootstrap @@ -0,0 +1,251 @@ +#!/usr/bin/env bash +set -e + +mkimgdeb="$(basename "$0")" +mkimg="$(dirname "$0").sh" + +usage() { + echo >&2 "usage: $mkimgdeb rootfsDir suite [debootstrap-args]" + echo >&2 " note: $mkimgdeb meant to be used from $mkimg" + exit 1 +} + +rootfsDir="$1" +if [ -z "$rootfsDir" ]; then + echo >&2 "error: rootfsDir is missing" + echo >&2 + usage +fi +shift + +# we have to do a little fancy footwork to make sure "rootfsDir" becomes the second non-option argument to debootstrap + +before=() +while [ $# -gt 0 ] && [[ "$1" == -* ]]; do + before+=( "$1" ) + shift +done + +suite="$1" +if [ -z "$suite" ]; then + echo >&2 "error: suite is missing" + echo >&2 + usage +fi +shift + +# get path to "chroot" in our current PATH +chrootPath="$(type -P chroot || :)" +if [ -z "$chrootPath" ]; then + echo >&2 "error: chroot not found. Are you root?" + echo >&2 + usage +fi + +rootfs_chroot() { + # "chroot" doesn't set PATH, so we need to set it explicitly to something our new debootstrap chroot can use appropriately! + + # set PATH and chroot away! + PATH='/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin' \ + "$chrootPath" "$rootfsDir" "$@" +} + +# allow for DEBOOTSTRAP=qemu-debootstrap ./mkimage.sh ... +: ${DEBOOTSTRAP:=debootstrap} + +( + set -x + $DEBOOTSTRAP "${before[@]}" "$suite" "$rootfsDir" "$@" +) + +# now for some Docker-specific tweaks + +# prevent init scripts from running during install/update +echo >&2 "+ echo exit 101 > '$rootfsDir/usr/sbin/policy-rc.d'" +cat > "$rootfsDir/usr/sbin/policy-rc.d" <<-'EOF' + #!/bin/sh + + # For most Docker users, "apt-get install" only happens during "docker build", + # where starting services doesn't work and often fails in humorous ways. This + # prevents those failures by stopping the services from attempting to start. + + exit 101 +EOF +chmod +x "$rootfsDir/usr/sbin/policy-rc.d" + +# prevent upstart scripts from running during install/update +( + set -x + rootfs_chroot dpkg-divert --local --rename --add /sbin/initctl + cp -a "$rootfsDir/usr/sbin/policy-rc.d" "$rootfsDir/sbin/initctl" + sed -i 's/^exit.*/exit 0/' "$rootfsDir/sbin/initctl" +) + +# shrink a little, since apt makes us cache-fat (wheezy: ~157.5MB vs ~120MB) +( set -x; rootfs_chroot apt-get clean ) + +# this file is one APT creates to make sure we don't "autoremove" our currently +# in-use kernel, which doesn't really apply to debootstraps/Docker images that +# don't even have kernels installed +rm -f "$rootfsDir/etc/apt/apt.conf.d/01autoremove-kernels" + +# Ubuntu 10.04 sucks... 
:) +if strings "$rootfsDir/usr/bin/dpkg" | grep -q unsafe-io; then + # force dpkg not to call sync() after package extraction (speeding up installs) + echo >&2 "+ echo force-unsafe-io > '$rootfsDir/etc/dpkg/dpkg.cfg.d/docker-apt-speedup'" + cat > "$rootfsDir/etc/dpkg/dpkg.cfg.d/docker-apt-speedup" <<-'EOF' + # For most Docker users, package installs happen during "docker build", which + # doesn't survive power loss and gets restarted clean afterwards anyhow, so + # this minor tweak gives us a nice speedup (much nicer on spinning disks, + # obviously). + + force-unsafe-io + EOF +fi + +if [ -d "$rootfsDir/etc/apt/apt.conf.d" ]; then + # _keep_ us lean by effectively running "apt-get clean" after every install + aptGetClean='"rm -f /var/cache/apt/archives/*.deb /var/cache/apt/archives/partial/*.deb /var/cache/apt/*.bin || true";' + echo >&2 "+ cat > '$rootfsDir/etc/apt/apt.conf.d/docker-clean'" + cat > "$rootfsDir/etc/apt/apt.conf.d/docker-clean" <<-EOF + # Since for most Docker users, package installs happen in "docker build" steps, + # they essentially become individual layers due to the way Docker handles + # layering, especially using CoW filesystems. What this means for us is that + # the caches that APT keeps end up just wasting space in those layers, making + # our layers unnecessarily large (especially since we'll normally never use + # these caches again and will instead just "docker build" again and make a brand + # new image). + + # Ideally, these would just be invoking "apt-get clean", but in our testing, + # that ended up being cyclic and we got stuck on APT's lock, so we get this fun + # creation that's essentially just "apt-get clean". + DPkg::Post-Invoke { ${aptGetClean} }; + APT::Update::Post-Invoke { ${aptGetClean} }; + + Dir::Cache::pkgcache ""; + Dir::Cache::srcpkgcache ""; + + # Note that we do realize this isn't the ideal way to do this, and are always + # open to better suggestions (https://github.com/docker/docker/issues). + EOF + + # remove apt-cache translations for fast "apt-get update" + echo >&2 "+ echo Acquire::Languages 'none' > '$rootfsDir/etc/apt/apt.conf.d/docker-no-languages'" + cat > "$rootfsDir/etc/apt/apt.conf.d/docker-no-languages" <<-'EOF' + # In Docker, we don't often need the "Translations" files, so we're just wasting + # time and space by downloading them, and this inhibits that. For users that do + # need them, it's a simple matter to delete this file and "apt-get update". :) + + Acquire::Languages "none"; + EOF + + echo >&2 "+ echo Acquire::GzipIndexes 'true' > '$rootfsDir/etc/apt/apt.conf.d/docker-gzip-indexes'" + cat > "$rootfsDir/etc/apt/apt.conf.d/docker-gzip-indexes" <<-'EOF' + # Since Docker users using "RUN apt-get update && apt-get install -y ..." in + # their Dockerfiles don't go delete the lists files afterwards, we want them to + # be as small as possible on-disk, so we explicitly request "gz" versions and + # tell Apt to keep them gzipped on-disk. + + # For comparison, an "apt-get update" layer without this on a pristine + # "debian:wheezy" base image was "29.88 MB", where with this it was only + # "8.273 MB". 
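+		# (Acquire::GzipIndexes and Acquire::CompressionTypes::Order are ordinary
+		# apt.conf(5) directives; nothing Docker-specific is being set here.)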
+
+		Acquire::GzipIndexes "true";
+		Acquire::CompressionTypes::Order:: "gz";
+	EOF
+
+	# update "autoremove" configuration to be aggressive about removing suggests deps that weren't manually installed
+	echo >&2 "+ echo Apt::AutoRemove::SuggestsImportant 'false' > '$rootfsDir/etc/apt/apt.conf.d/docker-autoremove-suggests'"
+	cat > "$rootfsDir/etc/apt/apt.conf.d/docker-autoremove-suggests" <<-'EOF'
+		# Since Docker users are looking for the smallest possible final images, the
+		# following emerges as a very common pattern:
+
+		#   RUN apt-get update \
+		#       && apt-get install -y <packages> \
+		#       && <do some compilation work> \
+		#       && apt-get purge -y --auto-remove <packages>
+
+		# By default, APT will actually _keep_ packages installed via Recommends or
+		# Depends if another package Suggests them, even and including if the package
+		# that originally caused them to be installed is removed. Setting this to
+		# "false" ensures that APT is appropriately aggressive about removing the
+		# packages it added.
+
+		# https://aptitude.alioth.debian.org/doc/en/ch02s05s05.html#configApt-AutoRemove-SuggestsImportant
+		Apt::AutoRemove::SuggestsImportant "false";
+	EOF
+fi
+
+if [ -z "$DONT_TOUCH_SOURCES_LIST" ]; then
+	# tweak sources.list, where appropriate
+	lsbDist=
+	if [ -z "$lsbDist" -a -r "$rootfsDir/etc/os-release" ]; then
+		lsbDist="$(. "$rootfsDir/etc/os-release" && echo "$ID")"
+	fi
+	if [ -z "$lsbDist" -a -r "$rootfsDir/etc/lsb-release" ]; then
+		lsbDist="$(. "$rootfsDir/etc/lsb-release" && echo "$DISTRIB_ID")"
+	fi
+	if [ -z "$lsbDist" -a -r "$rootfsDir/etc/debian_version" ]; then
+		lsbDist='Debian'
+	fi
+	# normalize to lowercase for easier matching
+	lsbDist="$(echo "$lsbDist" | tr '[:upper:]' '[:lower:]')"
+	case "$lsbDist" in
+		debian)
+			# updates and security!
+			if curl -o /dev/null -s --head --fail "http://security.debian.org/dists/$suite/updates/main/binary-$(rootfs_chroot dpkg --print-architecture)/Packages.gz"; then
+				(
+					set -x
+					sed -i "
+						p;
+						s/ $suite / ${suite}-updates /
+					" "$rootfsDir/etc/apt/sources.list"
+					echo "deb http://security.debian.org $suite/updates main" >> "$rootfsDir/etc/apt/sources.list"
+				)
+			fi
+			;;
+		ubuntu)
+			# add the updates and security repositories
+			(
+				set -x
+				sed -i "
+					p;
+					s/ $suite / ${suite}-updates /; p;
+					s/ $suite-updates / ${suite}-security /
+				" "$rootfsDir/etc/apt/sources.list"
+			)
+			;;
+		tanglu)
+			# add the updates repository
+			if [ "$suite" != 'devel' ]; then
+				(
+					set -x
+					sed -i "
+						p;
+						s/ $suite / ${suite}-updates /
+					" "$rootfsDir/etc/apt/sources.list"
+				)
+			fi
+			;;
+		steamos)
+			# add contrib and non-free if "main" is the only component
+			(
+				set -x
+				sed -i "s/ $suite main$/ $suite main contrib non-free/" "$rootfsDir/etc/apt/sources.list"
+			)
+			;;
+	esac
+fi
+
+(
+	set -x
+
+	# make sure we're fully up-to-date
+	rootfs_chroot sh -xc 'apt-get update && apt-get dist-upgrade -y'
+
+	# delete all the apt list files since they're big and get stale quickly
+	rm -rf "$rootfsDir/var/lib/apt/lists"/*
+	# this forces "apt-get update" in dependent images, which is also good
+
+	mkdir "$rootfsDir/var/lib/apt/lists/partial" # Lucid... "E: Lists directory /var/lib/apt/lists/partial is missing."
+)
diff --git a/components/engine/contrib/mkimage/mageia-urpmi b/components/engine/contrib/mkimage/mageia-urpmi
new file mode 100755
index 0000000000..93fb289cac
--- /dev/null
+++ b/components/engine/contrib/mkimage/mageia-urpmi
@@ -0,0 +1,61 @@
+#!/usr/bin/env bash
+#
+# Needs to be run from Mageia 4 or greater for kernel support for docker.
+# +# Mageia 4 does not have docker available in official repos, so please +# install and run the docker binary manually. +# +# Tested working versions are for Mageia 2 onwards (inc. cauldron). +# +set -e + +rootfsDir="$1" +shift + +optTemp=$(getopt --options '+v:,m:' --longoptions 'version:,mirror:' --name mageia-urpmi -- "$@") +eval set -- "$optTemp" +unset optTemp + +installversion= +mirror= +while true; do + case "$1" in + -v|--version) installversion="$2" ; shift 2 ;; + -m|--mirror) mirror="$2" ; shift 2 ;; + --) shift ; break ;; + esac +done + +if [ -z $installversion ]; then + # Attempt to match host version + if [ -r /etc/mageia-release ]; then + installversion="$(sed 's/^[^0-9\]*\([0-9.]\+\).*$/\1/' /etc/mageia-release)" + else + echo "Error: no version supplied and unable to detect host mageia version" + exit 1 + fi +fi + +if [ -z $mirror ]; then + # No mirror provided, default to mirrorlist + mirror="--mirrorlist https://mirrors.mageia.org/api/mageia.$installversion.x86_64.list" +fi + +( + set -x + urpmi.addmedia --distrib \ + $mirror \ + --urpmi-root "$rootfsDir" + urpmi basesystem-minimal urpmi \ + --auto \ + --no-suggests \ + --urpmi-root "$rootfsDir" \ + --root "$rootfsDir" +) + +"$(dirname "$BASH_SOURCE")/.febootstrap-minimize" "$rootfsDir" + +if [ -d "$rootfsDir/etc/sysconfig" ]; then + # allow networking init scripts inside the container to work without extra steps + echo 'NETWORKING=yes' > "$rootfsDir/etc/sysconfig/network" +fi diff --git a/components/engine/contrib/mkimage/rinse b/components/engine/contrib/mkimage/rinse new file mode 100755 index 0000000000..75eb4f0d9d --- /dev/null +++ b/components/engine/contrib/mkimage/rinse @@ -0,0 +1,25 @@ +#!/usr/bin/env bash +set -e + +rootfsDir="$1" +shift + +# specifying --arch below is safe because "$@" can override it and the "latest" one wins :) + +( + set -x + rinse --directory "$rootfsDir" --arch amd64 "$@" +) + +"$(dirname "$BASH_SOURCE")/.febootstrap-minimize" "$rootfsDir" + +if [ -d "$rootfsDir/etc/sysconfig" ]; then + # allow networking init scripts inside the container to work without extra steps + echo 'NETWORKING=yes' > "$rootfsDir/etc/sysconfig/network" +fi + +# make sure we're fully up-to-date, too +( + set -x + chroot "$rootfsDir" yum update -y +) diff --git a/components/engine/contrib/mkimage/solaris b/components/engine/contrib/mkimage/solaris new file mode 100755 index 0000000000..158970e69e --- /dev/null +++ b/components/engine/contrib/mkimage/solaris @@ -0,0 +1,89 @@ +#!/usr/bin/env bash +# +# Solaris 12 base image build script. +# +set -e + +# TODO add optional package publisher origin + +rootfsDir="$1" +shift + +# base install +( + set -x + + pkg image-create --full --zone \ + --facet facet.locale.*=false \ + --facet facet.locale.POSIX=true \ + --facet facet.doc=false \ + --facet facet.doc.*=false \ + "$rootfsDir" + + pkg -R "$rootfsDir" set-property use-system-repo true + + pkg -R "$rootfsDir" set-property flush-content-cache-on-success true + + pkg -R "$rootfsDir" install core-os +) + +# Lay in stock configuration, set up milestone +# XXX This all may become optional in a base image +( + # faster to build repository database on tmpfs + REPO_DB=/system/volatile/repository.$$ + export SVCCFG_REPOSITORY=${REPO_DB} + export SVCCFG_DOOR_PATH=$rootfsDir/system/volatile/tmp_repo_door + + # Import base manifests. NOTE These are a combination of basic requirement + # and gleaned from container milestone manifest. They may change. 
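+	# (each svccfg import below goes into the scratch repository selected via
+	# SVCCFG_REPOSITORY, i.e. the tmpfs-backed database set up above)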
+	for m in $rootfsDir/lib/svc/manifest/system/environment.xml \
+		$rootfsDir/lib/svc/manifest/system/svc/global.xml \
+		$rootfsDir/lib/svc/manifest/system/svc/restarter.xml \
+		$rootfsDir/lib/svc/manifest/network/dns/client.xml \
+		$rootfsDir/lib/svc/manifest/system/name-service/switch.xml \
+		$rootfsDir/lib/svc/manifest/system/name-service/cache.xml \
+		$rootfsDir/lib/svc/manifest/milestone/container.xml ; do
+		svccfg import $m
+	done
+
+	# Apply system layer profile, deleting unnecessary dependencies
+	svccfg apply $rootfsDir/etc/svc/profile/generic_container.xml
+
+	# XXX Even if we keep a repo in the base image, this is definitely optional
+	svccfg apply $rootfsDir/etc/svc/profile/sysconfig/container_sc.xml
+
+	for s in svc:/system/svc/restarter \
+		svc:/system/environment \
+		svc:/network/dns/client \
+		svc:/system/name-service/switch \
+		svc:/system/name-service/cache \
+		svc:/system/svc/global \
+		svc:/milestone/container ;do
+		svccfg -s $s refresh
+	done
+
+	# now copy the built up repository into the base rootfs
+	mv $REPO_DB $rootfsDir/etc/svc/repository.db
+)
+
+# pkg(1) needs the zoneproxy-client running in the container.
+# use a simple wrapper to run it as needed.
+# XXX maybe we go back to running this in SMF?
+mv "$rootfsDir/usr/bin/pkg" "$rootfsDir/usr/bin/wrapped_pkg"
+cat > "$rootfsDir/usr/bin/pkg" <<-'EOF'
+#!/bin/sh
+#
+# THIS FILE CREATED DURING DOCKER BASE IMAGE CREATION
+#
+# The Solaris base image uses the sysrepo proxy mechanism. The
+# IPS client pkg(1) requires the zoneproxy-client to reach the
+# remote publisher origins through the host. This wrapper script
+# enables and disables the proxy client as needed. This is a
+# temporary solution.
+
+/usr/lib/zones/zoneproxy-client -s localhost:1008
+PKG_SYSREPO_URL=http://localhost:1008 /usr/bin/wrapped_pkg "$@"
+pkill -9 zoneproxy-client
+EOF
+chmod +x "$rootfsDir/usr/bin/pkg"
diff --git a/components/engine/contrib/nnp-test/Dockerfile b/components/engine/contrib/nnp-test/Dockerfile
new file mode 100644
index 0000000000..026d86954f
--- /dev/null
+++ b/components/engine/contrib/nnp-test/Dockerfile
@@ -0,0 +1,9 @@
+FROM buildpack-deps:jessie
+
+COPY . /usr/src/
+
+WORKDIR /usr/src/
+
+RUN gcc -g -Wall -static nnp-test.c -o /usr/bin/nnp-test
+
+RUN chmod +s /usr/bin/nnp-test
diff --git a/components/engine/contrib/nnp-test/nnp-test.c b/components/engine/contrib/nnp-test/nnp-test.c
new file mode 100644
index 0000000000..b767da7e1a
--- /dev/null
+++ b/components/engine/contrib/nnp-test/nnp-test.c
@@ -0,0 +1,10 @@
+#include <stdio.h>
+#include <unistd.h>
+#include <sys/types.h>
+
+int main(int argc, char *argv[])
+{
+	printf("EUID=%d\n", geteuid());
+	return 0;
+}
+
diff --git a/components/engine/contrib/nuke-graph-directory.sh b/components/engine/contrib/nuke-graph-directory.sh
new file mode 100755
index 0000000000..3d2f49e869
--- /dev/null
+++ b/components/engine/contrib/nuke-graph-directory.sh
@@ -0,0 +1,64 @@
+#!/usr/bin/env bash
+set -e
+
+dir="$1"
+
+if [ -z "$dir" ]; then
+	{
+		echo 'This script is for destroying old /var/lib/docker directories more safely than'
+		echo '  "rm -rf", which can cause data loss or other serious issues.'
+		echo
+		echo "usage: $0 directory"
+		echo "   ie: $0 /var/lib/docker"
+	} >&2
+	exit 1
+fi
+
+if [ "$(id -u)" != 0 ]; then
+	echo >&2 "error: $0 must be run as root"
+	exit 1
+fi
+
+if [ ! -d "$dir" ]; then
+	echo >&2 "error: $dir is not a directory"
+	exit 1
+fi
+
+dir="$(readlink -f "$dir")"
+
+echo
+echo "Nuking $dir ..."
+echo '  (if this is wrong, press Ctrl+C NOW!)'
+echo
+
+( set -x; sleep 10 )
+echo
+
+dir_in_dir() {
+	inner="$1"
+	outer="$2"
+	[ "${inner#$outer}" != "$inner" ]
+}
+
+# let's start by unmounting any submounts in $dir
+# (like -v /home:... for example - DON'T DELETE MY HOME DIRECTORY BRU!)
+for mount in $(awk '{ print $5 }' /proc/self/mountinfo); do
+	mount="$(readlink -f "$mount" || true)"
+	if [ "$dir" != "$mount" ] && dir_in_dir "$mount" "$dir"; then
+		( set -x; umount -f "$mount" )
+	fi
+done
+
+# now, let's go destroy individual btrfs subvolumes, if any exist
+if command -v btrfs > /dev/null 2>&1; then
+	# Find btrfs subvolumes under $dir checking for inode 256
+	# Source: http://stackoverflow.com/a/32865333
+	for subvol in $(find "$dir" -type d -inum 256 | sort -r); do
+		if [ "$dir" != "$subvol" ]; then
+			( set -x; btrfs subvolume delete "$subvol" )
+		fi
+	done
+fi
+
+# finally, DESTROY ALL THINGS
+( shopt -s dotglob; set -x; rm -rf "$dir"/* )
diff --git a/components/engine/contrib/project-stats.sh b/components/engine/contrib/project-stats.sh
new file mode 100755
index 0000000000..2691c72ffb
--- /dev/null
+++ b/components/engine/contrib/project-stats.sh
@@ -0,0 +1,22 @@
+#!/usr/bin/env bash
+
+## Run this script from the root of the docker repository
+## to query project stats useful to the maintainers.
+## You will need to install `pulls` and `issues` from
+## https://github.com/crosbymichael/pulls
+
+set -e
+
+echo -n "Open pulls: "
+PULLS=$(pulls | wc -l); let PULLS=$PULLS-1
+echo $PULLS
+
+echo -n "Pulls alru: "
+pulls alru
+
+echo -n "Open issues: "
+ISSUES=$(issues list | wc -l); let ISSUES=$ISSUES-1
+echo $ISSUES
+
+echo -n "Issues alru: "
+issues alru
diff --git a/components/engine/contrib/report-issue.sh b/components/engine/contrib/report-issue.sh
new file mode 100755
index 0000000000..cb54f1a5bc
--- /dev/null
+++ b/components/engine/contrib/report-issue.sh
@@ -0,0 +1,105 @@
+#!/usr/bin/env bash
+# (bash, not sh: the script below relies on "function", "read -n",
+# C-style for loops, and substring expansion)
+
+# This is a convenience script for reporting issues that include a base
+# template of information. See https://github.com/docker/docker/pull/8845
+
+set -e
+
+DOCKER_ISSUE_URL=${DOCKER_ISSUE_URL:-"https://github.com/docker/docker/issues/new"}
+DOCKER_ISSUE_NAME_PREFIX=${DOCKER_ISSUE_NAME_PREFIX:-"Report: "}
+DOCKER=${DOCKER:-"docker"}
+DOCKER_COMMAND="${DOCKER}"
+export DOCKER_COMMAND
+
+# pulled from https://gist.github.com/cdown/1163649
+function urlencode() {
+	# urlencode <string>
+
+	local length="${#1}"
+	for (( i = 0; i < length; i++ )); do
+		local c="${1:i:1}"
+		case $c in
+			[a-zA-Z0-9.~_-]) printf "$c" ;;
+			*) printf '%%%02X' "'$c"
+		esac
+	done
+}
+
+function template() {
+# this should always match the template from CONTRIBUTING.md
+	cat <<- EOM
+	Description of problem:
+
+
+	\`docker version\`:
+	`${DOCKER_COMMAND} -D version`
+
+
+	\`docker info\`:
+	`${DOCKER_COMMAND} -D info`
+
+
+	\`uname -a\`:
+	`uname -a`
+
+
+	Environment details (AWS, VirtualBox, physical, etc.):
+
+
+	How reproducible:
+
+
+	Steps to Reproduce:
+	1.
+	2.
+	3.
+
+
+	Actual Results:
+
+
+	Expected Results:
+
+
+	Additional info:
+
+
+	EOM
}
+
+function format_issue_url() {
+	if [ ${#@} -ne 2 ] ; then
+		return 1
+	fi
+	local issue_name=$(urlencode "${DOCKER_ISSUE_NAME_PREFIX}${1}")
+	local issue_body=$(urlencode "${2}")
+	echo "${DOCKER_ISSUE_URL}?title=${issue_name}&body=${issue_body}"
+}
+
+
+echo -ne "Do you use \`sudo\` to call docker? [y|N]: "
[y|N]: " +read -r -n 1 use_sudo +echo "" + +if [ "x${use_sudo}" = "xy" -o "x${use_sudo}" = "xY" ]; then + export DOCKER_COMMAND="sudo ${DOCKER}" +fi + +echo -ne "Title of new issue?: " +read -r issue_title +echo "" + +issue_url=$(format_issue_url "${issue_title}" "$(template)") + +if which xdg-open 2>/dev/null >/dev/null ; then + echo -ne "Would like to launch this report in your browser? [Y|n]: " + read -r -n 1 launch_now + echo "" + + if [ "${launch_now}" != "n" -a "${launch_now}" != "N" ]; then + xdg-open "${issue_url}" + fi +fi + +echo "If you would like to manually open the url, you can open this link if your browser: ${issue_url}" + diff --git a/components/engine/contrib/reprepro/suites.sh b/components/engine/contrib/reprepro/suites.sh new file mode 100755 index 0000000000..badb34af9d --- /dev/null +++ b/components/engine/contrib/reprepro/suites.sh @@ -0,0 +1,12 @@ +#!/usr/bin/env bash +set -e + +cd "$(dirname "$BASH_SOURCE")/../.." + +targets_from() { + git fetch -q https://github.com/docker/docker.git "$1" + git ls-tree -r --name-only "$(git rev-parse FETCH_HEAD)" contrib/builder/deb/ | grep '/Dockerfile$' | sed -r 's!^contrib/builder/deb/|^contrib/builder/deb/amd64/|-debootstrap|/Dockerfile$!!g' | grep -v / +} + +release_branch=$(git ls-remote --heads https://github.com/docker/docker.git | awk -F 'refs/heads/' '$2 ~ /^release/ { print $2 }' | sort -V | tail -1) +{ targets_from master; targets_from "$release_branch"; } | sort -u diff --git a/components/engine/contrib/selinux-fedora-24/docker-engine-selinux/LICENSE b/components/engine/contrib/selinux-fedora-24/docker-engine-selinux/LICENSE new file mode 100644 index 0000000000..d511905c16 --- /dev/null +++ b/components/engine/contrib/selinux-fedora-24/docker-engine-selinux/LICENSE @@ -0,0 +1,339 @@ + GNU GENERAL PUBLIC LICENSE + Version 2, June 1991 + + Copyright (C) 1989, 1991 Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The licenses for most software are designed to take away your +freedom to share and change it. By contrast, the GNU General Public +License is intended to guarantee your freedom to share and change free +software--to make sure the software is free for all its users. This +General Public License applies to most of the Free Software +Foundation's software and to any other program whose authors commit to +using it. (Some other Free Software Foundation software is covered by +the GNU Lesser General Public License instead.) You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +this service if you wish), that you receive source code or can get it +if you want it, that you can change the software or use pieces of it +in new free programs; and that you know you can do these things. + + To protect your rights, we need to make restrictions that forbid +anyone to deny you these rights or to ask you to surrender the rights. +These restrictions translate to certain responsibilities for you if you +distribute copies of the software, or if you modify it. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must give the recipients all the rights that +you have. 
You must make sure that they, too, receive or can get the +source code. And you must show them these terms so they know their +rights. + + We protect your rights with two steps: (1) copyright the software, and +(2) offer you this license which gives you legal permission to copy, +distribute and/or modify the software. + + Also, for each author's protection and ours, we want to make certain +that everyone understands that there is no warranty for this free +software. If the software is modified by someone else and passed on, we +want its recipients to know that what they have is not the original, so +that any problems introduced by others will not reflect on the original +authors' reputations. + + Finally, any free program is threatened constantly by software +patents. We wish to avoid the danger that redistributors of a free +program will individually obtain patent licenses, in effect making the +program proprietary. To prevent this, we have made it clear that any +patent must be licensed for everyone's free use or not licensed at all. + + The precise terms and conditions for copying, distribution and +modification follow. + + GNU GENERAL PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. This License applies to any program or other work which contains +a notice placed by the copyright holder saying it may be distributed +under the terms of this General Public License. The "Program", below, +refers to any such program or work, and a "work based on the Program" +means either the Program or any derivative work under copyright law: +that is to say, a work containing the Program or a portion of it, +either verbatim or with modifications and/or translated into another +language. (Hereinafter, translation is included without limitation in +the term "modification".) Each licensee is addressed as "you". + +Activities other than copying, distribution and modification are not +covered by this License; they are outside its scope. The act of +running the Program is not restricted, and the output from the Program +is covered only if its contents constitute a work based on the +Program (independent of having been made by running the Program). +Whether that is true depends on what the Program does. + + 1. You may copy and distribute verbatim copies of the Program's +source code as you receive it, in any medium, provided that you +conspicuously and appropriately publish on each copy an appropriate +copyright notice and disclaimer of warranty; keep intact all the +notices that refer to this License and to the absence of any warranty; +and give any other recipients of the Program a copy of this License +along with the Program. + +You may charge a fee for the physical act of transferring a copy, and +you may at your option offer warranty protection in exchange for a fee. + + 2. You may modify your copy or copies of the Program or any portion +of it, thus forming a work based on the Program, and copy and +distribute such modifications or work under the terms of Section 1 +above, provided that you also meet all of these conditions: + + a) You must cause the modified files to carry prominent notices + stating that you changed the files and the date of any change. + + b) You must cause any work that you distribute or publish, that in + whole or in part contains or is derived from the Program or any + part thereof, to be licensed as a whole at no charge to all third + parties under the terms of this License. 
+ + c) If the modified program normally reads commands interactively + when run, you must cause it, when started running for such + interactive use in the most ordinary way, to print or display an + announcement including an appropriate copyright notice and a + notice that there is no warranty (or else, saying that you provide + a warranty) and that users may redistribute the program under + these conditions, and telling the user how to view a copy of this + License. (Exception: if the Program itself is interactive but + does not normally print such an announcement, your work based on + the Program is not required to print an announcement.) + +These requirements apply to the modified work as a whole. If +identifiable sections of that work are not derived from the Program, +and can be reasonably considered independent and separate works in +themselves, then this License, and its terms, do not apply to those +sections when you distribute them as separate works. But when you +distribute the same sections as part of a whole which is a work based +on the Program, the distribution of the whole must be on the terms of +this License, whose permissions for other licensees extend to the +entire whole, and thus to each and every part regardless of who wrote it. + +Thus, it is not the intent of this section to claim rights or contest +your rights to work written entirely by you; rather, the intent is to +exercise the right to control the distribution of derivative or +collective works based on the Program. + +In addition, mere aggregation of another work not based on the Program +with the Program (or with a work based on the Program) on a volume of +a storage or distribution medium does not bring the other work under +the scope of this License. + + 3. You may copy and distribute the Program (or a work based on it, +under Section 2) in object code or executable form under the terms of +Sections 1 and 2 above provided that you also do one of the following: + + a) Accompany it with the complete corresponding machine-readable + source code, which must be distributed under the terms of Sections + 1 and 2 above on a medium customarily used for software interchange; or, + + b) Accompany it with a written offer, valid for at least three + years, to give any third party, for a charge no more than your + cost of physically performing source distribution, a complete + machine-readable copy of the corresponding source code, to be + distributed under the terms of Sections 1 and 2 above on a medium + customarily used for software interchange; or, + + c) Accompany it with the information you received as to the offer + to distribute corresponding source code. (This alternative is + allowed only for noncommercial distribution and only if you + received the program in object code or executable form with such + an offer, in accord with Subsection b above.) + +The source code for a work means the preferred form of the work for +making modifications to it. For an executable work, complete source +code means all the source code for all modules it contains, plus any +associated interface definition files, plus the scripts used to +control compilation and installation of the executable. However, as a +special exception, the source code distributed need not include +anything that is normally distributed (in either source or binary +form) with the major components (compiler, kernel, and so on) of the +operating system on which the executable runs, unless that component +itself accompanies the executable. 
+ +If distribution of executable or object code is made by offering +access to copy from a designated place, then offering equivalent +access to copy the source code from the same place counts as +distribution of the source code, even though third parties are not +compelled to copy the source along with the object code. + + 4. You may not copy, modify, sublicense, or distribute the Program +except as expressly provided under this License. Any attempt +otherwise to copy, modify, sublicense or distribute the Program is +void, and will automatically terminate your rights under this License. +However, parties who have received copies, or rights, from you under +this License will not have their licenses terminated so long as such +parties remain in full compliance. + + 5. You are not required to accept this License, since you have not +signed it. However, nothing else grants you permission to modify or +distribute the Program or its derivative works. These actions are +prohibited by law if you do not accept this License. Therefore, by +modifying or distributing the Program (or any work based on the +Program), you indicate your acceptance of this License to do so, and +all its terms and conditions for copying, distributing or modifying +the Program or works based on it. + + 6. Each time you redistribute the Program (or any work based on the +Program), the recipient automatically receives a license from the +original licensor to copy, distribute or modify the Program subject to +these terms and conditions. You may not impose any further +restrictions on the recipients' exercise of the rights granted herein. +You are not responsible for enforcing compliance by third parties to +this License. + + 7. If, as a consequence of a court judgment or allegation of patent +infringement or for any other reason (not limited to patent issues), +conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot +distribute so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you +may not distribute the Program at all. For example, if a patent +license would not permit royalty-free redistribution of the Program by +all those who receive copies directly or indirectly through you, then +the only way you could satisfy both it and this License would be to +refrain entirely from distribution of the Program. + +If any portion of this section is held invalid or unenforceable under +any particular circumstance, the balance of the section is intended to +apply and the section as a whole is intended to apply in other +circumstances. + +It is not the purpose of this section to induce you to infringe any +patents or other property right claims or to contest validity of any +such claims; this section has the sole purpose of protecting the +integrity of the free software distribution system, which is +implemented by public license practices. Many people have made +generous contributions to the wide range of software distributed +through that system in reliance on consistent application of that +system; it is up to the author/donor to decide if he or she is willing +to distribute software through any other system and a licensee cannot +impose that choice. + +This section is intended to make thoroughly clear what is believed to +be a consequence of the rest of this License. + + 8. 
If the distribution and/or use of the Program is restricted in +certain countries either by patents or by copyrighted interfaces, the +original copyright holder who places the Program under this License +may add an explicit geographical distribution limitation excluding +those countries, so that distribution is permitted only in or among +countries not thus excluded. In such case, this License incorporates +the limitation as if written in the body of this License. + + 9. The Free Software Foundation may publish revised and/or new versions +of the General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + +Each version is given a distinguishing version number. If the Program +specifies a version number of this License which applies to it and "any +later version", you have the option of following the terms and conditions +either of that version or of any later version published by the Free +Software Foundation. If the Program does not specify a version number of +this License, you may choose any version ever published by the Free Software +Foundation. + + 10. If you wish to incorporate parts of the Program into other free +programs whose distribution conditions are different, write to the author +to ask for permission. For software which is copyrighted by the Free +Software Foundation, write to the Free Software Foundation; we sometimes +make exceptions for this. Our decision will be guided by the two goals +of preserving the free status of all derivatives of our free software and +of promoting the sharing and reuse of software generally. + + NO WARRANTY + + 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY +FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN +OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES +PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED +OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS +TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE +PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, +REPAIR OR CORRECTION. + + 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR +REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, +INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING +OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED +TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY +YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER +PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE +POSSIBILITY OF SUCH DAMAGES. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +convey the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. 
+ + + Copyright (C) + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License along + with this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +Also add information on how to contact you by electronic and paper mail. + +If the program is interactive, make it output a short notice like this +when it starts in an interactive mode: + + Gnomovision version 69, Copyright (C) year name of author + Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, the commands you use may +be called something other than `show w' and `show c'; they could even be +mouse-clicks or menu items--whatever suits your program. + +You should also get your employer (if you work as a programmer) or your +school, if any, to sign a "copyright disclaimer" for the program, if +necessary. Here is a sample; alter the names: + + Yoyodyne, Inc., hereby disclaims all copyright interest in the program + `Gnomovision' (which makes passes at compilers) written by James Hacker. + + , 1 April 1989 + Ty Coon, President of Vice + +This General Public License does not permit incorporating your program into +proprietary programs. If your program is a subroutine library, you may +consider it more useful to permit linking proprietary applications with the +library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. 
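A sketch of the typical build-and-load cycle for the SELinux module packaged by the Makefile below, assuming a Fedora host where the selinux-policy-devel package provides /usr/share/selinux/devel/Makefile (the SHAREDIR default used here):

#!/usr/bin/env bash
# Minimal sketch: compile docker.te into a policy package and load it.
# Assumes the refpolicy devel Makefile is installed at the SHAREDIR
# default; semodule accepts bzip2-compressed packages directly.
set -e

make                             # docker.te -> docker.pp, then bzip2 -9 -> docker.pp.bz2
sudo semodule -i docker.pp.bz2   # load (or replace) the module in the running policy
sudo semodule -l | grep docker   # confirm it is now listed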
diff --git a/components/engine/contrib/selinux-fedora-24/docker-engine-selinux/Makefile b/components/engine/contrib/selinux-fedora-24/docker-engine-selinux/Makefile new file mode 100644 index 0000000000..16df33ef32 --- /dev/null +++ b/components/engine/contrib/selinux-fedora-24/docker-engine-selinux/Makefile @@ -0,0 +1,23 @@ +TARGETS?=docker +MODULES?=${TARGETS:=.pp.bz2} +SHAREDIR?=/usr/share + +all: ${TARGETS:=.pp.bz2} + +%.pp.bz2: %.pp + @echo Compressing $^ -\> $@ + bzip2 -9 $^ + +%.pp: %.te + make -f ${SHAREDIR}/selinux/devel/Makefile $@ + +clean: + rm -f *~ *.tc *.pp *.pp.bz2 + rm -rf tmp *.tar.gz + +man: install + sepolicy manpage --domain ${TARGETS}_t + +install: + semodule -i ${TARGETS} + diff --git a/components/engine/contrib/selinux-fedora-24/docker-engine-selinux/README.md b/components/engine/contrib/selinux-fedora-24/docker-engine-selinux/README.md new file mode 100644 index 0000000000..7ea3117a89 --- /dev/null +++ b/components/engine/contrib/selinux-fedora-24/docker-engine-selinux/README.md @@ -0,0 +1 @@ +SELinux policy for docker diff --git a/components/engine/contrib/selinux-fedora-24/docker-engine-selinux/docker.fc b/components/engine/contrib/selinux-fedora-24/docker-engine-selinux/docker.fc new file mode 100644 index 0000000000..d6cb0e5792 --- /dev/null +++ b/components/engine/contrib/selinux-fedora-24/docker-engine-selinux/docker.fc @@ -0,0 +1,29 @@ +/root/\.docker gen_context(system_u:object_r:docker_home_t,s0) + +/usr/bin/docker -- gen_context(system_u:object_r:docker_exec_t,s0) +/usr/bin/docker-novolume-plugin -- gen_context(system_u:object_r:docker_auth_exec_t,s0) +/usr/lib/docker/docker-novolume-plugin -- gen_context(system_u:object_r:docker_auth_exec_t,s0) + +/usr/lib/systemd/system/docker.service -- gen_context(system_u:object_r:docker_unit_file_t,s0) +/usr/lib/systemd/system/docker-novolume-plugin.service -- gen_context(system_u:object_r:docker_unit_file_t,s0) + +/etc/docker(/.*)? gen_context(system_u:object_r:docker_config_t,s0) + +/var/lib/docker(/.*)? gen_context(system_u:object_r:docker_var_lib_t,s0) +/var/lib/kublet(/.*)? gen_context(system_u:object_r:docker_var_lib_t,s0) +/var/lib/docker/vfs(/.*)? gen_context(system_u:object_r:svirt_sandbox_file_t,s0) + +/var/run/docker(/.*)? gen_context(system_u:object_r:docker_var_run_t,s0) +/var/run/docker\.pid -- gen_context(system_u:object_r:docker_var_run_t,s0) +/var/run/docker\.sock -s gen_context(system_u:object_r:docker_var_run_t,s0) +/var/run/docker-client(/.*)? gen_context(system_u:object_r:docker_var_run_t,s0) +/var/run/docker/plugins(/.*)? gen_context(system_u:object_r:docker_plugin_var_run_t,s0) + +/var/lock/lxc(/.*)? gen_context(system_u:object_r:docker_lock_t,s0) + +/var/log/lxc(/.*)? gen_context(system_u:object_r:docker_log_t,s0) + +/var/lib/docker/init(/.*)? gen_context(system_u:object_r:docker_share_t,s0) +/var/lib/docker/containers/.*/hosts gen_context(system_u:object_r:docker_share_t,s0) +/var/lib/docker/containers/.*/hostname gen_context(system_u:object_r:docker_share_t,s0) +/var/lib/docker/.*/config\.env gen_context(system_u:object_r:docker_share_t,s0) diff --git a/components/engine/contrib/selinux-fedora-24/docker-engine-selinux/docker.if b/components/engine/contrib/selinux-fedora-24/docker-engine-selinux/docker.if new file mode 100644 index 0000000000..e087e8b98b --- /dev/null +++ b/components/engine/contrib/selinux-fedora-24/docker-engine-selinux/docker.if @@ -0,0 +1,523 @@ + +## The open-source application container engine. 
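The file-context entries in docker.fc above only take effect on disk after a relabel. A minimal sketch using standard policycoreutils tools, run after the module is loaded (paths mirror the .fc entries above):

#!/usr/bin/env bash
# Sketch: check and apply the labels declared in docker.fc above.
# matchpathcon prints the context the loaded policy assigns to a path;
# restorecon rewrites the on-disk labels to match.
matchpathcon /usr/bin/docker /var/run/docker.sock   # expect docker_exec_t and docker_var_run_t
sudo restorecon -R -v /etc/docker /var/lib/docker   # relabel config and state directories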
+ +######################################## +## +## Execute docker in the docker domain. +## +## +## +## Domain allowed to transition. +## +## +# +interface(`docker_domtrans',` + gen_require(` + type docker_t, docker_exec_t; + ') + + corecmd_search_bin($1) + domtrans_pattern($1, docker_exec_t, docker_t) +') + +######################################## +## +## Execute docker in the caller domain. +## +## +## +## Domain allowed to transition. +## +## +# +interface(`docker_exec',` + gen_require(` + type docker_exec_t; + ') + + corecmd_search_bin($1) + can_exec($1, docker_exec_t) +') + +######################################## +## +## Search docker lib directories. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_search_lib',` + gen_require(` + type docker_var_lib_t; + ') + + allow $1 docker_var_lib_t:dir search_dir_perms; + files_search_var_lib($1) +') + +######################################## +## +## Execute docker lib directories. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_exec_lib',` + gen_require(` + type docker_var_lib_t; + ') + + allow $1 docker_var_lib_t:dir search_dir_perms; + can_exec($1, docker_var_lib_t) +') + +######################################## +## +## Read docker lib files. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_read_lib_files',` + gen_require(` + type docker_var_lib_t; + ') + + files_search_var_lib($1) + read_files_pattern($1, docker_var_lib_t, docker_var_lib_t) +') + +######################################## +## +## Read docker share files. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_read_share_files',` + gen_require(` + type docker_share_t; + ') + + files_search_var_lib($1) + list_dirs_pattern($1, docker_share_t, docker_share_t) + read_files_pattern($1, docker_share_t, docker_share_t) + read_lnk_files_pattern($1, docker_share_t, docker_share_t) +') + +###################################### +## +## Allow the specified domain to execute apache +## in the caller domain. +## +## +## +## Domain allowed access. +## +## +# +interface(`apache_exec',` + gen_require(` + type httpd_exec_t; + ') + + can_exec($1, httpd_exec_t) +') + +###################################### +## +## Allow the specified domain to execute docker shared files +## in the caller domain. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_exec_share_files',` + gen_require(` + type docker_share_t; + ') + + can_exec($1, docker_share_t) +') + +######################################## +## +## Manage docker lib files. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_manage_lib_files',` + gen_require(` + type docker_var_lib_t; + ') + + files_search_var_lib($1) + manage_files_pattern($1, docker_var_lib_t, docker_var_lib_t) + manage_lnk_files_pattern($1, docker_var_lib_t, docker_var_lib_t) +') + +######################################## +## +## Manage docker lib directories. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_manage_lib_dirs',` + gen_require(` + type docker_var_lib_t; + ') + + files_search_var_lib($1) + manage_dirs_pattern($1, docker_var_lib_t, docker_var_lib_t) +') + +######################################## +## +## Create objects in a docker var lib directory +## with an automatic type transition to +## a specified private type. +## +## +## +## Domain allowed access. +## +## +## +## +## The type of the object to create. +## +## +## +## +## The class of the object to be created. +## +## +## +## +## The name of the object being created. 
+## +## +# +interface(`docker_lib_filetrans',` + gen_require(` + type docker_var_lib_t; + ') + + filetrans_pattern($1, docker_var_lib_t, $2, $3, $4) +') + +######################################## +## +## Read docker PID files. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_read_pid_files',` + gen_require(` + type docker_var_run_t; + ') + + files_search_pids($1) + read_files_pattern($1, docker_var_run_t, docker_var_run_t) +') + +######################################## +## +## Execute docker server in the docker domain. +## +## +## +## Domain allowed to transition. +## +## +# +interface(`docker_systemctl',` + gen_require(` + type docker_t; + type docker_unit_file_t; + ') + + systemd_exec_systemctl($1) + init_reload_services($1) + systemd_read_fifo_file_passwd_run($1) + allow $1 docker_unit_file_t:file read_file_perms; + allow $1 docker_unit_file_t:service manage_service_perms; + + ps_process_pattern($1, docker_t) +') + +######################################## +## +## Read and write docker shared memory. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_rw_sem',` + gen_require(` + type docker_t; + ') + + allow $1 docker_t:sem rw_sem_perms; +') + +####################################### +## +## Read and write the docker pty type. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_use_ptys',` + gen_require(` + type docker_devpts_t; + ') + + allow $1 docker_devpts_t:chr_file rw_term_perms; +') + +####################################### +## +## Allow domain to create docker content +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_filetrans_named_content',` + + gen_require(` + type docker_var_lib_t; + type docker_share_t; + type docker_log_t; + type docker_var_run_t; + type docker_home_t; + ') + + files_pid_filetrans($1, docker_var_run_t, file, "docker.pid") + files_pid_filetrans($1, docker_var_run_t, sock_file, "docker.sock") + files_pid_filetrans($1, docker_var_run_t, dir, "docker-client") + logging_log_filetrans($1, docker_log_t, dir, "lxc") + files_var_lib_filetrans($1, docker_var_lib_t, dir, "docker") + filetrans_pattern($1, docker_var_lib_t, docker_share_t, file, "config.env") + filetrans_pattern($1, docker_var_lib_t, docker_share_t, file, "hosts") + filetrans_pattern($1, docker_var_lib_t, docker_share_t, file, "hostname") + filetrans_pattern($1, docker_var_lib_t, docker_share_t, file, "resolv.conf") + filetrans_pattern($1, docker_var_lib_t, docker_share_t, dir, "init") + userdom_admin_home_dir_filetrans($1, docker_home_t, dir, ".docker") +') + +######################################## +## +## Connect to docker over a unix stream socket. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_stream_connect',` + gen_require(` + type docker_t, docker_var_run_t; + ') + + files_search_pids($1) + stream_connect_pattern($1, docker_var_run_t, docker_var_run_t, docker_t) +') + +######################################## +## +## Connect to SPC containers over a unix stream socket. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_spc_stream_connect',` + gen_require(` + type spc_t, spc_var_run_t; + ') + + files_search_pids($1) + files_write_all_pid_sockets($1) + allow $1 spc_t:unix_stream_socket connectto; +') + +######################################## +## +## All of the rules required to administrate +## an docker environment +## +## +## +## Domain allowed access. 
+## +## +# +interface(`docker_admin',` + gen_require(` + type docker_t; + type docker_var_lib_t, docker_var_run_t; + type docker_unit_file_t; + type docker_lock_t; + type docker_log_t; + type docker_config_t; + ') + + allow $1 docker_t:process { ptrace signal_perms }; + ps_process_pattern($1, docker_t) + + admin_pattern($1, docker_config_t) + + files_search_var_lib($1) + admin_pattern($1, docker_var_lib_t) + + files_search_pids($1) + admin_pattern($1, docker_var_run_t) + + files_search_locks($1) + admin_pattern($1, docker_lock_t) + + logging_search_logs($1) + admin_pattern($1, docker_log_t) + + docker_systemctl($1) + admin_pattern($1, docker_unit_file_t) + allow $1 docker_unit_file_t:service all_service_perms; + + optional_policy(` + systemd_passwd_agent_exec($1) + systemd_read_fifo_file_passwd_run($1) + ') +') + +######################################## +## +## Execute docker_auth_exec_t in the docker_auth domain. +## +## +## +## Domain allowed to transition. +## +## +# +interface(`docker_auth_domtrans',` + gen_require(` + type docker_auth_t, docker_auth_exec_t; + ') + + corecmd_search_bin($1) + domtrans_pattern($1, docker_auth_exec_t, docker_auth_t) +') + +###################################### +## +## Execute docker_auth in the caller domain. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_auth_exec',` + gen_require(` + type docker_auth_exec_t; + ') + + corecmd_search_bin($1) + can_exec($1, docker_auth_exec_t) +') + +######################################## +## +## Connect to docker_auth over a unix stream socket. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_auth_stream_connect',` + gen_require(` + type docker_auth_t, docker_plugin_var_run_t; + ') + + files_search_pids($1) + stream_connect_pattern($1, docker_plugin_var_run_t, docker_plugin_var_run_t, docker_auth_t) +') + +######################################## +## +## docker domain typebounds calling domain. +## +## +## +## Domain to be typebound. +## +## +# +interface(`docker_typebounds',` + gen_require(` + type docker_t; + ') + + typebounds docker_t $1; +') + +######################################## +## +## Allow any docker_exec_t to be an entrypoint of this domain +## +## +## +## Domain allowed access. +## +## +## +# +interface(`docker_entrypoint',` + gen_require(` + type docker_exec_t; + ') + allow $1 docker_exec_t:file entrypoint; +') diff --git a/components/engine/contrib/selinux-fedora-24/docker-engine-selinux/docker.te b/components/engine/contrib/selinux-fedora-24/docker-engine-selinux/docker.te new file mode 100644 index 0000000000..4231688382 --- /dev/null +++ b/components/engine/contrib/selinux-fedora-24/docker-engine-selinux/docker.te @@ -0,0 +1,399 @@ +policy_module(docker, 1.0.0) + +######################################## +# +# Declarations +# + +## +##

+## Determine whether docker can
+## connect to all TCP ports.
+## </p>
+## </desc>
+gen_tunable(docker_connect_any, false) + +type docker_t; +type docker_exec_t; +init_daemon_domain(docker_t, docker_exec_t) +domain_subj_id_change_exemption(docker_t) +domain_role_change_exemption(docker_t) + +type spc_t; +domain_type(spc_t) +role system_r types spc_t; + +type docker_auth_t; +type docker_auth_exec_t; +init_daemon_domain(docker_auth_t, docker_auth_exec_t) + +type spc_var_run_t; +files_pid_file(spc_var_run_t) + +type docker_var_lib_t; +files_type(docker_var_lib_t) + +type docker_home_t; +userdom_user_home_content(docker_home_t) + +type docker_config_t; +files_config_file(docker_config_t) + +type docker_lock_t; +files_lock_file(docker_lock_t) + +type docker_log_t; +logging_log_file(docker_log_t) + +type docker_tmp_t; +files_tmp_file(docker_tmp_t) + +type docker_tmpfs_t; +files_tmpfs_file(docker_tmpfs_t) + +type docker_var_run_t; +files_pid_file(docker_var_run_t) + +type docker_plugin_var_run_t; +files_pid_file(docker_plugin_var_run_t) + +type docker_unit_file_t; +systemd_unit_file(docker_unit_file_t) + +type docker_devpts_t; +term_pty(docker_devpts_t) + +type docker_share_t; +files_type(docker_share_t) + +######################################## +# +# docker local policy +# +allow docker_t self:capability { chown kill fowner fsetid mknod net_admin net_bind_service net_raw setfcap }; +allow docker_t self:tun_socket relabelto; +allow docker_t self:process { getattr signal_perms setrlimit setfscreate }; +allow docker_t self:fifo_file rw_fifo_file_perms; +allow docker_t self:unix_stream_socket create_stream_socket_perms; +allow docker_t self:tcp_socket create_stream_socket_perms; +allow docker_t self:udp_socket create_socket_perms; +allow docker_t self:capability2 block_suspend; + +docker_auth_stream_connect(docker_t) + +manage_files_pattern(docker_t, docker_home_t, docker_home_t) +manage_dirs_pattern(docker_t, docker_home_t, docker_home_t) +manage_lnk_files_pattern(docker_t, docker_home_t, docker_home_t) +userdom_admin_home_dir_filetrans(docker_t, docker_home_t, dir, ".docker") + +manage_dirs_pattern(docker_t, docker_config_t, docker_config_t) +manage_files_pattern(docker_t, docker_config_t, docker_config_t) +files_etc_filetrans(docker_t, docker_config_t, dir, "docker") + +manage_dirs_pattern(docker_t, docker_lock_t, docker_lock_t) +manage_files_pattern(docker_t, docker_lock_t, docker_lock_t) +files_lock_filetrans(docker_t, docker_lock_t, { dir file }, "lxc") + +manage_dirs_pattern(docker_t, docker_log_t, docker_log_t) +manage_files_pattern(docker_t, docker_log_t, docker_log_t) +manage_lnk_files_pattern(docker_t, docker_log_t, docker_log_t) +logging_log_filetrans(docker_t, docker_log_t, { dir file lnk_file }) +allow docker_t docker_log_t:dir_file_class_set { relabelfrom relabelto }; + +manage_dirs_pattern(docker_t, docker_tmp_t, docker_tmp_t) +manage_files_pattern(docker_t, docker_tmp_t, docker_tmp_t) +manage_lnk_files_pattern(docker_t, docker_tmp_t, docker_tmp_t) +files_tmp_filetrans(docker_t, docker_tmp_t, { dir file lnk_file }) + +manage_dirs_pattern(docker_t, docker_tmpfs_t, docker_tmpfs_t) +manage_files_pattern(docker_t, docker_tmpfs_t, docker_tmpfs_t) +manage_lnk_files_pattern(docker_t, docker_tmpfs_t, docker_tmpfs_t) +manage_fifo_files_pattern(docker_t, docker_tmpfs_t, docker_tmpfs_t) +manage_chr_files_pattern(docker_t, docker_tmpfs_t, docker_tmpfs_t) +manage_blk_files_pattern(docker_t, docker_tmpfs_t, docker_tmpfs_t) +allow docker_t docker_tmpfs_t:dir relabelfrom; +can_exec(docker_t, docker_tmpfs_t) +fs_tmpfs_filetrans(docker_t, docker_tmpfs_t, { dir file }) +allow 
docker_t docker_tmpfs_t:chr_file mounton;
+
+manage_dirs_pattern(docker_t, docker_share_t, docker_share_t)
+manage_files_pattern(docker_t, docker_share_t, docker_share_t)
+manage_lnk_files_pattern(docker_t, docker_share_t, docker_share_t)
+allow docker_t docker_share_t:dir_file_class_set { relabelfrom relabelto };
+
+can_exec(docker_t, docker_share_t)
+#docker_filetrans_named_content(docker_t)
+
+manage_dirs_pattern(docker_t, docker_var_lib_t, docker_var_lib_t)
+manage_chr_files_pattern(docker_t, docker_var_lib_t, docker_var_lib_t)
+manage_blk_files_pattern(docker_t, docker_var_lib_t, docker_var_lib_t)
+manage_files_pattern(docker_t, docker_var_lib_t, docker_var_lib_t)
+manage_lnk_files_pattern(docker_t, docker_var_lib_t, docker_var_lib_t)
+allow docker_t docker_var_lib_t:dir_file_class_set { relabelfrom relabelto };
+files_var_lib_filetrans(docker_t, docker_var_lib_t, { dir file lnk_file })
+
+manage_dirs_pattern(docker_t, docker_var_run_t, docker_var_run_t)
+manage_files_pattern(docker_t, docker_var_run_t, docker_var_run_t)
+manage_sock_files_pattern(docker_t, docker_var_run_t, docker_var_run_t)
+manage_lnk_files_pattern(docker_t, docker_var_run_t, docker_var_run_t)
+files_pid_filetrans(docker_t, docker_var_run_t, { dir file lnk_file sock_file })
+
+allow docker_t docker_devpts_t:chr_file { relabelfrom rw_chr_file_perms setattr_chr_file_perms };
+term_create_pty(docker_t, docker_devpts_t)
+
+kernel_read_system_state(docker_t)
+kernel_read_network_state(docker_t)
+kernel_read_all_sysctls(docker_t)
+kernel_rw_net_sysctls(docker_t)
+kernel_setsched(docker_t)
+kernel_read_all_proc(docker_t)
+
+domain_use_interactive_fds(docker_t)
+domain_dontaudit_read_all_domains_state(docker_t)
+
+corecmd_exec_bin(docker_t)
+corecmd_exec_shell(docker_t)
+
+corenet_tcp_bind_generic_node(docker_t)
+corenet_tcp_sendrecv_generic_if(docker_t)
+corenet_tcp_sendrecv_generic_node(docker_t)
+corenet_tcp_sendrecv_generic_port(docker_t)
+corenet_tcp_bind_all_ports(docker_t)
+corenet_tcp_connect_http_port(docker_t)
+corenet_tcp_connect_commplex_main_port(docker_t)
+corenet_udp_sendrecv_generic_if(docker_t)
+corenet_udp_sendrecv_generic_node(docker_t)
+corenet_udp_sendrecv_all_ports(docker_t)
+corenet_udp_bind_generic_node(docker_t)
+corenet_udp_bind_all_ports(docker_t)
+
+files_read_config_files(docker_t)
+files_dontaudit_getattr_all_dirs(docker_t)
+files_dontaudit_getattr_all_files(docker_t)
+
+fs_read_cgroup_files(docker_t)
+fs_read_tmpfs_symlinks(docker_t)
+fs_search_all(docker_t)
+fs_getattr_all_fs(docker_t)
+
+storage_raw_rw_fixed_disk(docker_t)
+
+auth_use_nsswitch(docker_t)
+auth_dontaudit_getattr_shadow(docker_t)
+
+init_read_state(docker_t)
+init_status(docker_t)
+
+logging_send_audit_msgs(docker_t)
+logging_send_syslog_msg(docker_t)
+
+miscfiles_read_localization(docker_t)
+
+mount_domtrans(docker_t)
+
+seutil_read_default_contexts(docker_t)
+seutil_read_config(docker_t)
+
+sysnet_dns_name_resolve(docker_t)
+sysnet_exec_ifconfig(docker_t)
+
+optional_policy(`
+	rpm_exec(docker_t)
+	rpm_read_db(docker_t)
+	rpm_exec(docker_t)
+')
+
+optional_policy(`
+	fstools_domtrans(docker_t)
+')
+
+optional_policy(`
+	iptables_domtrans(docker_t)
+')
+
+optional_policy(`
+	openvswitch_stream_connect(docker_t)
+')
+
+#
+# lxc rules
+#
+
+allow docker_t self:capability { dac_override setgid setpcap setuid sys_admin sys_boot sys_chroot sys_ptrace };
+
+allow docker_t self:process { getcap setcap setexec setpgid setsched signal_perms };
+
+allow docker_t self:netlink_route_socket rw_netlink_socket_perms;
+allow docker_t
self:netlink_audit_socket create_netlink_socket_perms; +allow docker_t self:unix_dgram_socket { create_socket_perms sendto }; +allow docker_t self:unix_stream_socket { create_stream_socket_perms connectto }; + +allow docker_t docker_var_lib_t:dir mounton; +allow docker_t docker_var_lib_t:chr_file mounton; +can_exec(docker_t, docker_var_lib_t) + +kernel_dontaudit_setsched(docker_t) +kernel_get_sysvipc_info(docker_t) +kernel_request_load_module(docker_t) +kernel_mounton_messages(docker_t) +kernel_mounton_all_proc(docker_t) +kernel_mounton_all_sysctls(docker_t) +kernel_unlabeled_entry_type(spc_t) +kernel_unlabeled_domtrans(docker_t, spc_t) + +dev_getattr_all(docker_t) +dev_getattr_sysfs_fs(docker_t) +dev_read_urand(docker_t) +dev_read_lvm_control(docker_t) +dev_rw_sysfs(docker_t) +dev_rw_loop_control(docker_t) +dev_rw_lvm_control(docker_t) + +files_getattr_isid_type_dirs(docker_t) +files_manage_isid_type_dirs(docker_t) +files_manage_isid_type_files(docker_t) +files_manage_isid_type_symlinks(docker_t) +files_manage_isid_type_chr_files(docker_t) +files_manage_isid_type_blk_files(docker_t) +files_exec_isid_files(docker_t) +files_mounton_isid(docker_t) +files_mounton_non_security(docker_t) +files_mounton_isid_type_chr_file(docker_t) + +fs_mount_all_fs(docker_t) +fs_unmount_all_fs(docker_t) +fs_remount_all_fs(docker_t) +files_mounton_isid(docker_t) +fs_manage_cgroup_dirs(docker_t) +fs_manage_cgroup_files(docker_t) +fs_relabelfrom_xattr_fs(docker_t) +fs_relabelfrom_tmpfs(docker_t) +fs_read_tmpfs_symlinks(docker_t) +fs_list_hugetlbfs(docker_t) + +term_use_generic_ptys(docker_t) +term_use_ptmx(docker_t) +term_getattr_pty_fs(docker_t) +term_relabel_pty_fs(docker_t) +term_mounton_unallocated_ttys(docker_t) + +modutils_domtrans_insmod(docker_t) + +systemd_status_all_unit_files(docker_t) +systemd_start_systemd_services(docker_t) + +userdom_stream_connect(docker_t) +userdom_search_user_home_content(docker_t) +userdom_read_all_users_state(docker_t) +userdom_relabel_user_home_files(docker_t) +userdom_relabel_user_tmp_files(docker_t) +userdom_relabel_user_tmp_dirs(docker_t) + +optional_policy(` + gpm_getattr_gpmctl(docker_t) +') + +optional_policy(` + dbus_system_bus_client(docker_t) + init_dbus_chat(docker_t) + init_start_transient_unit(docker_t) + + optional_policy(` + systemd_dbus_chat_logind(docker_t) + systemd_dbus_chat_machined(docker_t) + ') + + optional_policy(` + firewalld_dbus_chat(docker_t) + ') +') + +optional_policy(` + udev_read_db(docker_t) +') + +optional_policy(` + unconfined_domain(docker_t) + unconfined_typebounds(docker_t) +') + +optional_policy(` + virt_read_config(docker_t) + virt_exec(docker_t) + virt_stream_connect(docker_t) + virt_stream_connect_sandbox(docker_t) + virt_exec_sandbox_files(docker_t) + virt_manage_sandbox_files(docker_t) + virt_relabel_sandbox_filesystem(docker_t) + # for lxc + virt_transition_svirt_sandbox(docker_t, system_r) + virt_mounton_sandbox_file(docker_t) +# virt_attach_sandbox_tun_iface(docker_t) + allow docker_t svirt_sandbox_domain:tun_socket relabelfrom; + virt_sandbox_entrypoint(docker_t) +') + +tunable_policy(`docker_connect_any',` + corenet_tcp_connect_all_ports(docker_t) + corenet_sendrecv_all_packets(docker_t) + corenet_tcp_sendrecv_all_ports(docker_t) +') + +######################################## +# +# spc local policy +# +allow spc_t { docker_var_lib_t docker_share_t }:file entrypoint; +role system_r types spc_t; + +domtrans_pattern(docker_t, docker_share_t, spc_t) +domtrans_pattern(docker_t, docker_var_lib_t, spc_t) +allow docker_t spc_t:process 
{ setsched signal_perms }; +ps_process_pattern(docker_t, spc_t) +allow docker_t spc_t:socket_class_set { relabelto relabelfrom }; +filetrans_pattern(docker_t, docker_var_lib_t, docker_share_t, dir, "overlay") + +optional_policy(` + systemd_dbus_chat_machined(spc_t) +') + +optional_policy(` + dbus_chat_system_bus(spc_t) +') + +optional_policy(` + unconfined_domain_noaudit(spc_t) +') + +optional_policy(` + virt_transition_svirt_sandbox(spc_t, system_r) + virt_sandbox_entrypoint(spc_t) +') + +######################################## +# +# docker_auth local policy +# +allow docker_auth_t self:fifo_file rw_fifo_file_perms; +allow docker_auth_t self:unix_stream_socket create_stream_socket_perms; +dontaudit docker_auth_t self:capability net_admin; + +docker_stream_connect(docker_auth_t) + +manage_dirs_pattern(docker_auth_t, docker_plugin_var_run_t, docker_plugin_var_run_t) +manage_files_pattern(docker_auth_t, docker_plugin_var_run_t, docker_plugin_var_run_t) +manage_sock_files_pattern(docker_auth_t, docker_plugin_var_run_t, docker_plugin_var_run_t) +manage_lnk_files_pattern(docker_auth_t, docker_plugin_var_run_t, docker_plugin_var_run_t) +files_pid_filetrans(docker_auth_t, docker_plugin_var_run_t, { dir file lnk_file sock_file }) + +domain_use_interactive_fds(docker_auth_t) + +kernel_read_net_sysctls(docker_auth_t) + +auth_use_nsswitch(docker_auth_t) + +files_read_etc_files(docker_auth_t) + +miscfiles_read_localization(docker_auth_t) + +sysnet_dns_name_resolve(docker_auth_t) diff --git a/components/engine/contrib/selinux-oraclelinux-7/docker-engine-selinux/LICENSE b/components/engine/contrib/selinux-oraclelinux-7/docker-engine-selinux/LICENSE new file mode 100644 index 0000000000..5b6e7c66c2 --- /dev/null +++ b/components/engine/contrib/selinux-oraclelinux-7/docker-engine-selinux/LICENSE @@ -0,0 +1,340 @@ + GNU GENERAL PUBLIC LICENSE + Version 2, June 1991 + + Copyright (C) 1989, 1991 Free Software Foundation, Inc. + 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The licenses for most software are designed to take away your +freedom to share and change it. By contrast, the GNU General Public +License is intended to guarantee your freedom to share and change free +software--to make sure the software is free for all its users. This +General Public License applies to most of the Free Software +Foundation's software and to any other program whose authors commit to +using it. (Some other Free Software Foundation software is covered by +the GNU Library General Public License instead.) You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +this service if you wish), that you receive source code or can get it +if you want it, that you can change the software or use pieces of it +in new free programs; and that you know you can do these things. + + To protect your rights, we need to make restrictions that forbid +anyone to deny you these rights or to ask you to surrender the rights. +These restrictions translate to certain responsibilities for you if you +distribute copies of the software, or if you modify it. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must give the recipients all the rights that +you have. 
You must make sure that they, too, receive or can get the +source code. And you must show them these terms so they know their +rights. + + We protect your rights with two steps: (1) copyright the software, and +(2) offer you this license which gives you legal permission to copy, +distribute and/or modify the software. + + Also, for each author's protection and ours, we want to make certain +that everyone understands that there is no warranty for this free +software. If the software is modified by someone else and passed on, we +want its recipients to know that what they have is not the original, so +that any problems introduced by others will not reflect on the original +authors' reputations. + + Finally, any free program is threatened constantly by software +patents. We wish to avoid the danger that redistributors of a free +program will individually obtain patent licenses, in effect making the +program proprietary. To prevent this, we have made it clear that any +patent must be licensed for everyone's free use or not licensed at all. + + The precise terms and conditions for copying, distribution and +modification follow. + + GNU GENERAL PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. This License applies to any program or other work which contains +a notice placed by the copyright holder saying it may be distributed +under the terms of this General Public License. The "Program", below, +refers to any such program or work, and a "work based on the Program" +means either the Program or any derivative work under copyright law: +that is to say, a work containing the Program or a portion of it, +either verbatim or with modifications and/or translated into another +language. (Hereinafter, translation is included without limitation in +the term "modification".) Each licensee is addressed as "you". + +Activities other than copying, distribution and modification are not +covered by this License; they are outside its scope. The act of +running the Program is not restricted, and the output from the Program +is covered only if its contents constitute a work based on the +Program (independent of having been made by running the Program). +Whether that is true depends on what the Program does. + + 1. You may copy and distribute verbatim copies of the Program's +source code as you receive it, in any medium, provided that you +conspicuously and appropriately publish on each copy an appropriate +copyright notice and disclaimer of warranty; keep intact all the +notices that refer to this License and to the absence of any warranty; +and give any other recipients of the Program a copy of this License +along with the Program. + +You may charge a fee for the physical act of transferring a copy, and +you may at your option offer warranty protection in exchange for a fee. + + 2. You may modify your copy or copies of the Program or any portion +of it, thus forming a work based on the Program, and copy and +distribute such modifications or work under the terms of Section 1 +above, provided that you also meet all of these conditions: + + a) You must cause the modified files to carry prominent notices + stating that you changed the files and the date of any change. + + b) You must cause any work that you distribute or publish, that in + whole or in part contains or is derived from the Program or any + part thereof, to be licensed as a whole at no charge to all third + parties under the terms of this License. 
+ + c) If the modified program normally reads commands interactively + when run, you must cause it, when started running for such + interactive use in the most ordinary way, to print or display an + announcement including an appropriate copyright notice and a + notice that there is no warranty (or else, saying that you provide + a warranty) and that users may redistribute the program under + these conditions, and telling the user how to view a copy of this + License. (Exception: if the Program itself is interactive but + does not normally print such an announcement, your work based on + the Program is not required to print an announcement.) + +These requirements apply to the modified work as a whole. If +identifiable sections of that work are not derived from the Program, +and can be reasonably considered independent and separate works in +themselves, then this License, and its terms, do not apply to those +sections when you distribute them as separate works. But when you +distribute the same sections as part of a whole which is a work based +on the Program, the distribution of the whole must be on the terms of +this License, whose permissions for other licensees extend to the +entire whole, and thus to each and every part regardless of who wrote it. + +Thus, it is not the intent of this section to claim rights or contest +your rights to work written entirely by you; rather, the intent is to +exercise the right to control the distribution of derivative or +collective works based on the Program. + +In addition, mere aggregation of another work not based on the Program +with the Program (or with a work based on the Program) on a volume of +a storage or distribution medium does not bring the other work under +the scope of this License. + + 3. You may copy and distribute the Program (or a work based on it, +under Section 2) in object code or executable form under the terms of +Sections 1 and 2 above provided that you also do one of the following: + + a) Accompany it with the complete corresponding machine-readable + source code, which must be distributed under the terms of Sections + 1 and 2 above on a medium customarily used for software interchange; or, + + b) Accompany it with a written offer, valid for at least three + years, to give any third party, for a charge no more than your + cost of physically performing source distribution, a complete + machine-readable copy of the corresponding source code, to be + distributed under the terms of Sections 1 and 2 above on a medium + customarily used for software interchange; or, + + c) Accompany it with the information you received as to the offer + to distribute corresponding source code. (This alternative is + allowed only for noncommercial distribution and only if you + received the program in object code or executable form with such + an offer, in accord with Subsection b above.) + +The source code for a work means the preferred form of the work for +making modifications to it. For an executable work, complete source +code means all the source code for all modules it contains, plus any +associated interface definition files, plus the scripts used to +control compilation and installation of the executable. However, as a +special exception, the source code distributed need not include +anything that is normally distributed (in either source or binary +form) with the major components (compiler, kernel, and so on) of the +operating system on which the executable runs, unless that component +itself accompanies the executable. 
+ +If distribution of executable or object code is made by offering +access to copy from a designated place, then offering equivalent +access to copy the source code from the same place counts as +distribution of the source code, even though third parties are not +compelled to copy the source along with the object code. + + 4. You may not copy, modify, sublicense, or distribute the Program +except as expressly provided under this License. Any attempt +otherwise to copy, modify, sublicense or distribute the Program is +void, and will automatically terminate your rights under this License. +However, parties who have received copies, or rights, from you under +this License will not have their licenses terminated so long as such +parties remain in full compliance. + + 5. You are not required to accept this License, since you have not +signed it. However, nothing else grants you permission to modify or +distribute the Program or its derivative works. These actions are +prohibited by law if you do not accept this License. Therefore, by +modifying or distributing the Program (or any work based on the +Program), you indicate your acceptance of this License to do so, and +all its terms and conditions for copying, distributing or modifying +the Program or works based on it. + + 6. Each time you redistribute the Program (or any work based on the +Program), the recipient automatically receives a license from the +original licensor to copy, distribute or modify the Program subject to +these terms and conditions. You may not impose any further +restrictions on the recipients' exercise of the rights granted herein. +You are not responsible for enforcing compliance by third parties to +this License. + + 7. If, as a consequence of a court judgment or allegation of patent +infringement or for any other reason (not limited to patent issues), +conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot +distribute so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you +may not distribute the Program at all. For example, if a patent +license would not permit royalty-free redistribution of the Program by +all those who receive copies directly or indirectly through you, then +the only way you could satisfy both it and this License would be to +refrain entirely from distribution of the Program. + +If any portion of this section is held invalid or unenforceable under +any particular circumstance, the balance of the section is intended to +apply and the section as a whole is intended to apply in other +circumstances. + +It is not the purpose of this section to induce you to infringe any +patents or other property right claims or to contest validity of any +such claims; this section has the sole purpose of protecting the +integrity of the free software distribution system, which is +implemented by public license practices. Many people have made +generous contributions to the wide range of software distributed +through that system in reliance on consistent application of that +system; it is up to the author/donor to decide if he or she is willing +to distribute software through any other system and a licensee cannot +impose that choice. + +This section is intended to make thoroughly clear what is believed to +be a consequence of the rest of this License. + + 8. 
If the distribution and/or use of the Program is restricted in +certain countries either by patents or by copyrighted interfaces, the +original copyright holder who places the Program under this License +may add an explicit geographical distribution limitation excluding +those countries, so that distribution is permitted only in or among +countries not thus excluded. In such case, this License incorporates +the limitation as if written in the body of this License. + + 9. The Free Software Foundation may publish revised and/or new versions +of the General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + +Each version is given a distinguishing version number. If the Program +specifies a version number of this License which applies to it and "any +later version", you have the option of following the terms and conditions +either of that version or of any later version published by the Free +Software Foundation. If the Program does not specify a version number of +this License, you may choose any version ever published by the Free Software +Foundation. + + 10. If you wish to incorporate parts of the Program into other free +programs whose distribution conditions are different, write to the author +to ask for permission. For software which is copyrighted by the Free +Software Foundation, write to the Free Software Foundation; we sometimes +make exceptions for this. Our decision will be guided by the two goals +of preserving the free status of all derivatives of our free software and +of promoting the sharing and reuse of software generally. + + NO WARRANTY + + 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY +FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN +OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES +PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED +OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS +TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE +PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, +REPAIR OR CORRECTION. + + 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR +REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, +INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING +OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED +TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY +YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER +PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE +POSSIBILITY OF SUCH DAMAGES. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +convey the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. 
+ + + Copyright (C) + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + + +Also add information on how to contact you by electronic and paper mail. + +If the program is interactive, make it output a short notice like this +when it starts in an interactive mode: + + Gnomovision version 69, Copyright (C) year name of author + Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, the commands you use may +be called something other than `show w' and `show c'; they could even be +mouse-clicks or menu items--whatever suits your program. + +You should also get your employer (if you work as a programmer) or your +school, if any, to sign a "copyright disclaimer" for the program, if +necessary. Here is a sample; alter the names: + + Yoyodyne, Inc., hereby disclaims all copyright interest in the program + `Gnomovision' (which makes passes at compilers) written by James Hacker. + + , 1 April 1989 + Ty Coon, President of Vice + +This General Public License does not permit incorporating your program into +proprietary programs. If your program is a subroutine library, you may +consider it more useful to permit linking proprietary applications with the +library. If this is what you want to do, use the GNU Library General +Public License instead of this License. 
diff --git a/components/engine/contrib/selinux-oraclelinux-7/docker-engine-selinux/Makefile b/components/engine/contrib/selinux-oraclelinux-7/docker-engine-selinux/Makefile new file mode 100644 index 0000000000..1bdc695afe --- /dev/null +++ b/components/engine/contrib/selinux-oraclelinux-7/docker-engine-selinux/Makefile @@ -0,0 +1,16 @@ +TARGETS?=docker +MODULES?=${TARGETS:=.pp.bz2} +SHAREDIR?=/usr/share + +all: ${TARGETS:=.pp.bz2} + +%.pp.bz2: %.pp + @echo Compressing $^ -\> $@ + bzip2 -9 $^ + +%.pp: %.te + make -f ${SHAREDIR}/selinux/devel/Makefile $@ + +clean: + rm -f *~ *.tc *.pp *.pp.bz2 + rm -rf tmp *.tar.gz diff --git a/components/engine/contrib/selinux-oraclelinux-7/docker-engine-selinux/README.md b/components/engine/contrib/selinux-oraclelinux-7/docker-engine-selinux/README.md new file mode 100644 index 0000000000..7ea3117a89 --- /dev/null +++ b/components/engine/contrib/selinux-oraclelinux-7/docker-engine-selinux/README.md @@ -0,0 +1 @@ +SELinux policy for docker diff --git a/components/engine/contrib/selinux-oraclelinux-7/docker-engine-selinux/docker.fc b/components/engine/contrib/selinux-oraclelinux-7/docker-engine-selinux/docker.fc new file mode 100644 index 0000000000..467d659604 --- /dev/null +++ b/components/engine/contrib/selinux-oraclelinux-7/docker-engine-selinux/docker.fc @@ -0,0 +1,18 @@ +/root/\.docker gen_context(system_u:object_r:docker_home_t,s0) + +/usr/bin/dockerd -- gen_context(system_u:object_r:docker_exec_t,s0) + +/usr/lib/systemd/system/docker.service -- gen_context(system_u:object_r:docker_unit_file_t,s0) + +/etc/docker(/.*)? gen_context(system_u:object_r:docker_config_t,s0) + +/var/lib/docker(/.*)? gen_context(system_u:object_r:docker_var_lib_t,s0) + +/var/run/docker\.pid -- gen_context(system_u:object_r:docker_var_run_t,s0) +/var/run/docker\.sock -s gen_context(system_u:object_r:docker_var_run_t,s0) +/var/run/docker-client(/.*)? gen_context(system_u:object_r:docker_var_run_t,s0) + +/var/lib/docker/init(/.*)? gen_context(system_u:object_r:docker_share_t,s0) +/var/lib/docker/containers/.*/hosts gen_context(system_u:object_r:docker_share_t,s0) +/var/lib/docker/containers/.*/hostname gen_context(system_u:object_r:docker_share_t,s0) +/var/lib/docker/.*/config\.env gen_context(system_u:object_r:docker_share_t,s0) diff --git a/components/engine/contrib/selinux-oraclelinux-7/docker-engine-selinux/docker.if b/components/engine/contrib/selinux-oraclelinux-7/docker-engine-selinux/docker.if new file mode 100644 index 0000000000..ca075c05c5 --- /dev/null +++ b/components/engine/contrib/selinux-oraclelinux-7/docker-engine-selinux/docker.if @@ -0,0 +1,461 @@ + +## The open-source application container engine. + +######################################## +## +## Execute docker in the docker domain. +## +## +## +## Domain allowed to transition. +## +## +# +interface(`docker_domtrans',` + gen_require(` + type docker_t, docker_exec_t; + ') + + corecmd_search_bin($1) + domtrans_pattern($1, docker_exec_t, docker_t) +') + +######################################## +## +## Execute docker in the caller domain. +## +## +## +## Domain allowed to transition. +## +## +# +interface(`docker_exec',` + gen_require(` + type docker_exec_t; + ') + + corecmd_search_bin($1) + can_exec($1, docker_exec_t) +') + +######################################## +## +## Search docker lib directories. +## +## +## +## Domain allowed access. 
+## +## +# +interface(`docker_search_lib',` + gen_require(` + type docker_var_lib_t; + ') + + allow $1 docker_var_lib_t:dir search_dir_perms; + files_search_var_lib($1) +') + +######################################## +## +## Execute docker lib directories. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_exec_lib',` + gen_require(` + type docker_var_lib_t; + ') + + allow $1 docker_var_lib_t:dir search_dir_perms; + can_exec($1, docker_var_lib_t) +') + +######################################## +## +## Read docker lib files. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_read_lib_files',` + gen_require(` + type docker_var_lib_t; + ') + + files_search_var_lib($1) + read_files_pattern($1, docker_var_lib_t, docker_var_lib_t) +') + +######################################## +## +## Read docker share files. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_read_share_files',` + gen_require(` + type docker_share_t; + ') + + files_search_var_lib($1) + read_files_pattern($1, docker_share_t, docker_share_t) +') + +######################################## +## +## Manage docker lib files. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_manage_lib_files',` + gen_require(` + type docker_var_lib_t; + ') + + files_search_var_lib($1) + manage_files_pattern($1, docker_var_lib_t, docker_var_lib_t) + manage_lnk_files_pattern($1, docker_var_lib_t, docker_var_lib_t) +') + +######################################## +## +## Manage docker lib directories. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_manage_lib_dirs',` + gen_require(` + type docker_var_lib_t; + ') + + files_search_var_lib($1) + manage_dirs_pattern($1, docker_var_lib_t, docker_var_lib_t) +') + +######################################## +## +## Create objects in a docker var lib directory +## with an automatic type transition to +## a specified private type. +## +## +## +## Domain allowed access. +## +## +## +## +## The type of the object to create. +## +## +## +## +## The class of the object to be created. +## +## +## +## +## The name of the object being created. +## +## +# +interface(`docker_lib_filetrans',` + gen_require(` + type docker_var_lib_t; + ') + + filetrans_pattern($1, docker_var_lib_t, $2, $3, $4) +') + +######################################## +## +## Read docker PID files. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_read_pid_files',` + gen_require(` + type docker_var_run_t; + ') + + files_search_pids($1) + read_files_pattern($1, docker_var_run_t, docker_var_run_t) +') + +######################################## +## +## Execute docker server in the docker domain. +## +## +## +## Domain allowed to transition. +## +## +# +interface(`docker_systemctl',` + gen_require(` + type docker_t; + type docker_unit_file_t; + ') + + systemd_exec_systemctl($1) + init_reload_services($1) + systemd_read_fifo_file_passwd_run($1) + allow $1 docker_unit_file_t:file read_file_perms; + allow $1 docker_unit_file_t:service manage_service_perms; + + ps_process_pattern($1, docker_t) +') + +######################################## +## +## Read and write docker shared memory. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_rw_sem',` + gen_require(` + type docker_t; + ') + + allow $1 docker_t:sem rw_sem_perms; +') + +####################################### +## +## Read and write the docker pty type. +## +## +## +## Domain allowed access. 
+## +## +# +interface(`docker_use_ptys',` + gen_require(` + type docker_devpts_t; + ') + + allow $1 docker_devpts_t:chr_file rw_term_perms; +') + +####################################### +## +## Allow domain to create docker content +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_filetrans_named_content',` + + gen_require(` + type docker_var_lib_t; + type docker_share_t; + type docker_log_t; + type docker_var_run_t; + type docker_home_t; + ') + + files_pid_filetrans($1, docker_var_run_t, file, "docker.pid") + files_pid_filetrans($1, docker_var_run_t, sock_file, "docker.sock") + files_pid_filetrans($1, docker_var_run_t, dir, "docker-client") + files_var_lib_filetrans($1, docker_var_lib_t, dir, "docker") + filetrans_pattern($1, docker_var_lib_t, docker_share_t, file, "config.env") + filetrans_pattern($1, docker_var_lib_t, docker_share_t, file, "hosts") + filetrans_pattern($1, docker_var_lib_t, docker_share_t, file, "hostname") + filetrans_pattern($1, docker_var_lib_t, docker_share_t, file, "resolv.conf") + filetrans_pattern($1, docker_var_lib_t, docker_share_t, dir, "init") + userdom_admin_home_dir_filetrans($1, docker_home_t, dir, ".docker") +') + +######################################## +## +## Connect to docker over a unix stream socket. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_stream_connect',` + gen_require(` + type docker_t, docker_var_run_t; + ') + + files_search_pids($1) + stream_connect_pattern($1, docker_var_run_t, docker_var_run_t, docker_t) +') + +######################################## +## +## Connect to SPC containers over a unix stream socket. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_spc_stream_connect',` + gen_require(` + type spc_t, spc_var_run_t; + ') + + files_search_pids($1) + files_write_all_pid_sockets($1) + allow $1 spc_t:unix_stream_socket connectto; +') + + +######################################## +## +## All of the rules required to administrate +## an docker environment +## +## +## +## Domain allowed access. 
+## +## +# +interface(`docker_admin',` + gen_require(` + type docker_t; + type docker_var_lib_t, docker_var_run_t; + type docker_unit_file_t; + type docker_lock_t; + type docker_log_t; + type docker_config_t; + ') + + allow $1 docker_t:process { ptrace signal_perms }; + ps_process_pattern($1, docker_t) + + admin_pattern($1, docker_config_t) + + files_search_var_lib($1) + admin_pattern($1, docker_var_lib_t) + + files_search_pids($1) + admin_pattern($1, docker_var_run_t) + + files_search_locks($1) + admin_pattern($1, docker_lock_t) + + logging_search_logs($1) + admin_pattern($1, docker_log_t) + + docker_systemctl($1) + admin_pattern($1, docker_unit_file_t) + allow $1 docker_unit_file_t:service all_service_perms; + + optional_policy(` + systemd_passwd_agent_exec($1) + systemd_read_fifo_file_passwd_run($1) + ') +') + +interface(`domain_stub_named_filetrans_domain',` + gen_require(` + attribute named_filetrans_domain; + ') +') + +interface(`lvm_stub',` + gen_require(` + type lvm_t; + ') +') +interface(`staff_stub',` + gen_require(` + type staff_t; + ') +') +interface(`virt_stub_svirt_sandbox_domain',` + gen_require(` + attribute svirt_sandbox_domain; + ') +') +interface(`virt_stub_svirt_sandbox_file',` + gen_require(` + type svirt_sandbox_file_t; + ') +') +interface(`fs_dontaudit_remount_tmpfs',` + gen_require(` + type tmpfs_t; + ') + + dontaudit $1 tmpfs_t:filesystem remount; +') +interface(`dev_dontaudit_list_all_dev_nodes',` + gen_require(` + type device_t; + ') + + dontaudit $1 device_t:dir list_dir_perms; +') +interface(`kernel_unlabeled_entry_type',` + gen_require(` + type unlabeled_t; + ') + + domain_entry_file($1, unlabeled_t) +') +interface(`kernel_unlabeled_domtrans',` + gen_require(` + type unlabeled_t; + ') + + read_lnk_files_pattern($1, unlabeled_t, unlabeled_t) + domain_transition_pattern($1, unlabeled_t, $2) + type_transition $1 unlabeled_t:process $2; +') +interface(`files_write_all_pid_sockets',` + gen_require(` + attribute pidfile; + ') + + allow $1 pidfile:sock_file write_sock_file_perms; +') +interface(`dev_dontaudit_mounton_sysfs',` + gen_require(` + type sysfs_t; + ') + + dontaudit $1 sysfs_t:dir mounton; +') diff --git a/components/engine/contrib/selinux-oraclelinux-7/docker-engine-selinux/docker.te b/components/engine/contrib/selinux-oraclelinux-7/docker-engine-selinux/docker.te new file mode 100644 index 0000000000..bad0bb6e4c --- /dev/null +++ b/components/engine/contrib/selinux-oraclelinux-7/docker-engine-selinux/docker.te @@ -0,0 +1,407 @@ +policy_module(docker, 1.0.0) + +######################################## +# +# Declarations +# + +## +##
+## Determine whether docker can +## connect to all TCP ports. +##
+##
+gen_tunable(docker_connect_any, false) + +type docker_t; +type docker_exec_t; +init_daemon_domain(docker_t, docker_exec_t) +domain_subj_id_change_exemption(docker_t) +domain_role_change_exemption(docker_t) + +type spc_t; +domain_type(spc_t) +role system_r types spc_t; + +type spc_var_run_t; +files_pid_file(spc_var_run_t) + +type docker_var_lib_t; +files_type(docker_var_lib_t) + +type docker_home_t; +userdom_user_home_content(docker_home_t) + +type docker_config_t; +files_config_file(docker_config_t) + +type docker_lock_t; +files_lock_file(docker_lock_t) + +type docker_log_t; +logging_log_file(docker_log_t) + +type docker_tmp_t; +files_tmp_file(docker_tmp_t) + +type docker_tmpfs_t; +files_tmpfs_file(docker_tmpfs_t) + +type docker_var_run_t; +files_pid_file(docker_var_run_t) + +type docker_unit_file_t; +systemd_unit_file(docker_unit_file_t) + +type docker_devpts_t; +term_pty(docker_devpts_t) + +type docker_share_t; +files_type(docker_share_t) + +######################################## +# +# docker local policy +# +allow docker_t self:capability { chown kill fowner fsetid mknod net_admin net_bind_service net_raw setfcap }; +allow docker_t self:tun_socket relabelto; +allow docker_t self:process { getattr signal_perms setrlimit setfscreate }; +allow docker_t self:fifo_file rw_fifo_file_perms; +allow docker_t self:unix_stream_socket create_stream_socket_perms; +allow docker_t self:tcp_socket create_stream_socket_perms; +allow docker_t self:udp_socket create_socket_perms; +allow docker_t self:capability2 block_suspend; + +manage_files_pattern(docker_t, docker_home_t, docker_home_t) +manage_dirs_pattern(docker_t, docker_home_t, docker_home_t) +manage_lnk_files_pattern(docker_t, docker_home_t, docker_home_t) +userdom_admin_home_dir_filetrans(docker_t, docker_home_t, dir, ".docker") + +manage_dirs_pattern(docker_t, docker_config_t, docker_config_t) +manage_files_pattern(docker_t, docker_config_t, docker_config_t) +files_etc_filetrans(docker_t, docker_config_t, dir, "docker") + +manage_dirs_pattern(docker_t, docker_lock_t, docker_lock_t) +manage_files_pattern(docker_t, docker_lock_t, docker_lock_t) + +manage_dirs_pattern(docker_t, docker_log_t, docker_log_t) +manage_files_pattern(docker_t, docker_log_t, docker_log_t) +manage_lnk_files_pattern(docker_t, docker_log_t, docker_log_t) +logging_log_filetrans(docker_t, docker_log_t, { dir file lnk_file }) +allow docker_t docker_log_t:dir_file_class_set { relabelfrom relabelto }; + +manage_dirs_pattern(docker_t, docker_tmp_t, docker_tmp_t) +manage_files_pattern(docker_t, docker_tmp_t, docker_tmp_t) +manage_lnk_files_pattern(docker_t, docker_tmp_t, docker_tmp_t) +files_tmp_filetrans(docker_t, docker_tmp_t, { dir file lnk_file }) + +manage_dirs_pattern(docker_t, docker_tmpfs_t, docker_tmpfs_t) +manage_files_pattern(docker_t, docker_tmpfs_t, docker_tmpfs_t) +manage_lnk_files_pattern(docker_t, docker_tmpfs_t, docker_tmpfs_t) +manage_fifo_files_pattern(docker_t, docker_tmpfs_t, docker_tmpfs_t) +manage_chr_files_pattern(docker_t, docker_tmpfs_t, docker_tmpfs_t) +manage_blk_files_pattern(docker_t, docker_tmpfs_t, docker_tmpfs_t) +allow docker_t docker_tmpfs_t:dir relabelfrom; +can_exec(docker_t, docker_tmpfs_t) +fs_tmpfs_filetrans(docker_t, docker_tmpfs_t, { dir file }) +allow docker_t docker_tmpfs_t:chr_file mounton; + +manage_dirs_pattern(docker_t, docker_share_t, docker_share_t) +manage_files_pattern(docker_t, docker_share_t, docker_share_t) +manage_lnk_files_pattern(docker_t, docker_share_t, docker_share_t) +allow docker_t docker_share_t:dir_file_class_set { 
relabelfrom relabelto }; + +can_exec(docker_t, docker_share_t) +#docker_filetrans_named_content(docker_t) + +manage_dirs_pattern(docker_t, docker_var_lib_t, docker_var_lib_t) +manage_chr_files_pattern(docker_t, docker_var_lib_t, docker_var_lib_t) +manage_blk_files_pattern(docker_t, docker_var_lib_t, docker_var_lib_t) +manage_files_pattern(docker_t, docker_var_lib_t, docker_var_lib_t) +manage_lnk_files_pattern(docker_t, docker_var_lib_t, docker_var_lib_t) +allow docker_t docker_var_lib_t:dir_file_class_set { relabelfrom relabelto }; +files_var_lib_filetrans(docker_t, docker_var_lib_t, { dir file lnk_file }) + +manage_dirs_pattern(docker_t, docker_var_run_t, docker_var_run_t) +manage_files_pattern(docker_t, docker_var_run_t, docker_var_run_t) +manage_sock_files_pattern(docker_t, docker_var_run_t, docker_var_run_t) +manage_lnk_files_pattern(docker_t, docker_var_run_t, docker_var_run_t) +files_pid_filetrans(docker_t, docker_var_run_t, { dir file lnk_file sock_file }) + +allow docker_t docker_devpts_t:chr_file { relabelfrom rw_chr_file_perms setattr_chr_file_perms }; +term_create_pty(docker_t, docker_devpts_t) + +kernel_read_system_state(docker_t) +kernel_read_network_state(docker_t) +kernel_read_all_sysctls(docker_t) +kernel_rw_net_sysctls(docker_t) +kernel_setsched(docker_t) +kernel_read_all_proc(docker_t) + +domain_use_interactive_fds(docker_t) +domain_dontaudit_read_all_domains_state(docker_t) + +corecmd_exec_bin(docker_t) +corecmd_exec_shell(docker_t) + +corenet_tcp_bind_generic_node(docker_t) +corenet_tcp_sendrecv_generic_if(docker_t) +corenet_tcp_sendrecv_generic_node(docker_t) +corenet_tcp_sendrecv_generic_port(docker_t) +corenet_tcp_bind_all_ports(docker_t) +corenet_tcp_connect_http_port(docker_t) +corenet_tcp_connect_commplex_main_port(docker_t) +corenet_udp_sendrecv_generic_if(docker_t) +corenet_udp_sendrecv_generic_node(docker_t) +corenet_udp_sendrecv_all_ports(docker_t) +corenet_udp_bind_generic_node(docker_t) +corenet_udp_bind_all_ports(docker_t) + +files_read_config_files(docker_t) +files_dontaudit_getattr_all_dirs(docker_t) +files_dontaudit_getattr_all_files(docker_t) + +fs_read_cgroup_files(docker_t) +fs_read_tmpfs_symlinks(docker_t) +fs_search_all(docker_t) +fs_getattr_all_fs(docker_t) + +storage_raw_rw_fixed_disk(docker_t) + +auth_use_nsswitch(docker_t) +auth_dontaudit_getattr_shadow(docker_t) + +init_read_state(docker_t) +init_status(docker_t) + +logging_send_audit_msgs(docker_t) +logging_send_syslog_msg(docker_t) + +miscfiles_read_localization(docker_t) + +mount_domtrans(docker_t) + +seutil_read_default_contexts(docker_t) +seutil_read_config(docker_t) + +sysnet_dns_name_resolve(docker_t) +sysnet_exec_ifconfig(docker_t) + +optional_policy(` + rpm_exec(docker_t) + rpm_read_db(docker_t) + rpm_exec(docker_t) +') + +optional_policy(` + fstools_domtrans(docker_t) +') + +optional_policy(` + iptables_domtrans(docker_t) +') + +optional_policy(` + openvswitch_stream_connect(docker_t) +') + +allow docker_t self:capability { dac_override setgid setpcap setuid sys_admin sys_boot sys_chroot sys_ptrace }; + +allow docker_t self:process { getcap setcap setexec setpgid setsched signal_perms }; + +allow docker_t self:netlink_route_socket rw_netlink_socket_perms;; +allow docker_t self:netlink_audit_socket create_netlink_socket_perms; +allow docker_t self:unix_dgram_socket { create_socket_perms sendto }; +allow docker_t self:unix_stream_socket { create_stream_socket_perms connectto }; + +allow docker_t docker_var_lib_t:dir mounton; +allow docker_t docker_var_lib_t:chr_file mounton; 
+can_exec(docker_t, docker_var_lib_t) + +kernel_dontaudit_setsched(docker_t) +kernel_get_sysvipc_info(docker_t) +kernel_request_load_module(docker_t) +kernel_mounton_messages(docker_t) +kernel_mounton_all_proc(docker_t) +kernel_mounton_all_sysctls(docker_t) +kernel_unlabeled_entry_type(spc_t) +kernel_unlabeled_domtrans(docker_t, spc_t) + +dev_getattr_all(docker_t) +dev_getattr_sysfs_fs(docker_t) +dev_read_urand(docker_t) +dev_read_lvm_control(docker_t) +dev_rw_sysfs(docker_t) +dev_rw_loop_control(docker_t) +dev_rw_lvm_control(docker_t) + +files_getattr_isid_type_dirs(docker_t) +files_manage_isid_type_dirs(docker_t) +files_manage_isid_type_files(docker_t) +files_manage_isid_type_symlinks(docker_t) +files_manage_isid_type_chr_files(docker_t) +files_manage_isid_type_blk_files(docker_t) +files_exec_isid_files(docker_t) +files_mounton_isid(docker_t) +files_mounton_non_security(docker_t) +files_mounton_isid_type_chr_file(docker_t) + +fs_mount_all_fs(docker_t) +fs_unmount_all_fs(docker_t) +fs_remount_all_fs(docker_t) +files_mounton_isid(docker_t) +fs_manage_cgroup_dirs(docker_t) +fs_manage_cgroup_files(docker_t) +fs_relabelfrom_xattr_fs(docker_t) +fs_relabelfrom_tmpfs(docker_t) +fs_read_tmpfs_symlinks(docker_t) +fs_list_hugetlbfs(docker_t) + +term_use_generic_ptys(docker_t) +term_use_ptmx(docker_t) +term_getattr_pty_fs(docker_t) +term_relabel_pty_fs(docker_t) +term_mounton_unallocated_ttys(docker_t) + +modutils_domtrans_insmod(docker_t) + +systemd_status_all_unit_files(docker_t) +systemd_start_systemd_services(docker_t) + +userdom_stream_connect(docker_t) +userdom_search_user_home_content(docker_t) +userdom_read_all_users_state(docker_t) +userdom_relabel_user_home_files(docker_t) +userdom_relabel_user_tmp_files(docker_t) +userdom_relabel_user_tmp_dirs(docker_t) + +optional_policy(` + gpm_getattr_gpmctl(docker_t) +') + +optional_policy(` + dbus_system_bus_client(docker_t) + init_dbus_chat(docker_t) + init_start_transient_unit(docker_t) + + optional_policy(` + systemd_dbus_chat_logind(docker_t) + ') + + optional_policy(` + firewalld_dbus_chat(docker_t) + ') +') + +optional_policy(` + udev_read_db(docker_t) +') + +optional_policy(` + virt_read_config(docker_t) + virt_exec(docker_t) + virt_stream_connect(docker_t) + virt_stream_connect_sandbox(docker_t) + virt_exec_sandbox_files(docker_t) + virt_manage_sandbox_files(docker_t) + virt_relabel_sandbox_filesystem(docker_t) + virt_transition_svirt_sandbox(docker_t, system_r) + virt_mounton_sandbox_file(docker_t) +# virt_attach_sandbox_tun_iface(docker_t) + allow docker_t svirt_sandbox_domain:tun_socket relabelfrom; +') + +tunable_policy(`docker_connect_any',` + corenet_tcp_connect_all_ports(docker_t) + corenet_sendrecv_all_packets(docker_t) + corenet_tcp_sendrecv_all_ports(docker_t) +') + +######################################## +# +# spc local policy +# +domain_entry_file(spc_t, docker_share_t) +domain_entry_file(spc_t, docker_var_lib_t) +role system_r types spc_t; + +domain_entry_file(spc_t, docker_share_t) +domain_entry_file(spc_t, docker_var_lib_t) +domtrans_pattern(docker_t, docker_share_t, spc_t) +domtrans_pattern(docker_t, docker_var_lib_t, spc_t) +allow docker_t spc_t:process { setsched signal_perms }; +ps_process_pattern(docker_t, spc_t) +allow docker_t spc_t:socket_class_set { relabelto relabelfrom }; + +optional_policy(` + dbus_chat_system_bus(spc_t) +') + +optional_policy(` + unconfined_domain_noaudit(spc_t) +') + +optional_policy(` + unconfined_domain(docker_t) +') + +optional_policy(` + virt_transition_svirt_sandbox(spc_t, system_r) +') + 
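+# Usage sketch (an assumed example, not part of the policy itself): once this
+# module has been built with the accompanying Makefile (which delegates to
+# ${SHAREDIR}/selinux/devel/Makefile and bzip2-compresses the result), it can
+# be installed and the docker_connect_any tunable declared above enabled with
+# the standard tooling, e.g.:
+#
+#   semodule -i docker.pp.bz2
+#   setsebool -P docker_connect_any on
+#
+# The exact install flow depends on the distribution's selinux-policy setup.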
+######################################## +# +# docker upstream policy +# + +optional_policy(` +# domain_stub_named_filetrans_domain() + gen_require(` + attribute named_filetrans_domain; + ') + + docker_filetrans_named_content(named_filetrans_domain) +') + +optional_policy(` + lvm_stub() + docker_rw_sem(lvm_t) +') + +optional_policy(` + staff_stub() + docker_stream_connect(staff_t) + docker_exec(staff_t) +') + +optional_policy(` + virt_stub_svirt_sandbox_domain() + virt_stub_svirt_sandbox_file() + allow svirt_sandbox_domain self:netlink_kobject_uevent_socket create_socket_perms; + docker_read_share_files(svirt_sandbox_domain) + docker_lib_filetrans(svirt_sandbox_domain,svirt_sandbox_file_t, sock_file) + docker_use_ptys(svirt_sandbox_domain) + docker_spc_stream_connect(svirt_sandbox_domain) + fs_list_tmpfs(svirt_sandbox_domain) + fs_rw_hugetlbfs_files(svirt_sandbox_domain) + fs_dontaudit_remount_tmpfs(svirt_sandbox_domain) + dev_dontaudit_mounton_sysfs(svirt_sandbox_domain) + + tunable_policy(`virt_sandbox_use_fusefs',` + fs_manage_fusefs_dirs(svirt_sandbox_domain) + fs_manage_fusefs_files(svirt_sandbox_domain) + fs_manage_fusefs_symlinks(svirt_sandbox_domain) + ') + gen_require(` + attribute domain; + ') + + dontaudit svirt_sandbox_domain domain:key {search link}; +') + +optional_policy(` + gen_require(` + type pcp_pmcd_t; + ') + docker_manage_lib_files(pcp_pmcd_t) +') diff --git a/components/engine/contrib/selinux-oraclelinux-7/docker-engine-selinux/docker_selinux.8.gz b/components/engine/contrib/selinux-oraclelinux-7/docker-engine-selinux/docker_selinux.8.gz new file mode 100644 index 0000000000..ab5d59445a Binary files /dev/null and b/components/engine/contrib/selinux-oraclelinux-7/docker-engine-selinux/docker_selinux.8.gz differ diff --git a/components/engine/contrib/syntax/nano/Dockerfile.nanorc b/components/engine/contrib/syntax/nano/Dockerfile.nanorc new file mode 100644 index 0000000000..8b63dae945 --- /dev/null +++ b/components/engine/contrib/syntax/nano/Dockerfile.nanorc @@ -0,0 +1,26 @@ +## Syntax highlighting for Dockerfiles +syntax "Dockerfile" "Dockerfile[^/]*$" + +## Keywords +icolor red "^(ONBUILD\s+)?(ADD|ARG|CMD|COPY|ENTRYPOINT|ENV|EXPOSE|FROM|HEALTHCHECK|LABEL|MAINTAINER|RUN|SHELL|STOPSIGNAL|USER|VOLUME|WORKDIR)[[:space:]]" + +## Brackets & parenthesis +color brightgreen "(\(|\)|\[|\])" + +## Double ampersand +color brightmagenta "&&" + +## Comments +icolor cyan "^[[:space:]]*#.*$" + +## Blank space at EOL +color ,green "[[:space:]]+$" + +## Strings, single-quoted +color brightwhite "'([^']|(\\'))*'" "%[qw]\{[^}]*\}" "%[qw]\([^)]*\)" "%[qw]<[^>]*>" "%[qw]\[[^]]*\]" "%[qw]\$[^$]*\$" "%[qw]\^[^^]*\^" "%[qw]![^!]*!" + +## Strings, double-quoted +color brightwhite ""([^"]|(\\"))*"" "%[QW]?\{[^}]*\}" "%[QW]?\([^)]*\)" "%[QW]?<[^>]*>" "%[QW]?\[[^]]*\]" "%[QW]?\$[^$]*\$" "%[QW]?\^[^^]*\^" "%[QW]?![^!]*!" + +## Single and double quotes +color brightyellow "('|\")" diff --git a/components/engine/contrib/syntax/nano/README.md b/components/engine/contrib/syntax/nano/README.md new file mode 100644 index 0000000000..5985208b09 --- /dev/null +++ b/components/engine/contrib/syntax/nano/README.md @@ -0,0 +1,32 @@ +Dockerfile.nanorc +================= + +Dockerfile syntax highlighting for nano + +Single User Installation +------------------------ +1. Create a nano syntax directory in your home directory: + * `mkdir -p ~/.nano/syntax` + +2. Copy `Dockerfile.nanorc` to` ~/.nano/syntax/` + * `cp Dockerfile.nanorc ~/.nano/syntax/` + +3. 
Add the following to your `~/.nanorc` to tell nano where to find the `Dockerfile.nanorc` file + ``` +## Dockerfile files +include "~/.nano/syntax/Dockerfile.nanorc" + ``` + +System Wide Installation +------------------------ +1. Create a nano syntax directory: + * `mkdir /usr/local/share/nano` + +2. Copy `Dockerfile.nanorc` to `/usr/local/share/nano` + * `cp Dockerfile.nanorc /usr/local/share/nano/` + +3. Add the following to your `/etc/nanorc`: + ``` +## Dockerfile files +include "/usr/local/share/nano/Dockerfile.nanorc" + ``` diff --git a/components/engine/contrib/syntax/textmate/Docker.tmbundle/Preferences/Dockerfile.tmPreferences b/components/engine/contrib/syntax/textmate/Docker.tmbundle/Preferences/Dockerfile.tmPreferences new file mode 100644 index 0000000000..20f0d04ca8 --- /dev/null +++ b/components/engine/contrib/syntax/textmate/Docker.tmbundle/Preferences/Dockerfile.tmPreferences @@ -0,0 +1,24 @@ + + + + + name + Comments + scope + source.dockerfile + settings + + shellVariables + + + name + TM_COMMENT_START + value + # + + + + uuid + 2B215AC0-A7F3-4090-9FF6-F4842BD56CA7 + + diff --git a/components/engine/contrib/syntax/textmate/Docker.tmbundle/Syntaxes/Dockerfile.tmLanguage b/components/engine/contrib/syntax/textmate/Docker.tmbundle/Syntaxes/Dockerfile.tmLanguage new file mode 100644 index 0000000000..948a9bfc20 --- /dev/null +++ b/components/engine/contrib/syntax/textmate/Docker.tmbundle/Syntaxes/Dockerfile.tmLanguage @@ -0,0 +1,143 @@ + + + + + fileTypes + + Dockerfile + + name + Dockerfile + patterns + + + captures + + 1 + + name + keyword.control.dockerfile + + 2 + + name + keyword.other.special-method.dockerfile + + + match + ^\s*(?:(ONBUILD)\s+)?(ADD|ARG|CMD|COPY|ENTRYPOINT|ENV|EXPOSE|FROM|HEALTHCHECK|LABEL|MAINTAINER|RUN|SHELL|STOPSIGNAL|USER|VOLUME|WORKDIR)\s + + + captures + + 1 + + name + keyword.operator.dockerfile + + 2 + + name + keyword.other.special-method.dockerfile + + + match + ^\s*(?:(ONBUILD)\s+)?(CMD|ENTRYPOINT)\s + + + begin + " + beginCaptures + + 1 + + name + punctuation.definition.string.begin.dockerfile + + + end + " + endCaptures + + 1 + + name + punctuation.definition.string.end.dockerfile + + + name + string.quoted.double.dockerfile + patterns + + + match + \\. + name + constant.character.escaped.dockerfile + + + + + begin + ' + beginCaptures + + 1 + + name + punctuation.definition.string.begin.dockerfile + + + end + ' + endCaptures + + 1 + + name + punctuation.definition.string.end.dockerfile + + + name + string.quoted.single.dockerfile + patterns + + + match + \\. + name + constant.character.escaped.dockerfile + + + + + captures + + 1 + + name + punctuation.whitespace.comment.leading.dockerfile + + 2 + + name + comment.line.number-sign.dockerfile + + 3 + + name + punctuation.definition.comment.dockerfile + + + comment + comment.line + match + ^(\s*)((#).*$\n?) + + + scopeName + source.dockerfile + uuid + a39d8795-59d2-49af-aa00-fe74ee29576e + + diff --git a/components/engine/contrib/syntax/textmate/Docker.tmbundle/info.plist b/components/engine/contrib/syntax/textmate/Docker.tmbundle/info.plist new file mode 100644 index 0000000000..239f4b0a9b --- /dev/null +++ b/components/engine/contrib/syntax/textmate/Docker.tmbundle/info.plist @@ -0,0 +1,16 @@ + + + + + contactEmailRot13 + germ@andz.com.ar + contactName + GermanDZ + description + Helpers for Docker. 
+ name + Docker + uuid + 8B9DDBAF-E65C-4E12-FFA7-467D4AA535B1 + + diff --git a/components/engine/contrib/syntax/textmate/README.md b/components/engine/contrib/syntax/textmate/README.md new file mode 100644 index 0000000000..ce611018e5 --- /dev/null +++ b/components/engine/contrib/syntax/textmate/README.md @@ -0,0 +1,17 @@ +# Docker.tmbundle + +Dockerfile syntax highlighting for TextMate and Sublime Text. + +## Install + +### Sublime Text + +Available for Sublime Text under [package control](https://sublime.wbond.net/packages/Dockerfile%20Syntax%20Highlighting). +Search for *Dockerfile Syntax Highlighting* + +### TextMate 2 + +You can install this bundle in TextMate by opening the preferences and going to the bundles tab. After installation it will be automatically updated for you. + +enjoy. + diff --git a/components/engine/contrib/syntax/textmate/REVIEWERS b/components/engine/contrib/syntax/textmate/REVIEWERS new file mode 100644 index 0000000000..965743df64 --- /dev/null +++ b/components/engine/contrib/syntax/textmate/REVIEWERS @@ -0,0 +1 @@ +Asbjorn Enge (@asbjornenge) diff --git a/components/engine/contrib/syntax/vim/LICENSE b/components/engine/contrib/syntax/vim/LICENSE new file mode 100644 index 0000000000..e67cdabd22 --- /dev/null +++ b/components/engine/contrib/syntax/vim/LICENSE @@ -0,0 +1,22 @@ +Copyright (c) 2013 Honza Pokorny +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/components/engine/contrib/syntax/vim/README.md b/components/engine/contrib/syntax/vim/README.md new file mode 100644 index 0000000000..5aa9bd825d --- /dev/null +++ b/components/engine/contrib/syntax/vim/README.md @@ -0,0 +1,26 @@ +dockerfile.vim +============== + +Syntax highlighting for Dockerfiles + +Installation +------------ +With [pathogen](https://github.com/tpope/vim-pathogen), the usual way... + +With [Vundle](https://github.com/gmarik/Vundle.vim) + + Plugin 'docker/docker' , {'rtp': '/contrib/syntax/vim/'} + +Features +-------- + +The syntax highlighting includes: + +* The directives (e.g. 
`FROM`) +* Strings +* Comments + +License +------- + +BSD, short and sweet diff --git a/components/engine/contrib/syntax/vim/doc/dockerfile.txt b/components/engine/contrib/syntax/vim/doc/dockerfile.txt new file mode 100644 index 0000000000..e69e2b7b30 --- /dev/null +++ b/components/engine/contrib/syntax/vim/doc/dockerfile.txt @@ -0,0 +1,18 @@ +*dockerfile.txt* Syntax highlighting for Dockerfiles + +Author: Honza Pokorny +License: BSD + +INSTALLATION *installation* + +Drop it on your Pathogen path and you're all set. + +FEATURES *features* + +The syntax highlighting includes: + +* The directives (e.g. FROM) +* Strings +* Comments + + vim:tw=78:et:ft=help:norl: diff --git a/components/engine/contrib/syntax/vim/ftdetect/dockerfile.vim b/components/engine/contrib/syntax/vim/ftdetect/dockerfile.vim new file mode 100644 index 0000000000..ee10e5d6a0 --- /dev/null +++ b/components/engine/contrib/syntax/vim/ftdetect/dockerfile.vim @@ -0,0 +1 @@ +au BufNewFile,BufRead [Dd]ockerfile,Dockerfile.* set filetype=dockerfile diff --git a/components/engine/contrib/syntax/vim/syntax/dockerfile.vim b/components/engine/contrib/syntax/vim/syntax/dockerfile.vim new file mode 100644 index 0000000000..a067e6ad4c --- /dev/null +++ b/components/engine/contrib/syntax/vim/syntax/dockerfile.vim @@ -0,0 +1,31 @@ +" dockerfile.vim - Syntax highlighting for Dockerfiles +" Maintainer: Honza Pokorny +" Version: 0.5 + + +if exists("b:current_syntax") + finish +endif + +let b:current_syntax = "dockerfile" + +syntax case ignore + +syntax match dockerfileKeyword /\v^\s*(ONBUILD\s+)?(ADD|ARG|CMD|COPY|ENTRYPOINT|ENV|EXPOSE|FROM|HEALTHCHECK|LABEL|MAINTAINER|RUN|SHELL|STOPSIGNAL|USER|VOLUME|WORKDIR)\s/ +highlight link dockerfileKeyword Keyword + +syntax region dockerfileString start=/\v"/ skip=/\v\\./ end=/\v"/ +highlight link dockerfileString String + +syntax match dockerfileComment "\v^\s*#.*$" +highlight link dockerfileComment Comment + +set commentstring=#\ %s + +" match "RUN", "CMD", and "ENTRYPOINT" lines, and parse them as shell +let s:current_syntax = b:current_syntax +unlet b:current_syntax +syntax include @SH syntax/sh.vim +let b:current_syntax = s:current_syntax +syntax region shLine matchgroup=dockerfileKeyword start=/\v^\s*(RUN|CMD|ENTRYPOINT)\s/ end=/\v$/ contains=@SH +" since @SH will handle "\" as part of the same line automatically, this "just works" for line continuation too, but with the caveat that it will highlight "RUN echo '" followed by a newline as if it were a block because the "'" is shell line continuation... not sure how to fix that just yet (TODO) diff --git a/components/engine/contrib/syscall-test/Dockerfile b/components/engine/contrib/syscall-test/Dockerfile new file mode 100644 index 0000000000..f95f1758c0 --- /dev/null +++ b/components/engine/contrib/syscall-test/Dockerfile @@ -0,0 +1,15 @@ +FROM buildpack-deps:jessie + +COPY . 
/usr/src/
+
+WORKDIR /usr/src/
+
+RUN gcc -g -Wall -static userns.c -o /usr/bin/userns-test \
+	&& gcc -g -Wall -static ns.c -o /usr/bin/ns-test \
+	&& gcc -g -Wall -static acct.c -o /usr/bin/acct-test \
+	&& gcc -g -Wall -static setuid.c -o /usr/bin/setuid-test \
+	&& gcc -g -Wall -static setgid.c -o /usr/bin/setgid-test \
+	&& gcc -g -Wall -static socket.c -o /usr/bin/socket-test \
+	&& gcc -g -Wall -static raw.c -o /usr/bin/raw-test
+
+RUN [ "$(uname -m)" = "x86_64" ] && gcc -s -m32 -nostdlib exit32.s -o /usr/bin/exit32-test || true
diff --git a/components/engine/contrib/syscall-test/acct.c b/components/engine/contrib/syscall-test/acct.c
new file mode 100644
index 0000000000..88ac287966
--- /dev/null
+++ b/components/engine/contrib/syscall-test/acct.c
@@ -0,0 +1,16 @@
+#define _GNU_SOURCE
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+int main(int argc, char **argv)
+{
+	int err = acct("/tmp/t");
+	if (err == -1) {
+		fprintf(stderr, "acct failed: %s\n", strerror(errno));
+		exit(EXIT_FAILURE);
+	}
+	exit(EXIT_SUCCESS);
+}
diff --git a/components/engine/contrib/syscall-test/exit32.s b/components/engine/contrib/syscall-test/exit32.s
new file mode 100644
index 0000000000..8bbb5c58b3
--- /dev/null
+++ b/components/engine/contrib/syscall-test/exit32.s
@@ -0,0 +1,7 @@
+.globl _start
+.text
+_start:
+	xorl %eax, %eax
+	incl %eax
+	movb $0, %bl
+	int $0x80
diff --git a/components/engine/contrib/syscall-test/ns.c b/components/engine/contrib/syscall-test/ns.c
new file mode 100644
index 0000000000..624388630a
--- /dev/null
+++ b/components/engine/contrib/syscall-test/ns.c
@@ -0,0 +1,63 @@
+#define _GNU_SOURCE
+#include <errno.h>
+#include <sched.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <sys/wait.h>
+#include <unistd.h>
+
+#define STACK_SIZE (1024 * 1024)	/* Stack size for cloned child */
+
+struct clone_args {
+	char **argv;
+};
+
+// child_exec is the func that will be executed as the result of clone
+static int child_exec(void *stuff)
+{
+	struct clone_args *args = (struct clone_args *)stuff;
+	if (execvp(args->argv[0], args->argv) != 0) {
+		fprintf(stderr, "failed to execvp arguments %s\n",
+			strerror(errno));
+		exit(-1);
+	}
+	// we should never reach here!
+	exit(EXIT_FAILURE);
+}
+
+int main(int argc, char **argv)
+{
+	struct clone_args args;
+	args.argv = &argv[1];
+
+	int clone_flags = CLONE_NEWNS | CLONE_NEWPID | SIGCHLD;
+
+	// allocate stack for child
+	char *stack;		/* Start of stack buffer */
+	char *child_stack;	/* End of stack buffer */
+	stack =
+	    mmap(NULL, STACK_SIZE, PROT_READ | PROT_WRITE,
+		 MAP_SHARED | MAP_ANON | MAP_STACK, -1, 0);
+	if (stack == MAP_FAILED) {
+		fprintf(stderr, "mmap failed: %s\n", strerror(errno));
+		exit(EXIT_FAILURE);
+	}
+	child_stack = stack + STACK_SIZE;	/* Assume stack grows downward */
+
+	// the result of this call is that our child_exec will be run in another
+	// process returning its pid
+	pid_t pid = clone(child_exec, child_stack, clone_flags, &args);
+	if (pid < 0) {
+		fprintf(stderr, "clone failed: %s\n", strerror(errno));
+		exit(EXIT_FAILURE);
+	}
+	// let's wait on our child process here before we, the parent, exit
+	if (waitpid(pid, NULL, 0) == -1) {
+		fprintf(stderr, "failed to wait pid %d\n", pid);
+		exit(EXIT_FAILURE);
+	}
+	exit(EXIT_SUCCESS);
+}
diff --git a/components/engine/contrib/syscall-test/raw.c b/components/engine/contrib/syscall-test/raw.c
new file mode 100644
index 0000000000..7995a0d3a5
--- /dev/null
+++ b/components/engine/contrib/syscall-test/raw.c
@@ -0,0 +1,14 @@
+#include <netinet/in.h>
+#include <stdio.h>
+#include <sys/socket.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+int main() {
+	if (socket(PF_INET, SOCK_RAW, IPPROTO_UDP) == -1) {
+		perror("socket");
+		return 1;
+	}
+
+	return 0;
+}
diff --git a/components/engine/contrib/syscall-test/setgid.c b/components/engine/contrib/syscall-test/setgid.c
new file mode 100644
index 0000000000..df9680c869
--- /dev/null
+++ b/components/engine/contrib/syscall-test/setgid.c
@@ -0,0 +1,11 @@
+#include <stdio.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+int main() {
+	if (setgid(1) == -1) {
+		perror("setgid");
+		return 1;
+	}
+	return 0;
+}
diff --git a/components/engine/contrib/syscall-test/setuid.c b/components/engine/contrib/syscall-test/setuid.c
new file mode 100644
index 0000000000..5b939677e9
--- /dev/null
+++ b/components/engine/contrib/syscall-test/setuid.c
@@ -0,0 +1,11 @@
+#include <stdio.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+int main() {
+	if (setuid(1) == -1) {
+		perror("setuid");
+		return 1;
+	}
+	return 0;
+}
diff --git a/components/engine/contrib/syscall-test/socket.c b/components/engine/contrib/syscall-test/socket.c
new file mode 100644
index 0000000000..d26c82f00f
--- /dev/null
+++ b/components/engine/contrib/syscall-test/socket.c
@@ -0,0 +1,30 @@
+#include <arpa/inet.h>
+#include <netinet/in.h>
+#include <stdio.h>
+#include <string.h>
+#include <sys/socket.h>
+#include <unistd.h>
+
+int main() {
+	int s;
+	struct sockaddr_in sin;
+
+	s = socket(AF_INET, SOCK_STREAM, 0);
+	if (s == -1) {
+		perror("socket");
+		return 1;
+	}
+
+	sin.sin_family = AF_INET;
+	sin.sin_addr.s_addr = INADDR_ANY;
+	sin.sin_port = htons(80);
+
+	if (bind(s, (struct sockaddr *)&sin, sizeof(sin)) == -1) {
+		perror("bind");
+		return 1;
+	}
+
+	close(s);
+
+	return 0;
+}
diff --git a/components/engine/contrib/syscall-test/userns.c b/components/engine/contrib/syscall-test/userns.c
new file mode 100644
index 0000000000..4c5c8d304e
--- /dev/null
+++ b/components/engine/contrib/syscall-test/userns.c
@@ -0,0 +1,63 @@
+#define _GNU_SOURCE
+#include <errno.h>
+#include <sched.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <sys/wait.h>
+#include <unistd.h>
+
+#define STACK_SIZE (1024 * 1024)	/* Stack size for cloned child */
+
+struct clone_args {
+	char **argv;
+};
+
+// child_exec is the func that will be executed as the result of clone
+static int child_exec(void *stuff)
+{
+	struct clone_args *args = (struct clone_args *)stuff;
+	if (execvp(args->argv[0],
args->argv) != 0) {
+		fprintf(stderr, "failed to execvp arguments %s\n",
+			strerror(errno));
+		exit(-1);
+	}
+	// we should never reach here!
+	exit(EXIT_FAILURE);
+}
+
+int main(int argc, char **argv)
+{
+	struct clone_args args;
+	args.argv = &argv[1];
+
+	int clone_flags = CLONE_NEWUSER | SIGCHLD;
+
+	// allocate stack for child
+	char *stack;		/* Start of stack buffer */
+	char *child_stack;	/* End of stack buffer */
+	stack =
+	    mmap(NULL, STACK_SIZE, PROT_READ | PROT_WRITE,
+		 MAP_SHARED | MAP_ANON | MAP_STACK, -1, 0);
+	if (stack == MAP_FAILED) {
+		fprintf(stderr, "mmap failed: %s\n", strerror(errno));
+		exit(EXIT_FAILURE);
+	}
+	child_stack = stack + STACK_SIZE;	/* Assume stack grows downward */
+
+	// the result of this call is that our child_exec will be run in another
+	// process returning its pid
+	pid_t pid = clone(child_exec, child_stack, clone_flags, &args);
+	if (pid < 0) {
+		fprintf(stderr, "clone failed: %s\n", strerror(errno));
+		exit(EXIT_FAILURE);
+	}
+	// let's wait on our child process here before we, the parent, exit
+	if (waitpid(pid, NULL, 0) == -1) {
+		fprintf(stderr, "failed to wait pid %d\n", pid);
+		exit(EXIT_FAILURE);
+	}
+	exit(EXIT_SUCCESS);
+}
diff --git a/components/engine/contrib/udev/80-docker.rules b/components/engine/contrib/udev/80-docker.rules
new file mode 100644
index 0000000000..f934c01757
--- /dev/null
+++ b/components/engine/contrib/udev/80-docker.rules
@@ -0,0 +1,3 @@
+# hide docker's loopback devices from udisks, and thus from user desktops
+SUBSYSTEM=="block", ENV{DM_NAME}=="docker-*", ENV{UDISKS_PRESENTATION_HIDE}="1", ENV{UDISKS_IGNORE}="1"
+SUBSYSTEM=="block", DEVPATH=="/devices/virtual/block/loop*", ATTR{loop/backing_file}=="/var/lib/docker/*", ENV{UDISKS_PRESENTATION_HIDE}="1", ENV{UDISKS_IGNORE}="1"
diff --git a/components/engine/contrib/vagrant-docker/README.md b/components/engine/contrib/vagrant-docker/README.md
new file mode 100644
index 0000000000..736c789998
--- /dev/null
+++ b/components/engine/contrib/vagrant-docker/README.md
@@ -0,0 +1,50 @@
+# Vagrant integration
+
+Currently there are at least 4 different projects that we are aware of that deal
+with integration with [Vagrant](http://vagrantup.com/) at different levels. One
+approach is to use Docker as a [provisioner](http://docs.vagrantup.com/v2/provisioning/index.html),
+which means you can create containers and pull base images on VMs using Docker's
+CLI; the other is to use Docker as a [provider](http://docs.vagrantup.com/v2/providers/index.html),
+meaning you can use Vagrant to control Docker containers.
+
+
+### Provisioners
+
+* [Vocker](https://github.com/fgrehm/vocker)
+* [Ventriloquist](https://github.com/fgrehm/ventriloquist)
+
+### Providers
+
+* [docker-provider](https://github.com/fgrehm/docker-provider)
+* [vagrant-shell](https://github.com/destructuring/vagrant-shell)
+
+## Setting up Vagrant-docker with the Engine API
+
+The initial Docker upstart script will not work because the daemon listens only on `127.0.0.1`, which is not accessible from the host machine. Instead, we need to change the script so that the daemon binds to `0.0.0.0`. To do this, modify `/etc/init/docker.conf` to look like this:
+
+```
+description "Docker daemon"
+
+start on filesystem
+stop on runlevel [!2345]
+
+respawn
+
+script
+	/usr/bin/dockerd -H=tcp://0.0.0.0:2375
+end script
+```
+
+Once that's done, you need to set up an SSH tunnel between your host machine and the vagrant machine that's running Docker.
This can be done by running the following command in a host terminal: + +``` +ssh -L 2375:localhost:2375 -p 2222 vagrant@localhost +``` + +(The first 2375 is what your host can connect to, the second 2375 is what port Docker is running on in the vagrant machine, and the 2222 is the port Vagrant is providing for SSH. If VirtualBox is the VM you're using, you can see what value "2222" should be by going to: Network > Adapter 1 > Advanced > Port Forwarding in the VirtualBox GUI.) + +Note that because the port has been changed, to run docker commands from within the command line you must run them like this: + +``` +sudo docker -H 0.0.0.0:2375 < commands for docker > +``` diff --git a/components/engine/daemon/apparmor_default.go b/components/engine/daemon/apparmor_default.go new file mode 100644 index 0000000000..2a418b25c2 --- /dev/null +++ b/components/engine/daemon/apparmor_default.go @@ -0,0 +1,36 @@ +// +build linux + +package daemon + +import ( + "fmt" + + aaprofile "github.com/docker/docker/profiles/apparmor" + "github.com/opencontainers/runc/libcontainer/apparmor" +) + +// Define constants for native driver +const ( + defaultApparmorProfile = "docker-default" +) + +func ensureDefaultAppArmorProfile() error { + if apparmor.IsEnabled() { + loaded, err := aaprofile.IsLoaded(defaultApparmorProfile) + if err != nil { + return fmt.Errorf("Could not check if %s AppArmor profile was loaded: %s", defaultApparmorProfile, err) + } + + // Nothing to do. + if loaded { + return nil + } + + // Load the profile. + if err := aaprofile.InstallDefault(defaultApparmorProfile); err != nil { + return fmt.Errorf("AppArmor enabled on system but the %s profile could not be loaded: %s", defaultApparmorProfile, err) + } + } + + return nil +} diff --git a/components/engine/daemon/apparmor_default_unsupported.go b/components/engine/daemon/apparmor_default_unsupported.go new file mode 100644 index 0000000000..cd2dd9702e --- /dev/null +++ b/components/engine/daemon/apparmor_default_unsupported.go @@ -0,0 +1,7 @@ +// +build !linux + +package daemon + +func ensureDefaultAppArmorProfile() error { + return nil +} diff --git a/components/engine/daemon/archive.go b/components/engine/daemon/archive.go new file mode 100644 index 0000000000..dbef0b4d48 --- /dev/null +++ b/components/engine/daemon/archive.go @@ -0,0 +1,470 @@ +package daemon + +import ( + "io" + "os" + "path/filepath" + "strings" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/container" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/chrootarchive" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/symlink" + "github.com/docker/docker/pkg/system" + "github.com/pkg/errors" +) + +// ErrExtractPointNotDirectory is used to convey that the operation to extract +// a tar archive to a directory in a container has failed because the specified +// path does not refer to a directory. +var ErrExtractPointNotDirectory = errors.New("extraction point is not a directory") + +// ContainerCopy performs a deprecated operation of archiving the resource at +// the specified path in the container identified by the given name. +func (daemon *Daemon) ContainerCopy(name string, res string) (io.ReadCloser, error) { + container, err := daemon.GetContainer(name) + if err != nil { + return nil, err + } + + if res[0] == '/' || res[0] == '\\' { + res = res[1:] + } + + // Make sure an online file-system operation is permitted. 
+ if err := daemon.isOnlineFSOperationPermitted(container); err != nil { + return nil, err + } + + return daemon.containerCopy(container, res) +} + +// ContainerStatPath stats the filesystem resource at the specified path in the +// container identified by the given name. +func (daemon *Daemon) ContainerStatPath(name string, path string) (stat *types.ContainerPathStat, err error) { + container, err := daemon.GetContainer(name) + if err != nil { + return nil, err + } + + // Make sure an online file-system operation is permitted. + if err := daemon.isOnlineFSOperationPermitted(container); err != nil { + return nil, err + } + + return daemon.containerStatPath(container, path) +} + +// ContainerArchivePath creates an archive of the filesystem resource at the +// specified path in the container identified by the given name. Returns a +// tar archive of the resource and whether it was a directory or a single file. +func (daemon *Daemon) ContainerArchivePath(name string, path string) (content io.ReadCloser, stat *types.ContainerPathStat, err error) { + container, err := daemon.GetContainer(name) + if err != nil { + return nil, nil, err + } + + // Make sure an online file-system operation is permitted. + if err := daemon.isOnlineFSOperationPermitted(container); err != nil { + return nil, nil, err + } + + return daemon.containerArchivePath(container, path) +} + +// ContainerExtractToDir extracts the given archive to the specified location +// in the filesystem of the container identified by the given name. The given +// path must be of a directory in the container. If it is not, the error will +// be ErrExtractPointNotDirectory. If noOverwriteDirNonDir is true then it will +// be an error if unpacking the given content would cause an existing directory +// to be replaced with a non-directory and vice versa. +func (daemon *Daemon) ContainerExtractToDir(name, path string, copyUIDGID, noOverwriteDirNonDir bool, content io.Reader) error { + container, err := daemon.GetContainer(name) + if err != nil { + return err + } + + // Make sure an online file-system operation is permitted. + if err := daemon.isOnlineFSOperationPermitted(container); err != nil { + return err + } + + return daemon.containerExtractToDir(container, path, copyUIDGID, noOverwriteDirNonDir, content) +} + +// containerStatPath stats the filesystem resource at the specified path in this +// container. Returns stat info about the resource. +func (daemon *Daemon) containerStatPath(container *container.Container, path string) (stat *types.ContainerPathStat, err error) { + container.Lock() + defer container.Unlock() + + if err = daemon.Mount(container); err != nil { + return nil, err + } + defer daemon.Unmount(container) + + err = daemon.mountVolumes(container) + defer container.DetachAndUnmount(daemon.LogVolumeEvent) + if err != nil { + return nil, err + } + + resolvedPath, absPath, err := container.ResolvePath(path) + if err != nil { + return nil, err + } + + return container.StatPath(resolvedPath, absPath) +} + +// containerArchivePath creates an archive of the filesystem resource at the specified +// path in this container. Returns a tar archive of the resource and stat info +// about the resource. 
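+// The container remains locked, and its rootfs and volumes stay mounted,
+// until the returned io.ReadCloser is closed: the read-closer wrapper built
+// below performs the unmount and Unlock in its close func, so callers must
+// always close the returned content to release the container.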
+func (daemon *Daemon) containerArchivePath(container *container.Container, path string) (content io.ReadCloser, stat *types.ContainerPathStat, err error) { + container.Lock() + + defer func() { + if err != nil { + // Wait to unlock the container until the archive is fully read + // (see the ReadCloseWrapper func below) or if there is an error + // before that occurs. + container.Unlock() + } + }() + + if err = daemon.Mount(container); err != nil { + return nil, nil, err + } + + defer func() { + if err != nil { + // unmount any volumes + container.DetachAndUnmount(daemon.LogVolumeEvent) + // unmount the container's rootfs + daemon.Unmount(container) + } + }() + + if err = daemon.mountVolumes(container); err != nil { + return nil, nil, err + } + + resolvedPath, absPath, err := container.ResolvePath(path) + if err != nil { + return nil, nil, err + } + + stat, err = container.StatPath(resolvedPath, absPath) + if err != nil { + return nil, nil, err + } + + // We need to rebase the archive entries if the last element of the + // resolved path was a symlink that was evaluated and is now different + // than the requested path. For example, if the given path was "/foo/bar/", + // but it resolved to "/var/lib/docker/containers/{id}/foo/baz/", we want + // to ensure that the archive entries start with "bar" and not "baz". This + // also catches the case when the root directory of the container is + // requested: we want the archive entries to start with "/" and not the + // container ID. + data, err := archive.TarResourceRebase(resolvedPath, filepath.Base(absPath)) + if err != nil { + return nil, nil, err + } + + content = ioutils.NewReadCloserWrapper(data, func() error { + err := data.Close() + container.DetachAndUnmount(daemon.LogVolumeEvent) + daemon.Unmount(container) + container.Unlock() + return err + }) + + daemon.LogContainerEvent(container, "archive-path") + + return content, stat, nil +} + +// containerExtractToDir extracts the given tar archive to the specified location in the +// filesystem of this container. The given path must be of a directory in the +// container. If it is not, the error will be ErrExtractPointNotDirectory. If +// noOverwriteDirNonDir is true then it will be an error if unpacking the +// given content would cause an existing directory to be replaced with a non- +// directory and vice versa. +func (daemon *Daemon) containerExtractToDir(container *container.Container, path string, copyUIDGID, noOverwriteDirNonDir bool, content io.Reader) (err error) { + container.Lock() + defer container.Unlock() + + if err = daemon.Mount(container); err != nil { + return err + } + defer daemon.Unmount(container) + + err = daemon.mountVolumes(container) + defer container.DetachAndUnmount(daemon.LogVolumeEvent) + if err != nil { + return err + } + + // Check if a drive letter supplied, it must be the system drive. No-op except on Windows + path, err = system.CheckSystemDriveAndRemoveDriveLetter(path) + if err != nil { + return err + } + + // The destination path needs to be resolved to a host path, with all + // symbolic links followed in the scope of the container's rootfs. Note + // that we do not use `container.ResolvePath(path)` here because we need + // to also evaluate the last path element if it is a symlink. This is so + // that you can extract an archive to a symlink that points to a directory. + + // Consider the given path as an absolute path in the container. 
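+	// filepath.Join cleans the joined path, which would strip a trailing
+	// separator or "/." from the request; PreserveTrailingDotOrSeparator
+	// restores it from the original path, since a trailing separator
+	// changes how the destination is treated during extraction.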
+ absPath := archive.PreserveTrailingDotOrSeparator(filepath.Join(string(filepath.Separator), path), path) + + // This will evaluate the last path element if it is a symlink. + resolvedPath, err := container.GetResourcePath(absPath) + if err != nil { + return err + } + + stat, err := os.Lstat(resolvedPath) + if err != nil { + return err + } + + if !stat.IsDir() { + return ErrExtractPointNotDirectory + } + + // Need to check if the path is in a volume. If it is, it cannot be in a + // read-only volume. If it is not in a volume, the container cannot be + // configured with a read-only rootfs. + + // Use the resolved path relative to the container rootfs as the new + // absPath. This way we fully follow any symlinks in a volume that may + // lead back outside the volume. + // + // The Windows implementation of filepath.Rel in golang 1.4 does not + // support volume style file path semantics. On Windows when using the + // filter driver, we are guaranteed that the path will always be + // a volume file path. + var baseRel string + if strings.HasPrefix(resolvedPath, `\\?\Volume{`) { + if strings.HasPrefix(resolvedPath, container.BaseFS) { + baseRel = resolvedPath[len(container.BaseFS):] + if baseRel[:1] == `\` { + baseRel = baseRel[1:] + } + } + } else { + baseRel, err = filepath.Rel(container.BaseFS, resolvedPath) + } + if err != nil { + return err + } + // Make it an absolute path. + absPath = filepath.Join(string(filepath.Separator), baseRel) + + toVolume, err := checkIfPathIsInAVolume(container, absPath) + if err != nil { + return err + } + + if !toVolume && container.HostConfig.ReadonlyRootfs { + return ErrRootFSReadOnly + } + + options := daemon.defaultTarCopyOptions(noOverwriteDirNonDir) + + if copyUIDGID { + var err error + // tarCopyOptions will appropriately pull in the right uid/gid for the + // user/group and will set the options. + options, err = daemon.tarCopyOptions(container, noOverwriteDirNonDir) + if err != nil { + return err + } + } + + if err := chrootarchive.Untar(content, resolvedPath, options); err != nil { + return err + } + + daemon.LogContainerEvent(container, "extract-to-dir") + + return nil +} + +func (daemon *Daemon) containerCopy(container *container.Container, resource string) (rc io.ReadCloser, err error) { + container.Lock() + + defer func() { + if err != nil { + // Wait to unlock the container until the archive is fully read + // (see the ReadCloseWrapper func below) or if there is an error + // before that occurs. 
+ container.Unlock() + } + }() + + if err := daemon.Mount(container); err != nil { + return nil, err + } + + defer func() { + if err != nil { + // unmount any volumes + container.DetachAndUnmount(daemon.LogVolumeEvent) + // unmount the container's rootfs + daemon.Unmount(container) + } + }() + + if err := daemon.mountVolumes(container); err != nil { + return nil, err + } + + basePath, err := container.GetResourcePath(resource) + if err != nil { + return nil, err + } + stat, err := os.Stat(basePath) + if err != nil { + return nil, err + } + var filter []string + if !stat.IsDir() { + d, f := filepath.Split(basePath) + basePath = d + filter = []string{f} + } else { + filter = []string{filepath.Base(basePath)} + basePath = filepath.Dir(basePath) + } + archive, err := archive.TarWithOptions(basePath, &archive.TarOptions{ + Compression: archive.Uncompressed, + IncludeFiles: filter, + }) + if err != nil { + return nil, err + } + + reader := ioutils.NewReadCloserWrapper(archive, func() error { + err := archive.Close() + container.DetachAndUnmount(daemon.LogVolumeEvent) + daemon.Unmount(container) + container.Unlock() + return err + }) + daemon.LogContainerEvent(container, "copy") + return reader, nil +} + +// CopyOnBuild copies/extracts a source FileInfo to a destination path inside a container +// specified by a container object. +// TODO: make sure callers don't unnecessarily convert destPath with filepath.FromSlash (Copy does it already). +// CopyOnBuild should take in abstract paths (with slashes) and the implementation should convert it to OS-specific paths. +func (daemon *Daemon) CopyOnBuild(cID, destPath, srcRoot, srcPath string, decompress bool) error { + fullSrcPath, err := symlink.FollowSymlinkInScope(filepath.Join(srcRoot, srcPath), srcRoot) + if err != nil { + return err + } + + destExists := true + destDir := false + rootUID, rootGID := daemon.GetRemappedUIDGID() + + // Work in daemon-local OS specific file paths + destPath = filepath.FromSlash(destPath) + + c, err := daemon.GetContainer(cID) + if err != nil { + return err + } + err = daemon.Mount(c) + if err != nil { + return err + } + defer daemon.Unmount(c) + + dest, err := c.GetResourcePath(destPath) + if err != nil { + return err + } + + // Preserve the trailing slash + // TODO: why are we appending another path separator if there was already one? + if strings.HasSuffix(destPath, string(os.PathSeparator)) || destPath == "." { + destDir = true + dest += string(os.PathSeparator) + } + + destPath = dest + + destStat, err := os.Stat(destPath) + if err != nil { + if !os.IsNotExist(err) { + //logrus.Errorf("Error performing os.Stat on %s. %s", destPath, err) + return err + } + destExists = false + } + + uidMaps, gidMaps := daemon.GetUIDGIDMaps() + archiver := &archive.Archiver{ + Untar: chrootarchive.Untar, + UIDMaps: uidMaps, + GIDMaps: gidMaps, + } + + src, err := os.Stat(fullSrcPath) + if err != nil { + return err + } + + if src.IsDir() { + // copy as directory + if err := archiver.CopyWithTar(fullSrcPath, destPath); err != nil { + return err + } + return fixPermissions(fullSrcPath, destPath, rootUID, rootGID, destExists) + } + if decompress && archive.IsArchivePath(fullSrcPath) { + // Only try to untar if it is a file and that we've been told to decompress (when ADD-ing a remote file) + + // First try to unpack the source as an archive + // to support the untar feature we need to clean up the path a little bit + // because tar is very forgiving. 
First we need to strip
+	// off the archive's filename from the path, but only when the path does
+	// not already end in a separator
+	tarDest := destPath
+	if strings.HasSuffix(tarDest, string(os.PathSeparator)) {
+		tarDest = filepath.Dir(destPath)
+	}
+
+	// try to untar the original source
+	return archiver.UntarPath(fullSrcPath, tarDest)
+	}
+
+	// only needed for fixPermissions, but might as well put it before CopyFileWithTar
+	if destDir || (destExists && destStat.IsDir()) {
+		destPath = filepath.Join(destPath, filepath.Base(srcPath))
+	}
+
+	if err := idtools.MkdirAllNewAs(filepath.Dir(destPath), 0755, rootUID, rootGID); err != nil {
+		return err
+	}
+	if err := archiver.CopyFileWithTar(fullSrcPath, destPath); err != nil {
+		return err
+	}
+
+	return fixPermissions(fullSrcPath, destPath, rootUID, rootGID, destExists)
+}
diff --git a/components/engine/daemon/archive_tarcopyoptions.go b/components/engine/daemon/archive_tarcopyoptions.go
new file mode 100644
index 0000000000..cbd1fc2cee
--- /dev/null
+++ b/components/engine/daemon/archive_tarcopyoptions.go
@@ -0,0 +1,16 @@
+package daemon
+
+import (
+	"github.com/docker/docker/pkg/archive"
+)
+
+// defaultTarCopyOptions is the setting that is used when unpacking an archive
+// for a copy API event.
+func (daemon *Daemon) defaultTarCopyOptions(noOverwriteDirNonDir bool) *archive.TarOptions {
+	uidMaps, gidMaps := daemon.GetUIDGIDMaps()
+	return &archive.TarOptions{
+		NoOverwriteDirNonDir: noOverwriteDirNonDir,
+		UIDMaps:              uidMaps,
+		GIDMaps:              gidMaps,
+	}
+}
diff --git a/components/engine/daemon/archive_tarcopyoptions_unix.go b/components/engine/daemon/archive_tarcopyoptions_unix.go
new file mode 100644
index 0000000000..7b68bc463d
--- /dev/null
+++ b/components/engine/daemon/archive_tarcopyoptions_unix.go
@@ -0,0 +1,28 @@
+// +build !windows
+
+package daemon
+
+import (
+	"github.com/docker/docker/container"
+	"github.com/docker/docker/pkg/archive"
+	"github.com/docker/docker/pkg/idtools"
+)
+
+func (daemon *Daemon) tarCopyOptions(container *container.Container, noOverwriteDirNonDir bool) (*archive.TarOptions, error) {
+	if container.Config.User == "" {
+		return daemon.defaultTarCopyOptions(noOverwriteDirNonDir), nil
+	}
+
+	user, err := idtools.LookupUser(container.Config.User)
+	if err != nil {
+		return nil, err
+	}
+
+	return &archive.TarOptions{
+		NoOverwriteDirNonDir: noOverwriteDirNonDir,
+		ChownOpts: &archive.TarChownOptions{
+			UID: user.Uid,
+			GID: user.Gid,
+		},
+	}, nil
+}
diff --git a/components/engine/daemon/archive_tarcopyoptions_windows.go b/components/engine/daemon/archive_tarcopyoptions_windows.go
new file mode 100644
index 0000000000..535efd222e
--- /dev/null
+++ b/components/engine/daemon/archive_tarcopyoptions_windows.go
@@ -0,0 +1,12 @@
+// +build windows
+
+package daemon
+
+import (
+	"github.com/docker/docker/container"
+	"github.com/docker/docker/pkg/archive"
+)
+
+func (daemon *Daemon) tarCopyOptions(container *container.Container, noOverwriteDirNonDir bool) (*archive.TarOptions, error) {
+	return daemon.defaultTarCopyOptions(noOverwriteDirNonDir), nil
+}
diff --git a/components/engine/daemon/archive_unix.go b/components/engine/daemon/archive_unix.go
new file mode 100644
index 0000000000..8806e2e198
--- /dev/null
+++ b/components/engine/daemon/archive_unix.go
@@ -0,0 +1,64 @@
+// +build !windows
+
+package daemon
+
+import (
+	"os"
+	"path/filepath"
+
+	"github.com/docker/docker/container"
+)
+
+// checkIfPathIsInAVolume
checks if the path is in a volume. If it is, it +// cannot be in a read-only volume. If it is not in a volume, the container +// cannot be configured with a read-only rootfs. +func checkIfPathIsInAVolume(container *container.Container, absPath string) (bool, error) { + var toVolume bool + for _, mnt := range container.MountPoints { + if toVolume = mnt.HasResource(absPath); toVolume { + if mnt.RW { + break + } + return false, ErrVolumeReadonly + } + } + return toVolume, nil +} + +func fixPermissions(source, destination string, uid, gid int, destExisted bool) error { + // If the destination didn't already exist, or the destination isn't a + // directory, then we should Lchown the destination. Otherwise, we shouldn't + // Lchown the destination. + destStat, err := os.Stat(destination) + if err != nil { + // This should *never* be reached, because the destination must've already + // been created while untar-ing the context. + return err + } + doChownDestination := !destExisted || !destStat.IsDir() + + // We Walk on the source rather than on the destination because we don't + // want to change permissions on things we haven't created or modified. + return filepath.Walk(source, func(fullpath string, info os.FileInfo, err error) error { + // Do not alter the walk root iff. it existed before, as it doesn't fall under + // the domain of "things we should chown". + if !doChownDestination && (source == fullpath) { + return nil + } + + // Path is prefixed by source: substitute with destination instead. + cleaned, err := filepath.Rel(source, fullpath) + if err != nil { + return err + } + + fullpath = filepath.Join(destination, cleaned) + return os.Lchown(fullpath, uid, gid) + }) +} + +// isOnlineFSOperationPermitted returns an error if an online filesystem operation +// is not permitted. +func (daemon *Daemon) isOnlineFSOperationPermitted(container *container.Container) error { + return nil +} diff --git a/components/engine/daemon/archive_windows.go b/components/engine/daemon/archive_windows.go new file mode 100644 index 0000000000..1c5a894f7c --- /dev/null +++ b/components/engine/daemon/archive_windows.go @@ -0,0 +1,44 @@ +package daemon + +import ( + "errors" + + containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/docker/container" +) + +// checkIfPathIsInAVolume checks if the path is in a volume. If it is, it +// cannot be in a read-only volume. If it is not in a volume, the container +// cannot be configured with a read-only rootfs. +// +// This is a no-op on Windows which does not support read-only volumes, or +// extracting to a mount point inside a volume. TODO Windows: FIXME Post-TP5 +func checkIfPathIsInAVolume(container *container.Container, absPath string) (bool, error) { + return false, nil +} + +func fixPermissions(source, destination string, uid, gid int, destExisted bool) error { + // chown is not supported on Windows + return nil +} + +// isOnlineFSOperationPermitted returns an error if an online filesystem operation +// is not permitted (such as stat or for copying). Running Hyper-V containers +// cannot have their file-system interrogated from the host as the filter is +// loaded inside the utility VM, not the host. +// IMPORTANT: The container lock must NOT be held when calling this function. +func (daemon *Daemon) isOnlineFSOperationPermitted(container *container.Container) error { + if !container.IsRunning() { + return nil + } + + // Determine isolation. If not specified in the hostconfig, use daemon default. 
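fixPermissions in the Unix file above walks the source tree and chowns the mirrored path under the destination. The core path arithmetic is just a `Rel` followed by a `Join`; a standalone sketch (`mirrorPath` and the paths shown are illustrative names, not part of the daemon):

```go
package main

import (
	"fmt"
	"path/filepath"
)

// mirrorPath maps a path inside the source tree onto the equivalent path
// inside the destination tree, the same step fixPermissions performs
// before calling os.Lchown.
func mirrorPath(source, destination, fullpath string) (string, error) {
	cleaned, err := filepath.Rel(source, fullpath)
	if err != nil {
		return "", err
	}
	return filepath.Join(destination, cleaned), nil
}

func main() {
	dst, _ := mirrorPath("/build/ctx/app", "/rootfs/opt/app", "/build/ctx/app/bin/run.sh")
	fmt.Println(dst) // /rootfs/opt/app/bin/run.sh
}
```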
+ actualIsolation := container.HostConfig.Isolation + if containertypes.Isolation.IsDefault(containertypes.Isolation(actualIsolation)) { + actualIsolation = daemon.defaultIsolation + } + if containertypes.Isolation.IsHyperV(actualIsolation) { + return errors.New("filesystem operations against a running Hyper-V container are not supported") + } + return nil +} diff --git a/components/engine/daemon/attach.go b/components/engine/daemon/attach.go new file mode 100644 index 0000000000..a892799529 --- /dev/null +++ b/components/engine/daemon/attach.go @@ -0,0 +1,182 @@ +package daemon + +import ( + "context" + "fmt" + "io" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/errors" + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/container" + "github.com/docker/docker/container/stream" + "github.com/docker/docker/daemon/logger" + "github.com/docker/docker/pkg/stdcopy" + "github.com/docker/docker/pkg/term" +) + +// ContainerAttach attaches to logs according to the config passed in. See ContainerAttachConfig. +func (daemon *Daemon) ContainerAttach(prefixOrName string, c *backend.ContainerAttachConfig) error { + keys := []byte{} + var err error + if c.DetachKeys != "" { + keys, err = term.ToBytes(c.DetachKeys) + if err != nil { + return fmt.Errorf("Invalid detach keys (%s) provided", c.DetachKeys) + } + } + + container, err := daemon.GetContainer(prefixOrName) + if err != nil { + return err + } + if container.IsPaused() { + err := fmt.Errorf("Container %s is paused. Unpause the container before attach", prefixOrName) + return errors.NewRequestConflictError(err) + } + + cfg := stream.AttachConfig{ + UseStdin: c.UseStdin, + UseStdout: c.UseStdout, + UseStderr: c.UseStderr, + TTY: container.Config.Tty, + CloseStdin: container.Config.StdinOnce, + DetachKeys: keys, + } + container.StreamConfig.AttachStreams(&cfg) + + inStream, outStream, errStream, err := c.GetStreams() + if err != nil { + return err + } + defer inStream.Close() + + if !container.Config.Tty && c.MuxStreams { + errStream = stdcopy.NewStdWriter(errStream, stdcopy.Stderr) + outStream = stdcopy.NewStdWriter(outStream, stdcopy.Stdout) + } + + if cfg.UseStdin { + cfg.Stdin = inStream + } + if cfg.UseStdout { + cfg.Stdout = outStream + } + if cfg.UseStderr { + cfg.Stderr = errStream + } + + if err := daemon.containerAttach(container, &cfg, c.Logs, c.Stream); err != nil { + fmt.Fprintf(outStream, "Error attaching: %s\n", err) + } + return nil +} + +// ContainerAttachRaw attaches the provided streams to the container's stdio +func (daemon *Daemon) ContainerAttachRaw(prefixOrName string, stdin io.ReadCloser, stdout, stderr io.Writer, doStream bool, attached chan struct{}) error { + container, err := daemon.GetContainer(prefixOrName) + if err != nil { + return err + } + cfg := stream.AttachConfig{ + UseStdin: stdin != nil, + UseStdout: stdout != nil, + UseStderr: stderr != nil, + TTY: container.Config.Tty, + CloseStdin: container.Config.StdinOnce, + } + container.StreamConfig.AttachStreams(&cfg) + close(attached) + if cfg.UseStdin { + cfg.Stdin = stdin + } + if cfg.UseStdout { + cfg.Stdout = stdout + } + if cfg.UseStderr { + cfg.Stderr = stderr + } + + return daemon.containerAttach(container, &cfg, false, doStream) +} + +func (daemon *Daemon) containerAttach(c *container.Container, cfg *stream.AttachConfig, logs, doStream bool) error { + if logs { + logDriver, logCreated, err := daemon.getLogger(c) + if err != nil { + return err + } + if logCreated { + defer func() { + if err = logDriver.Close(); err != nil { 
+ logrus.Errorf("Error closing logger: %v", err) + } + }() + } + cLog, ok := logDriver.(logger.LogReader) + if !ok { + return logger.ErrReadLogsNotSupported + } + logs := cLog.ReadLogs(logger.ReadConfig{Tail: -1}) + defer logs.Close() + + LogLoop: + for { + select { + case msg, ok := <-logs.Msg: + if !ok { + break LogLoop + } + if msg.Source == "stdout" && cfg.Stdout != nil { + cfg.Stdout.Write(msg.Line) + } + if msg.Source == "stderr" && cfg.Stderr != nil { + cfg.Stderr.Write(msg.Line) + } + case err := <-logs.Err: + logrus.Errorf("Error streaming logs: %v", err) + break LogLoop + } + } + } + + daemon.LogContainerEvent(c, "attach") + + if !doStream { + return nil + } + + if cfg.Stdin != nil { + r, w := io.Pipe() + go func(stdin io.ReadCloser) { + defer w.Close() + defer logrus.Debug("Closing buffered stdin pipe") + io.Copy(w, stdin) + }(cfg.Stdin) + cfg.Stdin = r + } + + if !c.Config.OpenStdin { + cfg.Stdin = nil + } + + if c.Config.StdinOnce && !c.Config.Tty { + // Wait for the container to stop before returning. + waitChan := c.Wait(context.Background(), container.WaitConditionNotRunning) + defer func() { + _ = <-waitChan // Ignore returned exit code. + }() + } + + ctx := c.InitAttachContext() + err := <-c.StreamConfig.CopyStreams(ctx, cfg) + if err != nil { + if _, ok := err.(term.EscapeError); ok { + daemon.LogContainerEvent(c, "detach") + } else { + logrus.Errorf("attach failed with error: %v", err) + } + } + + return nil +} diff --git a/components/engine/daemon/auth.go b/components/engine/daemon/auth.go new file mode 100644 index 0000000000..f5f4d7bf24 --- /dev/null +++ b/components/engine/daemon/auth.go @@ -0,0 +1,13 @@ +package daemon + +import ( + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/dockerversion" +) + +// AuthenticateToRegistry checks the validity of credentials in authConfig +func (daemon *Daemon) AuthenticateToRegistry(ctx context.Context, authConfig *types.AuthConfig) (string, string, error) { + return daemon.RegistryService.Auth(ctx, authConfig, dockerversion.DockerUserAgent(ctx)) +} diff --git a/components/engine/daemon/bindmount_solaris.go b/components/engine/daemon/bindmount_solaris.go new file mode 100644 index 0000000000..87bf3ef72e --- /dev/null +++ b/components/engine/daemon/bindmount_solaris.go @@ -0,0 +1,5 @@ +// +build solaris + +package daemon + +const bindMountType = "lofs" diff --git a/components/engine/daemon/bindmount_unix.go b/components/engine/daemon/bindmount_unix.go new file mode 100644 index 0000000000..3966babb41 --- /dev/null +++ b/components/engine/daemon/bindmount_unix.go @@ -0,0 +1,5 @@ +// +build linux freebsd + +package daemon + +const bindMountType = "bind" diff --git a/components/engine/daemon/build.go b/components/engine/daemon/build.go new file mode 100644 index 0000000000..1bf05ded2c --- /dev/null +++ b/components/engine/daemon/build.go @@ -0,0 +1,122 @@ +package daemon + +import ( + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/builder" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/registry" + "github.com/pkg/errors" + "golang.org/x/net/context" + "io" +) + +type releaseableLayer struct { + layerStore layer.Store + roLayer layer.Layer + rwLayer layer.RWLayer +} + +func (rl *releaseableLayer) Mount() (string, error) { + if rl.roLayer == nil { + return 
"", errors.New("can not mount an image with no root FS") + } + var err error + mountID := stringid.GenerateRandomID() + rl.rwLayer, err = rl.layerStore.CreateRWLayer(mountID, rl.roLayer.ChainID(), nil) + if err != nil { + return "", errors.Wrap(err, "failed to create rwlayer") + } + + return rl.rwLayer.Mount("") +} + +func (rl *releaseableLayer) Release() error { + rl.releaseRWLayer() + return rl.releaseROLayer() +} + +func (rl *releaseableLayer) releaseRWLayer() error { + if rl.rwLayer == nil { + return nil + } + metadata, err := rl.layerStore.ReleaseRWLayer(rl.rwLayer) + layer.LogReleaseMetadata(metadata) + if err != nil { + logrus.Errorf("Failed to release RWLayer: %s", err) + } + return err +} + +func (rl *releaseableLayer) releaseROLayer() error { + if rl.roLayer == nil { + return nil + } + metadata, err := rl.layerStore.Release(rl.roLayer) + layer.LogReleaseMetadata(metadata) + return err +} + +func newReleasableLayerForImage(img *image.Image, layerStore layer.Store) (builder.ReleaseableLayer, error) { + if img.RootFS.ChainID() == "" { + return nil, nil + } + // Hold a reference to the image layer so that it can't be removed before + // it is released + roLayer, err := layerStore.Get(img.RootFS.ChainID()) + if err != nil { + return nil, errors.Wrapf(err, "failed to get layer for image %s", img.ImageID()) + } + return &releaseableLayer{layerStore: layerStore, roLayer: roLayer}, nil +} + +// TODO: could this use the regular daemon PullImage ? +func (daemon *Daemon) pullForBuilder(ctx context.Context, name string, authConfigs map[string]types.AuthConfig, output io.Writer) (*image.Image, error) { + ref, err := reference.ParseNormalizedNamed(name) + if err != nil { + return nil, err + } + ref = reference.TagNameOnly(ref) + + pullRegistryAuth := &types.AuthConfig{} + if len(authConfigs) > 0 { + // The request came with a full auth config, use it + repoInfo, err := daemon.RegistryService.ResolveRepository(ref) + if err != nil { + return nil, err + } + + resolvedConfig := registry.ResolveAuthConfig(authConfigs, repoInfo.Index) + pullRegistryAuth = &resolvedConfig + } + + if err := daemon.pullImageWithReference(ctx, ref, nil, pullRegistryAuth, output); err != nil { + return nil, err + } + return daemon.GetImage(name) +} + +// GetImageAndReleasableLayer returns an image and releaseable layer for a reference or ID. +// Every call to GetImageAndReleasableLayer MUST call releasableLayer.Release() to prevent +// leaking of layers. +func (daemon *Daemon) GetImageAndReleasableLayer(ctx context.Context, refOrID string, opts backend.GetImageAndLayerOptions) (builder.Image, builder.ReleaseableLayer, error) { + if !opts.ForcePull { + image, _ := daemon.GetImage(refOrID) + // TODO: shouldn't we error out if error is different from "not found" ? + if image != nil { + layer, err := newReleasableLayerForImage(image, daemon.layerStore) + return image, layer, err + } + } + + image, err := daemon.pullForBuilder(ctx, refOrID, opts.AuthConfig, opts.Output) + if err != nil { + return nil, nil, err + } + layer, err := newReleasableLayerForImage(image, daemon.layerStore) + return image, layer, err +} diff --git a/components/engine/daemon/cache.go b/components/engine/daemon/cache.go new file mode 100644 index 0000000000..8e3d207758 --- /dev/null +++ b/components/engine/daemon/cache.go @@ -0,0 +1,27 @@ +package daemon + +import ( + "github.com/Sirupsen/logrus" + "github.com/docker/docker/builder" + "github.com/docker/docker/image/cache" +) + +// MakeImageCache creates a stateful image cache. 
+func (daemon *Daemon) MakeImageCache(sourceRefs []string) builder.ImageCache { + if len(sourceRefs) == 0 { + return cache.NewLocal(daemon.imageStore) + } + + cache := cache.New(daemon.imageStore) + + for _, ref := range sourceRefs { + img, err := daemon.GetImage(ref) + if err != nil { + logrus.Warnf("Could not look up %s for cache resolution, skipping: %+v", ref, err) + continue + } + cache.Populate(img) + } + + return cache +} diff --git a/components/engine/daemon/caps/utils_unix.go b/components/engine/daemon/caps/utils_unix.go new file mode 100644 index 0000000000..c99485f51d --- /dev/null +++ b/components/engine/daemon/caps/utils_unix.go @@ -0,0 +1,131 @@ +// +build !windows + +package caps + +import ( + "fmt" + "strings" + + "github.com/docker/docker/pkg/stringutils" + "github.com/syndtr/gocapability/capability" +) + +var capabilityList Capabilities + +func init() { + last := capability.CAP_LAST_CAP + // hack for RHEL6 which has no /proc/sys/kernel/cap_last_cap + if last == capability.Cap(63) { + last = capability.CAP_BLOCK_SUSPEND + } + for _, cap := range capability.List() { + if cap > last { + continue + } + capabilityList = append(capabilityList, + &CapabilityMapping{ + Key: "CAP_" + strings.ToUpper(cap.String()), + Value: cap, + }, + ) + } +} + +type ( + // CapabilityMapping maps linux capability name to its value of capability.Cap type + // Capabilities is one of the security systems in Linux Security Module (LSM) + // framework provided by the kernel. + // For more details on capabilities, see http://man7.org/linux/man-pages/man7/capabilities.7.html + CapabilityMapping struct { + Key string `json:"key,omitempty"` + Value capability.Cap `json:"value,omitempty"` + } + // Capabilities contains all CapabilityMapping + Capabilities []*CapabilityMapping +) + +// String returns of CapabilityMapping +func (c *CapabilityMapping) String() string { + return c.Key +} + +// GetCapability returns CapabilityMapping which contains specific key +func GetCapability(key string) *CapabilityMapping { + for _, capp := range capabilityList { + if capp.Key == key { + cpy := *capp + return &cpy + } + } + return nil +} + +// GetAllCapabilities returns all of the capabilities +func GetAllCapabilities() []string { + output := make([]string, len(capabilityList)) + for i, capability := range capabilityList { + output[i] = capability.String() + } + return output +} + +// TweakCapabilities can tweak capabilities by adding or dropping capabilities +// based on the basics capabilities. +func TweakCapabilities(basics, adds, drops []string) ([]string, error) { + var ( + newCaps []string + allCaps = GetAllCapabilities() + ) + + // FIXME(tonistiigi): docker format is without CAP_ prefix, oci is with prefix + // Currently they are mixed in here. We should do conversion in one place. 
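The add/drop flow that follows mixes prefixed (`CAP_CHOWN`) and unprefixed (`CHOWN`) spellings, as the FIXME notes. A trimmed-down model of the intended semantics over plain string slices; `tweak` and `inSlice` are illustrative names, and the `all` keyword plus kernel validation are simplified away:

```go
package main

import (
	"fmt"
	"strings"
)

func inSlice(list []string, v string) bool {
	for _, x := range list {
		if strings.EqualFold(x, v) {
			return true
		}
	}
	return false
}

// tweak mirrors the flow below: keep every default capability that was not
// dropped, then append the explicit, prefixed adds.
func tweak(basics, adds, drops []string) []string {
	var out []string
	if !inSlice(drops, "all") {
		for _, c := range basics {
			if !inSlice(drops, strings.TrimPrefix(c, "CAP_")) {
				out = append(out, c)
			}
		}
	}
	for _, c := range adds {
		if strings.EqualFold(c, "all") {
			continue // handled separately in the real function
		}
		c = "CAP_" + strings.ToUpper(c)
		if !inSlice(out, c) {
			out = append(out, c)
		}
	}
	return out
}

func main() {
	defaults := []string{"CAP_CHOWN", "CAP_KILL"}
	fmt.Println(tweak(defaults, []string{"SYS_ADMIN"}, []string{"KILL"}))
	// [CAP_CHOWN CAP_SYS_ADMIN]
}
```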
+
+	// look for invalid cap in the drop list
+	for _, cap := range drops {
+		if strings.ToLower(cap) == "all" {
+			continue
+		}
+
+		if !stringutils.InSlice(allCaps, "CAP_"+cap) {
+			return nil, fmt.Errorf("Unknown capability drop: %q", cap)
+		}
+	}
+
+	// handle --cap-add=all
+	if stringutils.InSlice(adds, "all") {
+		basics = allCaps
+	}
+
+	if !stringutils.InSlice(drops, "all") {
+		for _, cap := range basics {
+			// skip `all` already handled above
+			if strings.ToLower(cap) == "all" {
+				continue
+			}
+
+			// if we don't drop `all`, add back all the non-dropped caps
+			if !stringutils.InSlice(drops, cap[4:]) {
+				newCaps = append(newCaps, strings.ToUpper(cap))
+			}
+		}
+	}
+
+	for _, cap := range adds {
+		// skip `all` already handled above
+		if strings.ToLower(cap) == "all" {
+			continue
+		}
+
+		cap = "CAP_" + cap
+
+		if !stringutils.InSlice(allCaps, cap) {
+			return nil, fmt.Errorf("Unknown capability to add: %q", cap)
+		}
+
+		// add cap if not already in the list
+		if !stringutils.InSlice(newCaps, cap) {
+			newCaps = append(newCaps, strings.ToUpper(cap))
+		}
+	}
+	return newCaps, nil
+}
diff --git a/components/engine/daemon/changes.go b/components/engine/daemon/changes.go
new file mode 100644
index 0000000000..fc8cd2752c
--- /dev/null
+++ b/components/engine/daemon/changes.go
@@ -0,0 +1,31 @@
+package daemon
+
+import (
+	"errors"
+	"runtime"
+	"time"
+
+	"github.com/docker/docker/pkg/archive"
+)
+
+// ContainerChanges returns a list of container fs changes
+func (daemon *Daemon) ContainerChanges(name string) ([]archive.Change, error) {
+	start := time.Now()
+	container, err := daemon.GetContainer(name)
+	if err != nil {
+		return nil, err
+	}
+
+	if runtime.GOOS == "windows" && container.IsRunning() {
+		return nil, errors.New("Windows does not support diff of a running container")
+	}
+
+	container.Lock()
+	defer container.Unlock()
+	c, err := container.RWLayer.Changes()
+	if err != nil {
+		return nil, err
+	}
+	containerActions.WithValues("changes").UpdateSince(start)
+	return c, nil
+}
diff --git a/components/engine/daemon/checkpoint.go b/components/engine/daemon/checkpoint.go
new file mode 100644
index 0000000000..d3028f1e28
--- /dev/null
+++ b/components/engine/daemon/checkpoint.go
@@ -0,0 +1,137 @@
+package daemon
+
+import (
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+
+	"github.com/docker/docker/api"
+	"github.com/docker/docker/api/types"
+)
+
+var (
+	validCheckpointNameChars   = api.RestrictedNameChars
+	validCheckpointNamePattern = api.RestrictedNamePattern
+)
+
+// getCheckpointDir verifies the checkpoint directory for the create, remove,
+// and list options, and checks whether the checkpoint already exists
+func getCheckpointDir(checkDir, checkpointID string, ctrName string, ctrID string, ctrCheckpointDir string, create bool) (string, error) {
+	var checkpointDir string
+	var err2 error
+	if checkDir != "" {
+		checkpointDir = filepath.Join(checkDir, ctrID, "checkpoints")
+	} else {
+		checkpointDir = ctrCheckpointDir
+	}
+	checkpointAbsDir := filepath.Join(checkpointDir, checkpointID)
+	stat, err := os.Stat(checkpointAbsDir)
+	if create {
+		switch {
+		case err == nil && stat.IsDir():
+			err2 = fmt.Errorf("checkpoint with name %s already exists for container %s", checkpointID, ctrName)
+		case err != nil && os.IsNotExist(err):
+			err2 = nil
+		case err != nil:
+			err2 = err
+		case err == nil:
+			err2 = fmt.Errorf("%s exists and is not a directory", checkpointAbsDir)
+		}
+	} else {
+		switch {
+		case err != nil:
+			err2 = fmt.Errorf("checkpoint %s does not exist for container %s", checkpointID,
ctrName) + case err == nil && stat.IsDir(): + err2 = nil + case err == nil: + err2 = fmt.Errorf("%s exists and is not a directory", checkpointAbsDir) + } + } + return checkpointDir, err2 +} + +// CheckpointCreate checkpoints the process running in a container with CRIU +func (daemon *Daemon) CheckpointCreate(name string, config types.CheckpointCreateOptions) error { + container, err := daemon.GetContainer(name) + if err != nil { + return err + } + + if !container.IsRunning() { + return fmt.Errorf("Container %s not running", name) + } + + if !validCheckpointNamePattern.MatchString(config.CheckpointID) { + return fmt.Errorf("Invalid checkpoint ID (%s), only %s are allowed", config.CheckpointID, validCheckpointNameChars) + } + + checkpointDir, err := getCheckpointDir(config.CheckpointDir, config.CheckpointID, name, container.ID, container.CheckpointDir(), true) + if err != nil { + return fmt.Errorf("cannot checkpoint container %s: %s", name, err) + } + + err = daemon.containerd.CreateCheckpoint(container.ID, config.CheckpointID, checkpointDir, config.Exit) + if err != nil { + return fmt.Errorf("Cannot checkpoint container %s: %s", name, err) + } + + daemon.LogContainerEvent(container, "checkpoint") + + return nil +} + +// CheckpointDelete deletes the specified checkpoint +func (daemon *Daemon) CheckpointDelete(name string, config types.CheckpointDeleteOptions) error { + container, err := daemon.GetContainer(name) + if err != nil { + return err + } + checkpointDir, err := getCheckpointDir(config.CheckpointDir, config.CheckpointID, name, container.ID, container.CheckpointDir(), false) + if err == nil { + return os.RemoveAll(filepath.Join(checkpointDir, config.CheckpointID)) + } + return err +} + +// CheckpointList lists all checkpoints of the specified container +func (daemon *Daemon) CheckpointList(name string, config types.CheckpointListOptions) ([]types.Checkpoint, error) { + var out []types.Checkpoint + + container, err := daemon.GetContainer(name) + if err != nil { + return nil, err + } + + checkpointDir, err := getCheckpointDir(config.CheckpointDir, "", name, container.ID, container.CheckpointDir(), false) + if err != nil { + return nil, err + } + + if err := os.MkdirAll(checkpointDir, 0755); err != nil { + return nil, err + } + + dirs, err := ioutil.ReadDir(checkpointDir) + if err != nil { + return nil, err + } + + for _, d := range dirs { + if !d.IsDir() { + continue + } + path := filepath.Join(checkpointDir, d.Name(), "config.json") + data, err := ioutil.ReadFile(path) + if err != nil { + return nil, err + } + var cpt types.Checkpoint + if err := json.Unmarshal(data, &cpt); err != nil { + return nil, err + } + out = append(out, cpt) + } + + return out, nil +} diff --git a/components/engine/daemon/cluster.go b/components/engine/daemon/cluster.go new file mode 100644 index 0000000000..d22970bcd7 --- /dev/null +++ b/components/engine/daemon/cluster.go @@ -0,0 +1,26 @@ +package daemon + +import ( + apitypes "github.com/docker/docker/api/types" + lncluster "github.com/docker/libnetwork/cluster" +) + +// Cluster is the interface for github.com/docker/docker/daemon/cluster.(*Cluster). 
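CheckpointList above scans one subdirectory per checkpoint and decodes the config.json stored inside each. The directory-walking shape, as a standalone sketch; the checkpoint type is reduced to a single field and the path in main is illustrative, with the on-disk layout assumed from the code above:

```go
package main

import (
	"encoding/json"
	"fmt"
	"io/ioutil"
	"path/filepath"
)

type checkpoint struct {
	Name string `json:"Name"`
}

// listCheckpoints reads one config.json per checkpoint subdirectory, the
// same scan CheckpointList performs.
func listCheckpoints(dir string) ([]checkpoint, error) {
	entries, err := ioutil.ReadDir(dir)
	if err != nil {
		return nil, err
	}
	var out []checkpoint
	for _, e := range entries {
		if !e.IsDir() {
			continue
		}
		data, err := ioutil.ReadFile(filepath.Join(dir, e.Name(), "config.json"))
		if err != nil {
			return nil, err
		}
		var cpt checkpoint
		if err := json.Unmarshal(data, &cpt); err != nil {
			return nil, err
		}
		out = append(out, cpt)
	}
	return out, nil
}

func main() {
	// The directory is illustrative; the daemon derives it per container.
	cpts, err := listCheckpoints("/tmp/checkpoints")
	fmt.Println(cpts, err)
}
```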
+type Cluster interface {
+	ClusterStatus
+	NetworkManager
+	SendClusterEvent(event lncluster.ConfigEventType)
+}
+
+// ClusterStatus interface provides information about the Swarm status of the Cluster
+type ClusterStatus interface {
+	IsAgent() bool
+	IsManager() bool
+}
+
+// NetworkManager provides methods to manage networks
+type NetworkManager interface {
+	GetNetwork(input string) (apitypes.NetworkResource, error)
+	GetNetworks() ([]apitypes.NetworkResource, error)
+	RemoveNetwork(input string) error
+}
diff --git a/components/engine/daemon/cluster/cluster.go b/components/engine/daemon/cluster/cluster.go
new file mode 100644
index 0000000000..6874dbf0ee
--- /dev/null
+++ b/components/engine/daemon/cluster/cluster.go
@@ -0,0 +1,439 @@
+package cluster
+
+//
+// ## Swarmkit integration
+//
+// Cluster - static configurable object for accessing everything swarm-related.
+// Contains methods for connecting to and controlling the cluster. Always
+// exists, even if swarm mode is not enabled.
+//
+// NodeRunner - Manager for starting the swarmkit node. Present if and only if
+// swarm mode is enabled. Implements a backoff restart loop in case of errors.
+//
+// NodeState - Information about the current node status, including access to
+// gRPC clients if a manager is active.
+//
+// ### Locking
+//
+// `cluster.controlMutex` - taken for the whole lifecycle of the processes that
+// can reconfigure the cluster (init/join/leave, etc.). Ensures that one
+// reconfiguration action has fully completed before another can start.
+//
+// `cluster.mu` - taken when the actual changes in cluster configuration
+// happen. Different from `controlMutex` because in some cases we need to
+// access the current cluster state even while a long-running reconfiguration
+// is going on. For example, the network stack may ask for the current cluster
+// state in the middle of a shutdown. Any time the current cluster state is
+// read, you should take the read lock of `cluster.mu`. If you are writing an
+// API responder that returns synchronously, hold `cluster.mu.RLock()` for the
+// duration of the whole handler function. That ensures that the node will not
+// be shut down until the handler has finished.
+//
+// NodeRunner implements its own internal locks that should not be used outside
+// of the struct. Instead, you should just call the `nodeRunner.State()` method
+// to get the current state of the cluster (you still need `cluster.mu.RLock()`
+// to access the `cluster.nr` reference itself). Most of the changes in
+// NodeRunner happen because of an external event (a network problem or an
+// unexpected swarmkit error), and Docker shouldn't take any locks that delay
+// these changes from happening.
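The two-mutex discipline described above separates slow reconfiguration from cheap state reads. A compact standalone model of that split, with illustrative names (`controlMutex` serializes init/join/leave, while an RWMutex guards the node-runner reference):

```go
package main

import (
	"fmt"
	"sync"
)

type cluster struct {
	controlMutex sync.Mutex   // serializes whole reconfigurations
	mu           sync.RWMutex // guards nr
	nr           *string      // stands in for the node runner
}

// reconfigure holds controlMutex for the entire operation, but takes the
// write lock only around the brief pointer swap.
func (c *cluster) reconfigure(next string) {
	c.controlMutex.Lock()
	defer c.controlMutex.Unlock()

	// ... slow work (starting or stopping the swarmkit node) runs here,
	// without blocking readers ...

	c.mu.Lock()
	c.nr = &next
	c.mu.Unlock()
}

// state is what a synchronous API responder would call; holding RLock for
// the duration of the read keeps the node from shutting down underneath it.
func (c *cluster) state() string {
	c.mu.RLock()
	defer c.mu.RUnlock()
	if c.nr == nil {
		return "inactive"
	}
	return *c.nr
}

func main() {
	c := &cluster{}
	c.reconfigure("manager")
	fmt.Println(c.state())
}
```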
+// + +import ( + "fmt" + "net" + "os" + "path/filepath" + "sync" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types/network" + types "github.com/docker/docker/api/types/swarm" + executorpkg "github.com/docker/docker/daemon/cluster/executor" + "github.com/docker/docker/pkg/signal" + lncluster "github.com/docker/libnetwork/cluster" + swarmapi "github.com/docker/swarmkit/api" + swarmnode "github.com/docker/swarmkit/node" + "github.com/pkg/errors" + "golang.org/x/net/context" +) + +const swarmDirName = "swarm" +const controlSocket = "control.sock" +const swarmConnectTimeout = 20 * time.Second +const swarmRequestTimeout = 20 * time.Second +const stateFile = "docker-state.json" +const defaultAddr = "0.0.0.0:2377" + +const ( + initialReconnectDelay = 100 * time.Millisecond + maxReconnectDelay = 30 * time.Second + contextPrefix = "com.docker.swarm" +) + +// errNoSwarm is returned on leaving a cluster that was never initialized +var errNoSwarm = errors.New("This node is not part of a swarm") + +// errSwarmExists is returned on initialize or join request for a cluster that has already been activated +var errSwarmExists = errors.New("This node is already part of a swarm. Use \"docker swarm leave\" to leave this swarm and join another one.") + +// errSwarmJoinTimeoutReached is returned when cluster join could not complete before timeout was reached. +var errSwarmJoinTimeoutReached = errors.New("Timeout was reached before node was joined. The attempt to join the swarm will continue in the background. Use the \"docker info\" command to see the current swarm status of your node.") + +// errSwarmLocked is returned if the swarm is encrypted and needs a key to unlock it. +var errSwarmLocked = errors.New("Swarm is encrypted and needs to be unlocked before it can be used. Please use \"docker swarm unlock\" to unlock it.") + +// errSwarmCertificatesExpired is returned if docker was not started for the whole validity period and they had no chance to renew automatically. +var errSwarmCertificatesExpired = errors.New("Swarm certificates have expired. To replace them, leave the swarm and join again.") + +// NetworkSubnetsProvider exposes functions for retrieving the subnets +// of networks managed by Docker, so they can be filtered. +type NetworkSubnetsProvider interface { + Subnets() ([]net.IPNet, []net.IPNet) +} + +// Config provides values for Cluster. +type Config struct { + Root string + Name string + Backend executorpkg.Backend + NetworkSubnetsProvider NetworkSubnetsProvider + + // DefaultAdvertiseAddr is the default host/IP or network interface to use + // if no AdvertiseAddr value is specified. + DefaultAdvertiseAddr string + + // path to store runtime state, such as the swarm control socket + RuntimeRoot string + + // WatchStream is a channel to pass watch API notifications to daemon + WatchStream chan *swarmapi.WatchMessage +} + +// Cluster provides capabilities to participate in a cluster as a worker or a +// manager. +type Cluster struct { + mu sync.RWMutex + controlMutex sync.RWMutex // protect init/join/leave user operations + nr *nodeRunner + root string + runtimeRoot string + config Config + configEvent chan lncluster.ConfigEventType // todo: make this array and goroutine safe + attachers map[string]*attacher + watchStream chan *swarmapi.WatchMessage +} + +// attacher manages the in-memory attachment state of a container +// attachment to a global scope network managed by swarm manager. 
It +// helps in identifying the attachment ID via the taskID and the +// corresponding attachment configuration obtained from the manager. +type attacher struct { + taskID string + config *network.NetworkingConfig + inProgress bool + attachWaitCh chan *network.NetworkingConfig + attachCompleteCh chan struct{} + detachWaitCh chan struct{} +} + +// New creates a new Cluster instance using provided config. +func New(config Config) (*Cluster, error) { + root := filepath.Join(config.Root, swarmDirName) + if err := os.MkdirAll(root, 0700); err != nil { + return nil, err + } + if config.RuntimeRoot == "" { + config.RuntimeRoot = root + } + if err := os.MkdirAll(config.RuntimeRoot, 0700); err != nil { + return nil, err + } + c := &Cluster{ + root: root, + config: config, + configEvent: make(chan lncluster.ConfigEventType, 10), + runtimeRoot: config.RuntimeRoot, + attachers: make(map[string]*attacher), + watchStream: config.WatchStream, + } + return c, nil +} + +// Start the Cluster instance +// TODO The split between New and Start can be join again when the SendClusterEvent +// method is no longer required +func (c *Cluster) Start() error { + root := filepath.Join(c.config.Root, swarmDirName) + + nodeConfig, err := loadPersistentState(root) + if err != nil { + if os.IsNotExist(err) { + return nil + } + return err + } + + nr, err := c.newNodeRunner(*nodeConfig) + if err != nil { + return err + } + c.nr = nr + + select { + case <-time.After(swarmConnectTimeout): + logrus.Error("swarm component could not be started before timeout was reached") + case err := <-nr.Ready(): + if err != nil { + logrus.WithError(err).Error("swarm component could not be started") + return nil + } + } + return nil +} + +func (c *Cluster) newNodeRunner(conf nodeStartConfig) (*nodeRunner, error) { + if err := c.config.Backend.IsSwarmCompatible(); err != nil { + return nil, err + } + + actualLocalAddr := conf.LocalAddr + if actualLocalAddr == "" { + // If localAddr was not specified, resolve it automatically + // based on the route to joinAddr. localAddr can only be left + // empty on "join". + listenHost, _, err := net.SplitHostPort(conf.ListenAddr) + if err != nil { + return nil, fmt.Errorf("could not parse listen address: %v", err) + } + + listenAddrIP := net.ParseIP(listenHost) + if listenAddrIP == nil || !listenAddrIP.IsUnspecified() { + actualLocalAddr = listenHost + } else { + if conf.RemoteAddr == "" { + // Should never happen except using swarms created by + // old versions that didn't save remoteAddr. + conf.RemoteAddr = "8.8.8.8:53" + } + conn, err := net.Dial("udp", conf.RemoteAddr) + if err != nil { + return nil, fmt.Errorf("could not find local IP address: %v", err) + } + localHostPort := conn.LocalAddr().String() + actualLocalAddr, _, _ = net.SplitHostPort(localHostPort) + conn.Close() + } + } + + nr := &nodeRunner{cluster: c} + nr.actualLocalAddr = actualLocalAddr + + if err := nr.Start(conf); err != nil { + return nil, err + } + + c.config.Backend.DaemonJoinsCluster(c) + + return nr, nil +} + +func (c *Cluster) getRequestContext() (context.Context, func()) { // TODO: not needed when requests don't block on qourum lost + return context.WithTimeout(context.Background(), swarmRequestTimeout) +} + +// IsManager returns true if Cluster is participating as a manager. +func (c *Cluster) IsManager() bool { + c.mu.RLock() + defer c.mu.RUnlock() + return c.currentNodeState().IsActiveManager() +} + +// IsAgent returns true if Cluster is participating as a worker/agent. 
+func (c *Cluster) IsAgent() bool { + c.mu.RLock() + defer c.mu.RUnlock() + return c.currentNodeState().status == types.LocalNodeStateActive +} + +// GetLocalAddress returns the local address. +func (c *Cluster) GetLocalAddress() string { + c.mu.RLock() + defer c.mu.RUnlock() + return c.currentNodeState().actualLocalAddr +} + +// GetListenAddress returns the listen address. +func (c *Cluster) GetListenAddress() string { + c.mu.RLock() + defer c.mu.RUnlock() + if c.nr != nil { + return c.nr.config.ListenAddr + } + return "" +} + +// GetAdvertiseAddress returns the remotely reachable address of this node. +func (c *Cluster) GetAdvertiseAddress() string { + c.mu.RLock() + defer c.mu.RUnlock() + if c.nr != nil && c.nr.config.AdvertiseAddr != "" { + advertiseHost, _, _ := net.SplitHostPort(c.nr.config.AdvertiseAddr) + return advertiseHost + } + return c.currentNodeState().actualLocalAddr +} + +// GetDataPathAddress returns the address to be used for the data path traffic, if specified. +func (c *Cluster) GetDataPathAddress() string { + c.mu.RLock() + defer c.mu.RUnlock() + if c.nr != nil { + return c.nr.config.DataPathAddr + } + return "" +} + +// GetRemoteAddressList returns the advertise address for each of the remote managers if +// available. +func (c *Cluster) GetRemoteAddressList() []string { + c.mu.RLock() + defer c.mu.RUnlock() + return c.getRemoteAddressList() +} + +func (c *Cluster) getRemoteAddressList() []string { + state := c.currentNodeState() + if state.swarmNode == nil { + return []string{} + } + + nodeID := state.swarmNode.NodeID() + remotes := state.swarmNode.Remotes() + addressList := make([]string, 0, len(remotes)) + for _, r := range remotes { + if r.NodeID != nodeID { + addressList = append(addressList, r.Addr) + } + } + return addressList +} + +// ListenClusterEvents returns a channel that receives messages on cluster +// participation changes. +// todo: make cancelable and accessible to multiple callers +func (c *Cluster) ListenClusterEvents() <-chan lncluster.ConfigEventType { + return c.configEvent +} + +// currentNodeState should not be called without a read lock +func (c *Cluster) currentNodeState() nodeState { + return c.nr.State() +} + +// errNoManager returns error describing why manager commands can't be used. +// Call with read lock. +func (c *Cluster) errNoManager(st nodeState) error { + if st.swarmNode == nil { + if errors.Cause(st.err) == errSwarmLocked { + return errSwarmLocked + } + if st.err == errSwarmCertificatesExpired { + return errSwarmCertificatesExpired + } + return errors.New("This node is not a swarm manager. Use \"docker swarm init\" or \"docker swarm join\" to connect this node to swarm and try again.") + } + if st.swarmNode.Manager() != nil { + return errors.New("This node is not a swarm manager. Manager is being prepared or has trouble connecting to the cluster.") + } + return errors.New("This node is not a swarm manager. Worker nodes can't be used to view or modify cluster state. Please run this command on a manager node or promote the current node to a manager.") +} + +// Cleanup stops active swarm node. This is run before daemon shutdown. 
+func (c *Cluster) Cleanup() { + c.controlMutex.Lock() + defer c.controlMutex.Unlock() + + c.mu.Lock() + node := c.nr + if node == nil { + c.mu.Unlock() + return + } + state := c.currentNodeState() + c.mu.Unlock() + + if state.IsActiveManager() { + active, reachable, unreachable, err := managerStats(state.controlClient, state.NodeID()) + if err == nil { + singlenode := active && isLastManager(reachable, unreachable) + if active && !singlenode && removingManagerCausesLossOfQuorum(reachable, unreachable) { + logrus.Errorf("Leaving cluster with %v managers left out of %v. Raft quorum will be lost.", reachable-1, reachable+unreachable) + } + } + } + + if err := node.Stop(); err != nil { + logrus.Errorf("failed to shut down cluster node: %v", err) + signal.DumpStacks("") + } + + c.mu.Lock() + c.nr = nil + c.mu.Unlock() +} + +func managerStats(client swarmapi.ControlClient, currentNodeID string) (current bool, reachable int, unreachable int, err error) { + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + nodes, err := client.ListNodes(ctx, &swarmapi.ListNodesRequest{}) + if err != nil { + return false, 0, 0, err + } + for _, n := range nodes.Nodes { + if n.ManagerStatus != nil { + if n.ManagerStatus.Reachability == swarmapi.RaftMemberStatus_REACHABLE { + reachable++ + if n.ID == currentNodeID { + current = true + } + } + if n.ManagerStatus.Reachability == swarmapi.RaftMemberStatus_UNREACHABLE { + unreachable++ + } + } + } + return +} + +func detectLockedError(err error) error { + if err == swarmnode.ErrInvalidUnlockKey { + return errors.WithStack(errSwarmLocked) + } + return err +} + +func (c *Cluster) lockedManagerAction(fn func(ctx context.Context, state nodeState) error) error { + c.mu.RLock() + defer c.mu.RUnlock() + + state := c.currentNodeState() + if !state.IsActiveManager() { + return c.errNoManager(state) + } + + ctx, cancel := c.getRequestContext() + defer cancel() + + return fn(ctx, state) +} + +// SendClusterEvent allows to send cluster events on the configEvent channel +// TODO This method should not be exposed. +// Currently it is used to notify the network controller that the keys are +// available +func (c *Cluster) SendClusterEvent(event lncluster.ConfigEventType) { + c.mu.RLock() + defer c.mu.RUnlock() + c.configEvent <- event +} diff --git a/components/engine/daemon/cluster/configs.go b/components/engine/daemon/cluster/configs.go new file mode 100644 index 0000000000..3d418c1405 --- /dev/null +++ b/components/engine/daemon/cluster/configs.go @@ -0,0 +1,117 @@ +package cluster + +import ( + apitypes "github.com/docker/docker/api/types" + types "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/daemon/cluster/convert" + swarmapi "github.com/docker/swarmkit/api" + "golang.org/x/net/context" +) + +// GetConfig returns a config from a managed swarm cluster +func (c *Cluster) GetConfig(input string) (types.Config, error) { + var config *swarmapi.Config + + if err := c.lockedManagerAction(func(ctx context.Context, state nodeState) error { + s, err := getConfig(ctx, state.controlClient, input) + if err != nil { + return err + } + config = s + return nil + }); err != nil { + return types.Config{}, err + } + return convert.ConfigFromGRPC(config), nil +} + +// GetConfigs returns all configs of a managed swarm cluster. 
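Cleanup above warns when stopping this manager would break Raft quorum. The check it calls, removingManagerCausesLossOfQuorum, is defined elsewhere in this package and is not shown in this diff; the majority arithmetic it encodes can be sketched as follows (a hedged reconstruction, not the exact helper):

```go
package main

import "fmt"

// losesQuorum reports whether taking one reachable manager offline leaves
// fewer reachable managers than a Raft majority of the member count.
func losesQuorum(reachable, unreachable int) bool {
	total := reachable + unreachable // membership does not shrink; one member just stops
	return reachable-1 < total/2+1
}

func main() {
	fmt.Println(losesQuorum(3, 0)) // false: 2 of 3 is still a majority
	fmt.Println(losesQuorum(2, 1)) // true: 1 of 3 cannot form a majority
}
```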
+func (c *Cluster) GetConfigs(options apitypes.ConfigListOptions) ([]types.Config, error) { + c.mu.RLock() + defer c.mu.RUnlock() + + state := c.currentNodeState() + if !state.IsActiveManager() { + return nil, c.errNoManager(state) + } + + filters, err := newListConfigsFilters(options.Filters) + if err != nil { + return nil, err + } + ctx, cancel := c.getRequestContext() + defer cancel() + + r, err := state.controlClient.ListConfigs(ctx, + &swarmapi.ListConfigsRequest{Filters: filters}) + if err != nil { + return nil, err + } + + configs := []types.Config{} + + for _, config := range r.Configs { + configs = append(configs, convert.ConfigFromGRPC(config)) + } + + return configs, nil +} + +// CreateConfig creates a new config in a managed swarm cluster. +func (c *Cluster) CreateConfig(s types.ConfigSpec) (string, error) { + var resp *swarmapi.CreateConfigResponse + if err := c.lockedManagerAction(func(ctx context.Context, state nodeState) error { + configSpec := convert.ConfigSpecToGRPC(s) + + r, err := state.controlClient.CreateConfig(ctx, + &swarmapi.CreateConfigRequest{Spec: &configSpec}) + if err != nil { + return err + } + resp = r + return nil + }); err != nil { + return "", err + } + return resp.Config.ID, nil +} + +// RemoveConfig removes a config from a managed swarm cluster. +func (c *Cluster) RemoveConfig(input string) error { + return c.lockedManagerAction(func(ctx context.Context, state nodeState) error { + config, err := getConfig(ctx, state.controlClient, input) + if err != nil { + return err + } + + req := &swarmapi.RemoveConfigRequest{ + ConfigID: config.ID, + } + + _, err = state.controlClient.RemoveConfig(ctx, req) + return err + }) +} + +// UpdateConfig updates a config in a managed swarm cluster. +// Note: this is not exposed to the CLI but is available from the API only +func (c *Cluster) UpdateConfig(input string, version uint64, spec types.ConfigSpec) error { + return c.lockedManagerAction(func(ctx context.Context, state nodeState) error { + config, err := getConfig(ctx, state.controlClient, input) + if err != nil { + return err + } + + configSpec := convert.ConfigSpecToGRPC(spec) + + _, err = state.controlClient.UpdateConfig(ctx, + &swarmapi.UpdateConfigRequest{ + ConfigID: config.ID, + ConfigVersion: &swarmapi.Version{ + Index: version, + }, + Spec: &configSpec, + }) + return err + }) +} diff --git a/components/engine/daemon/cluster/controllers/plugin/controller.go b/components/engine/daemon/cluster/controllers/plugin/controller.go new file mode 100644 index 0000000000..de7eb2c00f --- /dev/null +++ b/components/engine/daemon/cluster/controllers/plugin/controller.go @@ -0,0 +1,79 @@ +package plugin + +import ( + "github.com/Sirupsen/logrus" + "github.com/docker/swarmkit/api" + "golang.org/x/net/context" +) + +// Controller is the controller for the plugin backend +type Controller struct{} + +// NewController returns a new cluster plugin controller +func NewController() (*Controller, error) { + return &Controller{}, nil +} + +// Update is the update phase from swarmkit +func (p *Controller) Update(ctx context.Context, t *api.Task) error { + logrus.WithFields(logrus.Fields{ + "controller": "plugin", + }).Debug("Update") + return nil +} + +// Prepare is the prepare phase from swarmkit +func (p *Controller) Prepare(ctx context.Context) error { + logrus.WithFields(logrus.Fields{ + "controller": "plugin", + }).Debug("Prepare") + return nil +} + +// Start is the start phase from swarmkit +func (p *Controller) Start(ctx context.Context) error { + 
logrus.WithFields(logrus.Fields{ + "controller": "plugin", + }).Debug("Start") + return nil +} + +// Wait causes the task to wait until returned +func (p *Controller) Wait(ctx context.Context) error { + logrus.WithFields(logrus.Fields{ + "controller": "plugin", + }).Debug("Wait") + return nil +} + +// Shutdown is the shutdown phase from swarmkit +func (p *Controller) Shutdown(ctx context.Context) error { + logrus.WithFields(logrus.Fields{ + "controller": "plugin", + }).Debug("Shutdown") + return nil +} + +// Terminate is the terminate phase from swarmkit +func (p *Controller) Terminate(ctx context.Context) error { + logrus.WithFields(logrus.Fields{ + "controller": "plugin", + }).Debug("Terminate") + return nil +} + +// Remove is the remove phase from swarmkit +func (p *Controller) Remove(ctx context.Context) error { + logrus.WithFields(logrus.Fields{ + "controller": "plugin", + }).Debug("Remove") + return nil +} + +// Close is the close phase from swarmkit +func (p *Controller) Close() error { + logrus.WithFields(logrus.Fields{ + "controller": "plugin", + }).Debug("Close") + return nil +} diff --git a/components/engine/daemon/cluster/convert/config.go b/components/engine/daemon/cluster/convert/config.go new file mode 100644 index 0000000000..6b28712ff9 --- /dev/null +++ b/components/engine/daemon/cluster/convert/config.go @@ -0,0 +1,61 @@ +package convert + +import ( + swarmtypes "github.com/docker/docker/api/types/swarm" + swarmapi "github.com/docker/swarmkit/api" + gogotypes "github.com/gogo/protobuf/types" +) + +// ConfigFromGRPC converts a grpc Config to a Config. +func ConfigFromGRPC(s *swarmapi.Config) swarmtypes.Config { + config := swarmtypes.Config{ + ID: s.ID, + Spec: swarmtypes.ConfigSpec{ + Annotations: annotationsFromGRPC(s.Spec.Annotations), + Data: s.Spec.Data, + }, + } + + config.Version.Index = s.Meta.Version.Index + // Meta + config.CreatedAt, _ = gogotypes.TimestampFromProto(s.Meta.CreatedAt) + config.UpdatedAt, _ = gogotypes.TimestampFromProto(s.Meta.UpdatedAt) + + return config +} + +// ConfigSpecToGRPC converts Config to a grpc Config. 
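ConfigFromGRPC above converts the protobuf Meta timestamps with `gogotypes.TimestampFromProto`, discarding the error. A minimal round trip in both directions, using the same `github.com/gogo/protobuf/types` package the converters import:

```go
package main

import (
	"fmt"
	"time"

	gogotypes "github.com/gogo/protobuf/types"
)

func main() {
	now := time.Now().UTC()

	// native -> proto, as the ...ToGRPC converters do
	ts, err := gogotypes.TimestampProto(now)
	if err != nil {
		panic(err)
	}

	// proto -> native, as ConfigFromGRPC does for Meta.CreatedAt
	back, err := gogotypes.TimestampFromProto(ts)
	if err != nil {
		panic(err)
	}

	fmt.Println(now.Equal(back)) // true: the round trip preserves nanoseconds
}
```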
+func ConfigSpecToGRPC(s swarmtypes.ConfigSpec) swarmapi.ConfigSpec { + return swarmapi.ConfigSpec{ + Annotations: swarmapi.Annotations{ + Name: s.Name, + Labels: s.Labels, + }, + Data: s.Data, + } +} + +// ConfigReferencesFromGRPC converts a slice of grpc ConfigReference to ConfigReference +func ConfigReferencesFromGRPC(s []*swarmapi.ConfigReference) []*swarmtypes.ConfigReference { + refs := []*swarmtypes.ConfigReference{} + + for _, r := range s { + ref := &swarmtypes.ConfigReference{ + ConfigID: r.ConfigID, + ConfigName: r.ConfigName, + } + + if t, ok := r.Target.(*swarmapi.ConfigReference_File); ok { + ref.File = &swarmtypes.ConfigReferenceFileTarget{ + Name: t.File.Name, + UID: t.File.UID, + GID: t.File.GID, + Mode: t.File.Mode, + } + } + + refs = append(refs, ref) + } + + return refs +} diff --git a/components/engine/daemon/cluster/convert/container.go b/components/engine/daemon/cluster/convert/container.go new file mode 100644 index 0000000000..a468c5f846 --- /dev/null +++ b/components/engine/daemon/cluster/convert/container.go @@ -0,0 +1,353 @@ +package convert + +import ( + "errors" + "fmt" + "strings" + + "github.com/Sirupsen/logrus" + container "github.com/docker/docker/api/types/container" + mounttypes "github.com/docker/docker/api/types/mount" + types "github.com/docker/docker/api/types/swarm" + swarmapi "github.com/docker/swarmkit/api" + gogotypes "github.com/gogo/protobuf/types" +) + +func containerSpecFromGRPC(c *swarmapi.ContainerSpec) types.ContainerSpec { + containerSpec := types.ContainerSpec{ + Image: c.Image, + Labels: c.Labels, + Command: c.Command, + Args: c.Args, + Hostname: c.Hostname, + Env: c.Env, + Dir: c.Dir, + User: c.User, + Groups: c.Groups, + StopSignal: c.StopSignal, + TTY: c.TTY, + OpenStdin: c.OpenStdin, + ReadOnly: c.ReadOnly, + Hosts: c.Hosts, + Secrets: secretReferencesFromGRPC(c.Secrets), + Configs: configReferencesFromGRPC(c.Configs), + } + + if c.DNSConfig != nil { + containerSpec.DNSConfig = &types.DNSConfig{ + Nameservers: c.DNSConfig.Nameservers, + Search: c.DNSConfig.Search, + Options: c.DNSConfig.Options, + } + } + + // Privileges + if c.Privileges != nil { + containerSpec.Privileges = &types.Privileges{} + + if c.Privileges.CredentialSpec != nil { + containerSpec.Privileges.CredentialSpec = &types.CredentialSpec{} + switch c.Privileges.CredentialSpec.Source.(type) { + case *swarmapi.Privileges_CredentialSpec_File: + containerSpec.Privileges.CredentialSpec.File = c.Privileges.CredentialSpec.GetFile() + case *swarmapi.Privileges_CredentialSpec_Registry: + containerSpec.Privileges.CredentialSpec.Registry = c.Privileges.CredentialSpec.GetRegistry() + } + } + + if c.Privileges.SELinuxContext != nil { + containerSpec.Privileges.SELinuxContext = &types.SELinuxContext{ + Disable: c.Privileges.SELinuxContext.Disable, + User: c.Privileges.SELinuxContext.User, + Type: c.Privileges.SELinuxContext.Type, + Role: c.Privileges.SELinuxContext.Role, + Level: c.Privileges.SELinuxContext.Level, + } + } + } + + // Mounts + for _, m := range c.Mounts { + mount := mounttypes.Mount{ + Target: m.Target, + Source: m.Source, + Type: mounttypes.Type(strings.ToLower(swarmapi.Mount_MountType_name[int32(m.Type)])), + ReadOnly: m.ReadOnly, + } + + if m.BindOptions != nil { + mount.BindOptions = &mounttypes.BindOptions{ + Propagation: mounttypes.Propagation(strings.ToLower(swarmapi.Mount_BindOptions_MountPropagation_name[int32(m.BindOptions.Propagation)])), + } + } + + if m.VolumeOptions != nil { + mount.VolumeOptions = &mounttypes.VolumeOptions{ + NoCopy: 
m.VolumeOptions.NoCopy, + Labels: m.VolumeOptions.Labels, + } + if m.VolumeOptions.DriverConfig != nil { + mount.VolumeOptions.DriverConfig = &mounttypes.Driver{ + Name: m.VolumeOptions.DriverConfig.Name, + Options: m.VolumeOptions.DriverConfig.Options, + } + } + } + + if m.TmpfsOptions != nil { + mount.TmpfsOptions = &mounttypes.TmpfsOptions{ + SizeBytes: m.TmpfsOptions.SizeBytes, + Mode: m.TmpfsOptions.Mode, + } + } + containerSpec.Mounts = append(containerSpec.Mounts, mount) + } + + if c.StopGracePeriod != nil { + grace, _ := gogotypes.DurationFromProto(c.StopGracePeriod) + containerSpec.StopGracePeriod = &grace + } + + if c.Healthcheck != nil { + containerSpec.Healthcheck = healthConfigFromGRPC(c.Healthcheck) + } + + return containerSpec +} + +func secretReferencesToGRPC(sr []*types.SecretReference) []*swarmapi.SecretReference { + refs := make([]*swarmapi.SecretReference, 0, len(sr)) + for _, s := range sr { + ref := &swarmapi.SecretReference{ + SecretID: s.SecretID, + SecretName: s.SecretName, + } + if s.File != nil { + ref.Target = &swarmapi.SecretReference_File{ + File: &swarmapi.FileTarget{ + Name: s.File.Name, + UID: s.File.UID, + GID: s.File.GID, + Mode: s.File.Mode, + }, + } + } + + refs = append(refs, ref) + } + + return refs +} + +func secretReferencesFromGRPC(sr []*swarmapi.SecretReference) []*types.SecretReference { + refs := make([]*types.SecretReference, 0, len(sr)) + for _, s := range sr { + target := s.GetFile() + if target == nil { + // not a file target + logrus.Warnf("secret target not a file: secret=%s", s.SecretID) + continue + } + refs = append(refs, &types.SecretReference{ + File: &types.SecretReferenceFileTarget{ + Name: target.Name, + UID: target.UID, + GID: target.GID, + Mode: target.Mode, + }, + SecretID: s.SecretID, + SecretName: s.SecretName, + }) + } + + return refs +} + +func configReferencesToGRPC(sr []*types.ConfigReference) []*swarmapi.ConfigReference { + refs := make([]*swarmapi.ConfigReference, 0, len(sr)) + for _, s := range sr { + ref := &swarmapi.ConfigReference{ + ConfigID: s.ConfigID, + ConfigName: s.ConfigName, + } + if s.File != nil { + ref.Target = &swarmapi.ConfigReference_File{ + File: &swarmapi.FileTarget{ + Name: s.File.Name, + UID: s.File.UID, + GID: s.File.GID, + Mode: s.File.Mode, + }, + } + } + + refs = append(refs, ref) + } + + return refs +} + +func configReferencesFromGRPC(sr []*swarmapi.ConfigReference) []*types.ConfigReference { + refs := make([]*types.ConfigReference, 0, len(sr)) + for _, s := range sr { + target := s.GetFile() + if target == nil { + // not a file target + logrus.Warnf("config target not a file: config=%s", s.ConfigID) + continue + } + refs = append(refs, &types.ConfigReference{ + File: &types.ConfigReferenceFileTarget{ + Name: target.Name, + UID: target.UID, + GID: target.GID, + Mode: target.Mode, + }, + ConfigID: s.ConfigID, + ConfigName: s.ConfigName, + }) + } + + return refs +} + +func containerToGRPC(c types.ContainerSpec) (*swarmapi.ContainerSpec, error) { + containerSpec := &swarmapi.ContainerSpec{ + Image: c.Image, + Labels: c.Labels, + Command: c.Command, + Args: c.Args, + Hostname: c.Hostname, + Env: c.Env, + Dir: c.Dir, + User: c.User, + Groups: c.Groups, + StopSignal: c.StopSignal, + TTY: c.TTY, + OpenStdin: c.OpenStdin, + ReadOnly: c.ReadOnly, + Hosts: c.Hosts, + Secrets: secretReferencesToGRPC(c.Secrets), + Configs: configReferencesToGRPC(c.Configs), + } + + if c.DNSConfig != nil { + containerSpec.DNSConfig = &swarmapi.ContainerSpec_DNSConfig{ + Nameservers: c.DNSConfig.Nameservers, + Search: 
c.DNSConfig.Search, + Options: c.DNSConfig.Options, + } + } + + if c.StopGracePeriod != nil { + containerSpec.StopGracePeriod = gogotypes.DurationProto(*c.StopGracePeriod) + } + + // Privileges + if c.Privileges != nil { + containerSpec.Privileges = &swarmapi.Privileges{} + + if c.Privileges.CredentialSpec != nil { + containerSpec.Privileges.CredentialSpec = &swarmapi.Privileges_CredentialSpec{} + + if c.Privileges.CredentialSpec.File != "" && c.Privileges.CredentialSpec.Registry != "" { + return nil, errors.New("cannot specify both \"file\" and \"registry\" credential specs") + } + if c.Privileges.CredentialSpec.File != "" { + containerSpec.Privileges.CredentialSpec.Source = &swarmapi.Privileges_CredentialSpec_File{ + File: c.Privileges.CredentialSpec.File, + } + } else if c.Privileges.CredentialSpec.Registry != "" { + containerSpec.Privileges.CredentialSpec.Source = &swarmapi.Privileges_CredentialSpec_Registry{ + Registry: c.Privileges.CredentialSpec.Registry, + } + } else { + return nil, errors.New("must either provide \"file\" or \"registry\" for credential spec") + } + } + + if c.Privileges.SELinuxContext != nil { + containerSpec.Privileges.SELinuxContext = &swarmapi.Privileges_SELinuxContext{ + Disable: c.Privileges.SELinuxContext.Disable, + User: c.Privileges.SELinuxContext.User, + Type: c.Privileges.SELinuxContext.Type, + Role: c.Privileges.SELinuxContext.Role, + Level: c.Privileges.SELinuxContext.Level, + } + } + } + + // Mounts + for _, m := range c.Mounts { + mount := swarmapi.Mount{ + Target: m.Target, + Source: m.Source, + ReadOnly: m.ReadOnly, + } + + if mountType, ok := swarmapi.Mount_MountType_value[strings.ToUpper(string(m.Type))]; ok { + mount.Type = swarmapi.Mount_MountType(mountType) + } else if string(m.Type) != "" { + return nil, fmt.Errorf("invalid MountType: %q", m.Type) + } + + if m.BindOptions != nil { + if mountPropagation, ok := swarmapi.Mount_BindOptions_MountPropagation_value[strings.ToUpper(string(m.BindOptions.Propagation))]; ok { + mount.BindOptions = &swarmapi.Mount_BindOptions{Propagation: swarmapi.Mount_BindOptions_MountPropagation(mountPropagation)} + } else if string(m.BindOptions.Propagation) != "" { + return nil, fmt.Errorf("invalid MountPropagation: %q", m.BindOptions.Propagation) + } + } + + if m.VolumeOptions != nil { + mount.VolumeOptions = &swarmapi.Mount_VolumeOptions{ + NoCopy: m.VolumeOptions.NoCopy, + Labels: m.VolumeOptions.Labels, + } + if m.VolumeOptions.DriverConfig != nil { + mount.VolumeOptions.DriverConfig = &swarmapi.Driver{ + Name: m.VolumeOptions.DriverConfig.Name, + Options: m.VolumeOptions.DriverConfig.Options, + } + } + } + + if m.TmpfsOptions != nil { + mount.TmpfsOptions = &swarmapi.Mount_TmpfsOptions{ + SizeBytes: m.TmpfsOptions.SizeBytes, + Mode: m.TmpfsOptions.Mode, + } + } + + containerSpec.Mounts = append(containerSpec.Mounts, mount) + } + + if c.Healthcheck != nil { + containerSpec.Healthcheck = healthConfigToGRPC(c.Healthcheck) + } + + return containerSpec, nil +} + +func healthConfigFromGRPC(h *swarmapi.HealthConfig) *container.HealthConfig { + interval, _ := gogotypes.DurationFromProto(h.Interval) + timeout, _ := gogotypes.DurationFromProto(h.Timeout) + startPeriod, _ := gogotypes.DurationFromProto(h.StartPeriod) + return &container.HealthConfig{ + Test: h.Test, + Interval: interval, + Timeout: timeout, + Retries: int(h.Retries), + StartPeriod: startPeriod, + } +} + +func healthConfigToGRPC(h *container.HealthConfig) *swarmapi.HealthConfig { + return &swarmapi.HealthConfig{ + Test: h.Test, + Interval: 
gogotypes.DurationProto(h.Interval), + Timeout: gogotypes.DurationProto(h.Timeout), + Retries: int32(h.Retries), + StartPeriod: gogotypes.DurationProto(h.StartPeriod), + } +} diff --git a/components/engine/daemon/cluster/convert/network.go b/components/engine/daemon/cluster/convert/network.go new file mode 100644 index 0000000000..143869a117 --- /dev/null +++ b/components/engine/daemon/cluster/convert/network.go @@ -0,0 +1,237 @@ +package convert + +import ( + "strings" + + basictypes "github.com/docker/docker/api/types" + networktypes "github.com/docker/docker/api/types/network" + types "github.com/docker/docker/api/types/swarm" + netconst "github.com/docker/libnetwork/datastore" + swarmapi "github.com/docker/swarmkit/api" + gogotypes "github.com/gogo/protobuf/types" +) + +func networkAttachmentFromGRPC(na *swarmapi.NetworkAttachment) types.NetworkAttachment { + if na != nil { + return types.NetworkAttachment{ + Network: networkFromGRPC(na.Network), + Addresses: na.Addresses, + } + } + return types.NetworkAttachment{} +} + +func networkFromGRPC(n *swarmapi.Network) types.Network { + if n != nil { + network := types.Network{ + ID: n.ID, + Spec: types.NetworkSpec{ + IPv6Enabled: n.Spec.Ipv6Enabled, + Internal: n.Spec.Internal, + Attachable: n.Spec.Attachable, + Ingress: IsIngressNetwork(n), + IPAMOptions: ipamFromGRPC(n.Spec.IPAM), + Scope: netconst.SwarmScope, + }, + IPAMOptions: ipamFromGRPC(n.IPAM), + } + + if n.Spec.ConfigFrom != "" { + network.Spec.ConfigFrom = &networktypes.ConfigReference{ + Network: n.Spec.ConfigFrom, + } + } + + // Meta + network.Version.Index = n.Meta.Version.Index + network.CreatedAt, _ = gogotypes.TimestampFromProto(n.Meta.CreatedAt) + network.UpdatedAt, _ = gogotypes.TimestampFromProto(n.Meta.UpdatedAt) + + //Annotations + network.Spec.Annotations = annotationsFromGRPC(n.Spec.Annotations) + + //DriverConfiguration + if n.Spec.DriverConfig != nil { + network.Spec.DriverConfiguration = &types.Driver{ + Name: n.Spec.DriverConfig.Name, + Options: n.Spec.DriverConfig.Options, + } + } + + //DriverState + if n.DriverState != nil { + network.DriverState = types.Driver{ + Name: n.DriverState.Name, + Options: n.DriverState.Options, + } + } + + return network + } + return types.Network{} +} + +func ipamFromGRPC(i *swarmapi.IPAMOptions) *types.IPAMOptions { + var ipam *types.IPAMOptions + if i != nil { + ipam = &types.IPAMOptions{} + if i.Driver != nil { + ipam.Driver.Name = i.Driver.Name + ipam.Driver.Options = i.Driver.Options + } + + for _, config := range i.Configs { + ipam.Configs = append(ipam.Configs, types.IPAMConfig{ + Subnet: config.Subnet, + Range: config.Range, + Gateway: config.Gateway, + }) + } + } + return ipam +} + +func endpointSpecFromGRPC(es *swarmapi.EndpointSpec) *types.EndpointSpec { + var endpointSpec *types.EndpointSpec + if es != nil { + endpointSpec = &types.EndpointSpec{} + endpointSpec.Mode = types.ResolutionMode(strings.ToLower(es.Mode.String())) + + for _, portState := range es.Ports { + endpointSpec.Ports = append(endpointSpec.Ports, swarmPortConfigToAPIPortConfig(portState)) + } + } + return endpointSpec +} + +func endpointFromGRPC(e *swarmapi.Endpoint) types.Endpoint { + endpoint := types.Endpoint{} + if e != nil { + if espec := endpointSpecFromGRPC(e.Spec); espec != nil { + endpoint.Spec = *espec + } + + for _, portState := range e.Ports { + endpoint.Ports = append(endpoint.Ports, swarmPortConfigToAPIPortConfig(portState)) + } + + for _, v := range e.VirtualIPs { + endpoint.VirtualIPs = append(endpoint.VirtualIPs, types.EndpointVirtualIP{ 
+ NetworkID: v.NetworkID, + Addr: v.Addr}) + } + + } + + return endpoint +} + +func swarmPortConfigToAPIPortConfig(portConfig *swarmapi.PortConfig) types.PortConfig { + return types.PortConfig{ + Name: portConfig.Name, + Protocol: types.PortConfigProtocol(strings.ToLower(swarmapi.PortConfig_Protocol_name[int32(portConfig.Protocol)])), + PublishMode: types.PortConfigPublishMode(strings.ToLower(swarmapi.PortConfig_PublishMode_name[int32(portConfig.PublishMode)])), + TargetPort: portConfig.TargetPort, + PublishedPort: portConfig.PublishedPort, + } +} + +// BasicNetworkFromGRPC converts a grpc Network to a NetworkResource. +func BasicNetworkFromGRPC(n swarmapi.Network) basictypes.NetworkResource { + spec := n.Spec + var ipam networktypes.IPAM + if spec.IPAM != nil { + if spec.IPAM.Driver != nil { + ipam.Driver = spec.IPAM.Driver.Name + ipam.Options = spec.IPAM.Driver.Options + } + ipam.Config = make([]networktypes.IPAMConfig, 0, len(spec.IPAM.Configs)) + for _, ic := range spec.IPAM.Configs { + ipamConfig := networktypes.IPAMConfig{ + Subnet: ic.Subnet, + IPRange: ic.Range, + Gateway: ic.Gateway, + AuxAddress: ic.Reserved, + } + ipam.Config = append(ipam.Config, ipamConfig) + } + } + + nr := basictypes.NetworkResource{ + ID: n.ID, + Name: n.Spec.Annotations.Name, + Scope: netconst.SwarmScope, + EnableIPv6: spec.Ipv6Enabled, + IPAM: ipam, + Internal: spec.Internal, + Attachable: spec.Attachable, + Ingress: IsIngressNetwork(&n), + Labels: n.Spec.Annotations.Labels, + } + + if n.Spec.ConfigFrom != "" { + nr.ConfigFrom = networktypes.ConfigReference{ + Network: n.Spec.ConfigFrom, + } + } + + if n.DriverState != nil { + nr.Driver = n.DriverState.Name + nr.Options = n.DriverState.Options + } + + return nr +} + +// BasicNetworkCreateToGRPC converts a NetworkCreateRequest to a grpc NetworkSpec. 
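+//
+// As a sketch (the embedded-struct shape of basictypes.NetworkCreateRequest is
+// assumed here), a caller handling something like
+// `docker network create --driver overlay --attachable my-net` would build:
+//
+//	req := basictypes.NetworkCreateRequest{
+//		Name: "my-net",
+//		NetworkCreate: basictypes.NetworkCreate{
+//			Driver:     "overlay",
+//			Attachable: true,
+//		},
+//	}
+//	spec := BasicNetworkCreateToGRPC(req)
+//	// spec.Annotations.Name == "my-net"; spec.DriverConfig.Name == "overlay"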
+func BasicNetworkCreateToGRPC(create basictypes.NetworkCreateRequest) swarmapi.NetworkSpec {
+	ns := swarmapi.NetworkSpec{
+		Annotations: swarmapi.Annotations{
+			Name:   create.Name,
+			Labels: create.Labels,
+		},
+		DriverConfig: &swarmapi.Driver{
+			Name:    create.Driver,
+			Options: create.Options,
+		},
+		Ipv6Enabled: create.EnableIPv6,
+		Internal:    create.Internal,
+		Attachable:  create.Attachable,
+		Ingress:     create.Ingress,
+	}
+	if create.IPAM != nil {
+		driver := create.IPAM.Driver
+		if driver == "" {
+			driver = "default"
+		}
+		ns.IPAM = &swarmapi.IPAMOptions{
+			Driver: &swarmapi.Driver{
+				Name:    driver,
+				Options: create.IPAM.Options,
+			},
+		}
+		ipamSpec := make([]*swarmapi.IPAMConfig, 0, len(create.IPAM.Config))
+		for _, ipamConfig := range create.IPAM.Config {
+			ipamSpec = append(ipamSpec, &swarmapi.IPAMConfig{
+				Subnet:  ipamConfig.Subnet,
+				Range:   ipamConfig.IPRange,
+				Gateway: ipamConfig.Gateway,
+			})
+		}
+		ns.IPAM.Configs = ipamSpec
+	}
+	if create.ConfigFrom != nil {
+		ns.ConfigFrom = create.ConfigFrom.Network
+	}
+	return ns
+}
+
+// IsIngressNetwork checks whether the swarm network is an ingress network
+func IsIngressNetwork(n *swarmapi.Network) bool {
+	if n.Spec.Ingress {
+		return true
+	}
+	// Check for a legacy-defined ingress network
+	_, ok := n.Spec.Annotations.Labels["com.docker.swarm.internal"]
+	return ok && n.Spec.Annotations.Name == "ingress"
+}
diff --git a/components/engine/daemon/cluster/convert/node.go b/components/engine/daemon/cluster/convert/node.go
new file mode 100644
index 0000000000..f075783e88
--- /dev/null
+++ b/components/engine/daemon/cluster/convert/node.go
@@ -0,0 +1,93 @@
+package convert
+
+import (
+	"fmt"
+	"strings"
+
+	types "github.com/docker/docker/api/types/swarm"
+	swarmapi "github.com/docker/swarmkit/api"
+	gogotypes "github.com/gogo/protobuf/types"
+)
+
+// NodeFromGRPC converts a grpc Node to a Node.
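+//
+// Enum-valued fields are mapped by lowercasing the protobuf string form; for
+// example, a node whose DesiredRole stringifies to "MANAGER" comes back as
+// types.NodeRole("manager"):
+//
+//	node := NodeFromGRPC(n)
+//	// node.Spec.Role == "manager"; node.Status.State follows the same rule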
+func NodeFromGRPC(n swarmapi.Node) types.Node { + node := types.Node{ + ID: n.ID, + Spec: types.NodeSpec{ + Role: types.NodeRole(strings.ToLower(n.Spec.DesiredRole.String())), + Availability: types.NodeAvailability(strings.ToLower(n.Spec.Availability.String())), + }, + Status: types.NodeStatus{ + State: types.NodeState(strings.ToLower(n.Status.State.String())), + Message: n.Status.Message, + Addr: n.Status.Addr, + }, + } + + // Meta + node.Version.Index = n.Meta.Version.Index + node.CreatedAt, _ = gogotypes.TimestampFromProto(n.Meta.CreatedAt) + node.UpdatedAt, _ = gogotypes.TimestampFromProto(n.Meta.UpdatedAt) + + //Annotations + node.Spec.Annotations = annotationsFromGRPC(n.Spec.Annotations) + + //Description + if n.Description != nil { + node.Description.Hostname = n.Description.Hostname + if n.Description.Platform != nil { + node.Description.Platform.Architecture = n.Description.Platform.Architecture + node.Description.Platform.OS = n.Description.Platform.OS + } + if n.Description.Resources != nil { + node.Description.Resources.NanoCPUs = n.Description.Resources.NanoCPUs + node.Description.Resources.MemoryBytes = n.Description.Resources.MemoryBytes + } + if n.Description.Engine != nil { + node.Description.Engine.EngineVersion = n.Description.Engine.EngineVersion + node.Description.Engine.Labels = n.Description.Engine.Labels + for _, plugin := range n.Description.Engine.Plugins { + node.Description.Engine.Plugins = append(node.Description.Engine.Plugins, types.PluginDescription{Type: plugin.Type, Name: plugin.Name}) + } + } + if n.Description.TLSInfo != nil { + node.Description.TLSInfo.TrustRoot = string(n.Description.TLSInfo.TrustRoot) + node.Description.TLSInfo.CertIssuerPublicKey = n.Description.TLSInfo.CertIssuerPublicKey + node.Description.TLSInfo.CertIssuerSubject = n.Description.TLSInfo.CertIssuerSubject + } + } + + //Manager + if n.ManagerStatus != nil { + node.ManagerStatus = &types.ManagerStatus{ + Leader: n.ManagerStatus.Leader, + Reachability: types.Reachability(strings.ToLower(n.ManagerStatus.Reachability.String())), + Addr: n.ManagerStatus.Addr, + } + } + + return node +} + +// NodeSpecToGRPC converts a NodeSpec to a grpc NodeSpec. +func NodeSpecToGRPC(s types.NodeSpec) (swarmapi.NodeSpec, error) { + spec := swarmapi.NodeSpec{ + Annotations: swarmapi.Annotations{ + Name: s.Name, + Labels: s.Labels, + }, + } + if role, ok := swarmapi.NodeRole_value[strings.ToUpper(string(s.Role))]; ok { + spec.DesiredRole = swarmapi.NodeRole(role) + } else { + return swarmapi.NodeSpec{}, fmt.Errorf("invalid Role: %q", s.Role) + } + + if availability, ok := swarmapi.NodeSpec_Availability_value[strings.ToUpper(string(s.Availability))]; ok { + spec.Availability = swarmapi.NodeSpec_Availability(availability) + } else { + return swarmapi.NodeSpec{}, fmt.Errorf("invalid Availability: %q", s.Availability) + } + + return spec, nil +} diff --git a/components/engine/daemon/cluster/convert/secret.go b/components/engine/daemon/cluster/convert/secret.go new file mode 100644 index 0000000000..91d67736ad --- /dev/null +++ b/components/engine/daemon/cluster/convert/secret.go @@ -0,0 +1,61 @@ +package convert + +import ( + swarmtypes "github.com/docker/docker/api/types/swarm" + swarmapi "github.com/docker/swarmkit/api" + gogotypes "github.com/gogo/protobuf/types" +) + +// SecretFromGRPC converts a grpc Secret to a Secret. 
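+//
+// Spec.Data is copied through verbatim. As with the signing CA key in
+// swarm.go, redaction is expected to happen in the swarm APIs themselves, so
+// for list/inspect responses Data will normally already be empty:
+//
+//	secret := SecretFromGRPC(s)
+//	// secret.Spec.Data is nil unless the grpc response carried the payload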
+func SecretFromGRPC(s *swarmapi.Secret) swarmtypes.Secret {
+	secret := swarmtypes.Secret{
+		ID: s.ID,
+		Spec: swarmtypes.SecretSpec{
+			Annotations: annotationsFromGRPC(s.Spec.Annotations),
+			Data:        s.Spec.Data,
+		},
+	}
+
+	// Meta
+	secret.Version.Index = s.Meta.Version.Index
+	secret.CreatedAt, _ = gogotypes.TimestampFromProto(s.Meta.CreatedAt)
+	secret.UpdatedAt, _ = gogotypes.TimestampFromProto(s.Meta.UpdatedAt)
+
+	return secret
+}
+
+// SecretSpecToGRPC converts a SecretSpec to a grpc SecretSpec.
+func SecretSpecToGRPC(s swarmtypes.SecretSpec) swarmapi.SecretSpec {
+	return swarmapi.SecretSpec{
+		Annotations: swarmapi.Annotations{
+			Name:   s.Name,
+			Labels: s.Labels,
+		},
+		Data: s.Data,
+	}
+}
+
+// SecretReferencesFromGRPC converts a slice of grpc SecretReference to SecretReference
+func SecretReferencesFromGRPC(s []*swarmapi.SecretReference) []*swarmtypes.SecretReference {
+	refs := []*swarmtypes.SecretReference{}
+
+	for _, r := range s {
+		ref := &swarmtypes.SecretReference{
+			SecretID:   r.SecretID,
+			SecretName: r.SecretName,
+		}
+
+		if t, ok := r.Target.(*swarmapi.SecretReference_File); ok {
+			ref.File = &swarmtypes.SecretReferenceFileTarget{
+				Name: t.File.Name,
+				UID:  t.File.UID,
+				GID:  t.File.GID,
+				Mode: t.File.Mode,
+			}
+		}
+
+		refs = append(refs, ref)
+	}
+
+	return refs
+}
diff --git a/components/engine/daemon/cluster/convert/service.go b/components/engine/daemon/cluster/convert/service.go
new file mode 100644
index 0000000000..2604259b24
--- /dev/null
+++ b/components/engine/daemon/cluster/convert/service.go
@@ -0,0 +1,534 @@
+package convert
+
+import (
+	"errors"
+	"fmt"
+	"strings"
+
+	types "github.com/docker/docker/api/types/swarm"
+	"github.com/docker/docker/pkg/namesgenerator"
+	swarmapi "github.com/docker/swarmkit/api"
+	gogotypes "github.com/gogo/protobuf/types"
+)
+
+var (
+	// ErrUnsupportedRuntime is returned when the runtime is not supported by the daemon
+	ErrUnsupportedRuntime = errors.New("unsupported runtime")
+)
+
+// ServiceFromGRPC converts a grpc Service to a Service.
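+//
+// A daemon-side caller typically converts in a loop and treats a conversion
+// failure (such as an unknown generic runtime kind) as fatal for the whole
+// request; a sketch, with grpcServices as a hypothetical input slice:
+//
+//	services := make([]types.Service, 0, len(grpcServices))
+//	for _, gs := range grpcServices {
+//		svc, err := ServiceFromGRPC(*gs)
+//		if err != nil {
+//			return nil, err
+//		}
+//		services = append(services, svc)
+//	}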
+func ServiceFromGRPC(s swarmapi.Service) (types.Service, error) { + curSpec, err := serviceSpecFromGRPC(&s.Spec) + if err != nil { + return types.Service{}, err + } + prevSpec, err := serviceSpecFromGRPC(s.PreviousSpec) + if err != nil { + return types.Service{}, err + } + service := types.Service{ + ID: s.ID, + Spec: *curSpec, + PreviousSpec: prevSpec, + + Endpoint: endpointFromGRPC(s.Endpoint), + } + + // Meta + service.Version.Index = s.Meta.Version.Index + service.CreatedAt, _ = gogotypes.TimestampFromProto(s.Meta.CreatedAt) + service.UpdatedAt, _ = gogotypes.TimestampFromProto(s.Meta.UpdatedAt) + + // UpdateStatus + if s.UpdateStatus != nil { + service.UpdateStatus = &types.UpdateStatus{} + switch s.UpdateStatus.State { + case swarmapi.UpdateStatus_UPDATING: + service.UpdateStatus.State = types.UpdateStateUpdating + case swarmapi.UpdateStatus_PAUSED: + service.UpdateStatus.State = types.UpdateStatePaused + case swarmapi.UpdateStatus_COMPLETED: + service.UpdateStatus.State = types.UpdateStateCompleted + case swarmapi.UpdateStatus_ROLLBACK_STARTED: + service.UpdateStatus.State = types.UpdateStateRollbackStarted + case swarmapi.UpdateStatus_ROLLBACK_PAUSED: + service.UpdateStatus.State = types.UpdateStateRollbackPaused + case swarmapi.UpdateStatus_ROLLBACK_COMPLETED: + service.UpdateStatus.State = types.UpdateStateRollbackCompleted + } + + startedAt, _ := gogotypes.TimestampFromProto(s.UpdateStatus.StartedAt) + if !startedAt.IsZero() && startedAt.Unix() != 0 { + service.UpdateStatus.StartedAt = &startedAt + } + + completedAt, _ := gogotypes.TimestampFromProto(s.UpdateStatus.CompletedAt) + if !completedAt.IsZero() && completedAt.Unix() != 0 { + service.UpdateStatus.CompletedAt = &completedAt + } + + service.UpdateStatus.Message = s.UpdateStatus.Message + } + + return service, nil +} + +func serviceSpecFromGRPC(spec *swarmapi.ServiceSpec) (*types.ServiceSpec, error) { + if spec == nil { + return nil, nil + } + + serviceNetworks := make([]types.NetworkAttachmentConfig, 0, len(spec.Networks)) + for _, n := range spec.Networks { + netConfig := types.NetworkAttachmentConfig{Target: n.Target, Aliases: n.Aliases, DriverOpts: n.DriverAttachmentOpts} + serviceNetworks = append(serviceNetworks, netConfig) + + } + + taskTemplate := taskSpecFromGRPC(spec.Task) + + switch t := spec.Task.GetRuntime().(type) { + case *swarmapi.TaskSpec_Container: + containerConfig := t.Container + taskTemplate.ContainerSpec = containerSpecFromGRPC(containerConfig) + taskTemplate.Runtime = types.RuntimeContainer + case *swarmapi.TaskSpec_Generic: + switch t.Generic.Kind { + case string(types.RuntimePlugin): + taskTemplate.Runtime = types.RuntimePlugin + default: + return nil, fmt.Errorf("unknown task runtime type: %s", t.Generic.Payload.TypeUrl) + } + + taskTemplate.RuntimeData = t.Generic.Payload.Value + default: + return nil, fmt.Errorf("error creating service; unsupported runtime %T", t) + } + + convertedSpec := &types.ServiceSpec{ + Annotations: annotationsFromGRPC(spec.Annotations), + TaskTemplate: taskTemplate, + Networks: serviceNetworks, + EndpointSpec: endpointSpecFromGRPC(spec.Endpoint), + } + + // UpdateConfig + convertedSpec.UpdateConfig = updateConfigFromGRPC(spec.Update) + convertedSpec.RollbackConfig = updateConfigFromGRPC(spec.Rollback) + + // Mode + switch t := spec.GetMode().(type) { + case *swarmapi.ServiceSpec_Global: + convertedSpec.Mode.Global = &types.GlobalService{} + case *swarmapi.ServiceSpec_Replicated: + convertedSpec.Mode.Replicated = &types.ReplicatedService{ + Replicas: 
&t.Replicated.Replicas, + } + } + + return convertedSpec, nil +} + +// ServiceSpecToGRPC converts a ServiceSpec to a grpc ServiceSpec. +func ServiceSpecToGRPC(s types.ServiceSpec) (swarmapi.ServiceSpec, error) { + name := s.Name + if name == "" { + name = namesgenerator.GetRandomName(0) + } + + serviceNetworks := make([]*swarmapi.NetworkAttachmentConfig, 0, len(s.Networks)) + for _, n := range s.Networks { + netConfig := &swarmapi.NetworkAttachmentConfig{Target: n.Target, Aliases: n.Aliases, DriverAttachmentOpts: n.DriverOpts} + serviceNetworks = append(serviceNetworks, netConfig) + } + + taskNetworks := make([]*swarmapi.NetworkAttachmentConfig, 0, len(s.TaskTemplate.Networks)) + for _, n := range s.TaskTemplate.Networks { + netConfig := &swarmapi.NetworkAttachmentConfig{Target: n.Target, Aliases: n.Aliases, DriverAttachmentOpts: n.DriverOpts} + taskNetworks = append(taskNetworks, netConfig) + + } + + spec := swarmapi.ServiceSpec{ + Annotations: swarmapi.Annotations{ + Name: name, + Labels: s.Labels, + }, + Task: swarmapi.TaskSpec{ + Resources: resourcesToGRPC(s.TaskTemplate.Resources), + LogDriver: driverToGRPC(s.TaskTemplate.LogDriver), + Networks: taskNetworks, + ForceUpdate: s.TaskTemplate.ForceUpdate, + }, + Networks: serviceNetworks, + } + + switch s.TaskTemplate.Runtime { + case types.RuntimeContainer, "": // if empty runtime default to container + containerSpec, err := containerToGRPC(s.TaskTemplate.ContainerSpec) + if err != nil { + return swarmapi.ServiceSpec{}, err + } + spec.Task.Runtime = &swarmapi.TaskSpec_Container{Container: containerSpec} + case types.RuntimePlugin: + spec.Task.Runtime = &swarmapi.TaskSpec_Generic{ + Generic: &swarmapi.GenericRuntimeSpec{ + Kind: string(types.RuntimePlugin), + Payload: &gogotypes.Any{ + TypeUrl: string(types.RuntimeURLPlugin), + Value: s.TaskTemplate.RuntimeData, + }, + }, + } + default: + return swarmapi.ServiceSpec{}, ErrUnsupportedRuntime + } + + restartPolicy, err := restartPolicyToGRPC(s.TaskTemplate.RestartPolicy) + if err != nil { + return swarmapi.ServiceSpec{}, err + } + spec.Task.Restart = restartPolicy + + if s.TaskTemplate.Placement != nil { + var preferences []*swarmapi.PlacementPreference + for _, pref := range s.TaskTemplate.Placement.Preferences { + if pref.Spread != nil { + preferences = append(preferences, &swarmapi.PlacementPreference{ + Preference: &swarmapi.PlacementPreference_Spread{ + Spread: &swarmapi.SpreadOver{ + SpreadDescriptor: pref.Spread.SpreadDescriptor, + }, + }, + }) + } + } + var platforms []*swarmapi.Platform + for _, plat := range s.TaskTemplate.Placement.Platforms { + platforms = append(platforms, &swarmapi.Platform{ + Architecture: plat.Architecture, + OS: plat.OS, + }) + } + spec.Task.Placement = &swarmapi.Placement{ + Constraints: s.TaskTemplate.Placement.Constraints, + Preferences: preferences, + Platforms: platforms, + } + } + + spec.Update, err = updateConfigToGRPC(s.UpdateConfig) + if err != nil { + return swarmapi.ServiceSpec{}, err + } + spec.Rollback, err = updateConfigToGRPC(s.RollbackConfig) + if err != nil { + return swarmapi.ServiceSpec{}, err + } + + if s.EndpointSpec != nil { + if s.EndpointSpec.Mode != "" && + s.EndpointSpec.Mode != types.ResolutionModeVIP && + s.EndpointSpec.Mode != types.ResolutionModeDNSRR { + return swarmapi.ServiceSpec{}, fmt.Errorf("invalid resolution mode: %q", s.EndpointSpec.Mode) + } + + spec.Endpoint = &swarmapi.EndpointSpec{} + + spec.Endpoint.Mode = 
swarmapi.EndpointSpec_ResolutionMode(swarmapi.EndpointSpec_ResolutionMode_value[strings.ToUpper(string(s.EndpointSpec.Mode))]) + + for _, portConfig := range s.EndpointSpec.Ports { + spec.Endpoint.Ports = append(spec.Endpoint.Ports, &swarmapi.PortConfig{ + Name: portConfig.Name, + Protocol: swarmapi.PortConfig_Protocol(swarmapi.PortConfig_Protocol_value[strings.ToUpper(string(portConfig.Protocol))]), + PublishMode: swarmapi.PortConfig_PublishMode(swarmapi.PortConfig_PublishMode_value[strings.ToUpper(string(portConfig.PublishMode))]), + TargetPort: portConfig.TargetPort, + PublishedPort: portConfig.PublishedPort, + }) + } + } + + // Mode + if s.Mode.Global != nil && s.Mode.Replicated != nil { + return swarmapi.ServiceSpec{}, fmt.Errorf("cannot specify both replicated mode and global mode") + } + + if s.Mode.Global != nil { + spec.Mode = &swarmapi.ServiceSpec_Global{ + Global: &swarmapi.GlobalService{}, + } + } else if s.Mode.Replicated != nil && s.Mode.Replicated.Replicas != nil { + spec.Mode = &swarmapi.ServiceSpec_Replicated{ + Replicated: &swarmapi.ReplicatedService{Replicas: *s.Mode.Replicated.Replicas}, + } + } else { + spec.Mode = &swarmapi.ServiceSpec_Replicated{ + Replicated: &swarmapi.ReplicatedService{Replicas: 1}, + } + } + + return spec, nil +} + +func annotationsFromGRPC(ann swarmapi.Annotations) types.Annotations { + a := types.Annotations{ + Name: ann.Name, + Labels: ann.Labels, + } + + if a.Labels == nil { + a.Labels = make(map[string]string) + } + + return a +} + +func resourcesFromGRPC(res *swarmapi.ResourceRequirements) *types.ResourceRequirements { + var resources *types.ResourceRequirements + if res != nil { + resources = &types.ResourceRequirements{} + if res.Limits != nil { + resources.Limits = &types.Resources{ + NanoCPUs: res.Limits.NanoCPUs, + MemoryBytes: res.Limits.MemoryBytes, + } + } + if res.Reservations != nil { + resources.Reservations = &types.Resources{ + NanoCPUs: res.Reservations.NanoCPUs, + MemoryBytes: res.Reservations.MemoryBytes, + } + } + } + + return resources +} + +func resourcesToGRPC(res *types.ResourceRequirements) *swarmapi.ResourceRequirements { + var reqs *swarmapi.ResourceRequirements + if res != nil { + reqs = &swarmapi.ResourceRequirements{} + if res.Limits != nil { + reqs.Limits = &swarmapi.Resources{ + NanoCPUs: res.Limits.NanoCPUs, + MemoryBytes: res.Limits.MemoryBytes, + } + } + if res.Reservations != nil { + reqs.Reservations = &swarmapi.Resources{ + NanoCPUs: res.Reservations.NanoCPUs, + MemoryBytes: res.Reservations.MemoryBytes, + } + + } + } + return reqs +} + +func restartPolicyFromGRPC(p *swarmapi.RestartPolicy) *types.RestartPolicy { + var rp *types.RestartPolicy + if p != nil { + rp = &types.RestartPolicy{} + + switch p.Condition { + case swarmapi.RestartOnNone: + rp.Condition = types.RestartPolicyConditionNone + case swarmapi.RestartOnFailure: + rp.Condition = types.RestartPolicyConditionOnFailure + case swarmapi.RestartOnAny: + rp.Condition = types.RestartPolicyConditionAny + default: + rp.Condition = types.RestartPolicyConditionAny + } + + if p.Delay != nil { + delay, _ := gogotypes.DurationFromProto(p.Delay) + rp.Delay = &delay + } + if p.Window != nil { + window, _ := gogotypes.DurationFromProto(p.Window) + rp.Window = &window + } + + rp.MaxAttempts = &p.MaxAttempts + } + return rp +} + +func restartPolicyToGRPC(p *types.RestartPolicy) (*swarmapi.RestartPolicy, error) { + var rp *swarmapi.RestartPolicy + if p != nil { + rp = &swarmapi.RestartPolicy{} + + switch p.Condition { + case types.RestartPolicyConditionNone: + 
rp.Condition = swarmapi.RestartOnNone + case types.RestartPolicyConditionOnFailure: + rp.Condition = swarmapi.RestartOnFailure + case types.RestartPolicyConditionAny: + rp.Condition = swarmapi.RestartOnAny + default: + if string(p.Condition) != "" { + return nil, fmt.Errorf("invalid RestartCondition: %q", p.Condition) + } + rp.Condition = swarmapi.RestartOnAny + } + + if p.Delay != nil { + rp.Delay = gogotypes.DurationProto(*p.Delay) + } + if p.Window != nil { + rp.Window = gogotypes.DurationProto(*p.Window) + } + if p.MaxAttempts != nil { + rp.MaxAttempts = *p.MaxAttempts + + } + } + return rp, nil +} + +func placementFromGRPC(p *swarmapi.Placement) *types.Placement { + if p == nil { + return nil + } + r := &types.Placement{ + Constraints: p.Constraints, + } + + for _, pref := range p.Preferences { + if spread := pref.GetSpread(); spread != nil { + r.Preferences = append(r.Preferences, types.PlacementPreference{ + Spread: &types.SpreadOver{ + SpreadDescriptor: spread.SpreadDescriptor, + }, + }) + } + } + + for _, plat := range p.Platforms { + r.Platforms = append(r.Platforms, types.Platform{ + Architecture: plat.Architecture, + OS: plat.OS, + }) + } + + return r +} + +func driverFromGRPC(p *swarmapi.Driver) *types.Driver { + if p == nil { + return nil + } + + return &types.Driver{ + Name: p.Name, + Options: p.Options, + } +} + +func driverToGRPC(p *types.Driver) *swarmapi.Driver { + if p == nil { + return nil + } + + return &swarmapi.Driver{ + Name: p.Name, + Options: p.Options, + } +} + +func updateConfigFromGRPC(updateConfig *swarmapi.UpdateConfig) *types.UpdateConfig { + if updateConfig == nil { + return nil + } + + converted := &types.UpdateConfig{ + Parallelism: updateConfig.Parallelism, + MaxFailureRatio: updateConfig.MaxFailureRatio, + } + + converted.Delay = updateConfig.Delay + if updateConfig.Monitor != nil { + converted.Monitor, _ = gogotypes.DurationFromProto(updateConfig.Monitor) + } + + switch updateConfig.FailureAction { + case swarmapi.UpdateConfig_PAUSE: + converted.FailureAction = types.UpdateFailureActionPause + case swarmapi.UpdateConfig_CONTINUE: + converted.FailureAction = types.UpdateFailureActionContinue + case swarmapi.UpdateConfig_ROLLBACK: + converted.FailureAction = types.UpdateFailureActionRollback + } + + switch updateConfig.Order { + case swarmapi.UpdateConfig_STOP_FIRST: + converted.Order = types.UpdateOrderStopFirst + case swarmapi.UpdateConfig_START_FIRST: + converted.Order = types.UpdateOrderStartFirst + } + + return converted +} + +func updateConfigToGRPC(updateConfig *types.UpdateConfig) (*swarmapi.UpdateConfig, error) { + if updateConfig == nil { + return nil, nil + } + + converted := &swarmapi.UpdateConfig{ + Parallelism: updateConfig.Parallelism, + Delay: updateConfig.Delay, + MaxFailureRatio: updateConfig.MaxFailureRatio, + } + + switch updateConfig.FailureAction { + case types.UpdateFailureActionPause, "": + converted.FailureAction = swarmapi.UpdateConfig_PAUSE + case types.UpdateFailureActionContinue: + converted.FailureAction = swarmapi.UpdateConfig_CONTINUE + case types.UpdateFailureActionRollback: + converted.FailureAction = swarmapi.UpdateConfig_ROLLBACK + default: + return nil, fmt.Errorf("unrecognized update failure action %s", updateConfig.FailureAction) + } + if updateConfig.Monitor != 0 { + converted.Monitor = gogotypes.DurationProto(updateConfig.Monitor) + } + + switch updateConfig.Order { + case types.UpdateOrderStopFirst, "": + converted.Order = swarmapi.UpdateConfig_STOP_FIRST + case types.UpdateOrderStartFirst: + converted.Order = 
swarmapi.UpdateConfig_START_FIRST + default: + return nil, fmt.Errorf("unrecognized update order %s", updateConfig.Order) + } + + return converted, nil +} + +func taskSpecFromGRPC(taskSpec swarmapi.TaskSpec) types.TaskSpec { + taskNetworks := make([]types.NetworkAttachmentConfig, 0, len(taskSpec.Networks)) + for _, n := range taskSpec.Networks { + netConfig := types.NetworkAttachmentConfig{Target: n.Target, Aliases: n.Aliases, DriverOpts: n.DriverAttachmentOpts} + taskNetworks = append(taskNetworks, netConfig) + } + + c := taskSpec.GetContainer() + cSpec := types.ContainerSpec{} + if c != nil { + cSpec = containerSpecFromGRPC(c) + } + + return types.TaskSpec{ + ContainerSpec: cSpec, + Resources: resourcesFromGRPC(taskSpec.Resources), + RestartPolicy: restartPolicyFromGRPC(taskSpec.Restart), + Placement: placementFromGRPC(taskSpec.Placement), + LogDriver: driverFromGRPC(taskSpec.LogDriver), + Networks: taskNetworks, + ForceUpdate: taskSpec.ForceUpdate, + } +} diff --git a/components/engine/daemon/cluster/convert/service_test.go b/components/engine/daemon/cluster/convert/service_test.go new file mode 100644 index 0000000000..92d80d3323 --- /dev/null +++ b/components/engine/daemon/cluster/convert/service_test.go @@ -0,0 +1,148 @@ +package convert + +import ( + "testing" + + swarmtypes "github.com/docker/docker/api/types/swarm" + swarmapi "github.com/docker/swarmkit/api" + google_protobuf3 "github.com/gogo/protobuf/types" +) + +func TestServiceConvertFromGRPCRuntimeContainer(t *testing.T) { + gs := swarmapi.Service{ + Meta: swarmapi.Meta{ + Version: swarmapi.Version{ + Index: 1, + }, + CreatedAt: nil, + UpdatedAt: nil, + }, + SpecVersion: &swarmapi.Version{ + Index: 1, + }, + Spec: swarmapi.ServiceSpec{ + Task: swarmapi.TaskSpec{ + Runtime: &swarmapi.TaskSpec_Container{ + Container: &swarmapi.ContainerSpec{ + Image: "alpine:latest", + }, + }, + }, + }, + } + + svc, err := ServiceFromGRPC(gs) + if err != nil { + t.Fatal(err) + } + + if svc.Spec.TaskTemplate.Runtime != swarmtypes.RuntimeContainer { + t.Fatalf("expected type %s; received %T", swarmtypes.RuntimeContainer, svc.Spec.TaskTemplate.Runtime) + } +} + +func TestServiceConvertFromGRPCGenericRuntimePlugin(t *testing.T) { + kind := string(swarmtypes.RuntimePlugin) + url := swarmtypes.RuntimeURLPlugin + gs := swarmapi.Service{ + Meta: swarmapi.Meta{ + Version: swarmapi.Version{ + Index: 1, + }, + CreatedAt: nil, + UpdatedAt: nil, + }, + SpecVersion: &swarmapi.Version{ + Index: 1, + }, + Spec: swarmapi.ServiceSpec{ + Task: swarmapi.TaskSpec{ + Runtime: &swarmapi.TaskSpec_Generic{ + Generic: &swarmapi.GenericRuntimeSpec{ + Kind: kind, + Payload: &google_protobuf3.Any{ + TypeUrl: string(url), + }, + }, + }, + }, + }, + } + + svc, err := ServiceFromGRPC(gs) + if err != nil { + t.Fatal(err) + } + + if svc.Spec.TaskTemplate.Runtime != swarmtypes.RuntimePlugin { + t.Fatalf("expected type %s; received %T", swarmtypes.RuntimePlugin, svc.Spec.TaskTemplate.Runtime) + } +} + +func TestServiceConvertToGRPCGenericRuntimePlugin(t *testing.T) { + s := swarmtypes.ServiceSpec{ + TaskTemplate: swarmtypes.TaskSpec{ + Runtime: swarmtypes.RuntimePlugin, + }, + Mode: swarmtypes.ServiceMode{ + Global: &swarmtypes.GlobalService{}, + }, + } + + svc, err := ServiceSpecToGRPC(s) + if err != nil { + t.Fatal(err) + } + + v, ok := svc.Task.Runtime.(*swarmapi.TaskSpec_Generic) + if !ok { + t.Fatal("expected type swarmapi.TaskSpec_Generic") + } + + if v.Generic.Payload.TypeUrl != string(swarmtypes.RuntimeURLPlugin) { + t.Fatalf("expected url %s; received %s", 
swarmtypes.RuntimeURLPlugin, v.Generic.Payload.TypeUrl) + } +} + +func TestServiceConvertToGRPCContainerRuntime(t *testing.T) { + image := "alpine:latest" + s := swarmtypes.ServiceSpec{ + TaskTemplate: swarmtypes.TaskSpec{ + ContainerSpec: swarmtypes.ContainerSpec{ + Image: image, + }, + }, + Mode: swarmtypes.ServiceMode{ + Global: &swarmtypes.GlobalService{}, + }, + } + + svc, err := ServiceSpecToGRPC(s) + if err != nil { + t.Fatal(err) + } + + v, ok := svc.Task.Runtime.(*swarmapi.TaskSpec_Container) + if !ok { + t.Fatal("expected type swarmapi.TaskSpec_Container") + } + + if v.Container.Image != image { + t.Fatalf("expected image %s; received %s", image, v.Container.Image) + } +} + +func TestServiceConvertToGRPCGenericRuntimeCustom(t *testing.T) { + s := swarmtypes.ServiceSpec{ + TaskTemplate: swarmtypes.TaskSpec{ + Runtime: "customruntime", + }, + Mode: swarmtypes.ServiceMode{ + Global: &swarmtypes.GlobalService{}, + }, + } + + if _, err := ServiceSpecToGRPC(s); err != ErrUnsupportedRuntime { + t.Fatal(err) + } +} diff --git a/components/engine/daemon/cluster/convert/swarm.go b/components/engine/daemon/cluster/convert/swarm.go new file mode 100644 index 0000000000..0d5c8738c9 --- /dev/null +++ b/components/engine/daemon/cluster/convert/swarm.go @@ -0,0 +1,147 @@ +package convert + +import ( + "fmt" + "strings" + "time" + + types "github.com/docker/docker/api/types/swarm" + swarmapi "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/ca" + gogotypes "github.com/gogo/protobuf/types" +) + +// SwarmFromGRPC converts a grpc Cluster to a Swarm. +func SwarmFromGRPC(c swarmapi.Cluster) types.Swarm { + swarm := types.Swarm{ + ClusterInfo: types.ClusterInfo{ + ID: c.ID, + Spec: types.Spec{ + Orchestration: types.OrchestrationConfig{ + TaskHistoryRetentionLimit: &c.Spec.Orchestration.TaskHistoryRetentionLimit, + }, + Raft: types.RaftConfig{ + SnapshotInterval: c.Spec.Raft.SnapshotInterval, + KeepOldSnapshots: &c.Spec.Raft.KeepOldSnapshots, + LogEntriesForSlowFollowers: c.Spec.Raft.LogEntriesForSlowFollowers, + HeartbeatTick: int(c.Spec.Raft.HeartbeatTick), + ElectionTick: int(c.Spec.Raft.ElectionTick), + }, + EncryptionConfig: types.EncryptionConfig{ + AutoLockManagers: c.Spec.EncryptionConfig.AutoLockManagers, + }, + CAConfig: types.CAConfig{ + // do not include the signing CA key (it should already be redacted via the swarm APIs) + SigningCACert: string(c.Spec.CAConfig.SigningCACert), + ForceRotate: c.Spec.CAConfig.ForceRotate, + }, + }, + TLSInfo: types.TLSInfo{ + TrustRoot: string(c.RootCA.CACert), + }, + RootRotationInProgress: c.RootCA.RootRotation != nil, + }, + JoinTokens: types.JoinTokens{ + Worker: c.RootCA.JoinTokens.Worker, + Manager: c.RootCA.JoinTokens.Manager, + }, + } + + issuerInfo, err := ca.IssuerFromAPIRootCA(&c.RootCA) + if err == nil && issuerInfo != nil { + swarm.TLSInfo.CertIssuerSubject = issuerInfo.Subject + swarm.TLSInfo.CertIssuerPublicKey = issuerInfo.PublicKey + } + + heartbeatPeriod, _ := gogotypes.DurationFromProto(c.Spec.Dispatcher.HeartbeatPeriod) + swarm.Spec.Dispatcher.HeartbeatPeriod = heartbeatPeriod + + swarm.Spec.CAConfig.NodeCertExpiry, _ = gogotypes.DurationFromProto(c.Spec.CAConfig.NodeCertExpiry) + + for _, ca := range c.Spec.CAConfig.ExternalCAs { + swarm.Spec.CAConfig.ExternalCAs = append(swarm.Spec.CAConfig.ExternalCAs, &types.ExternalCA{ + Protocol: types.ExternalCAProtocol(strings.ToLower(ca.Protocol.String())), + URL: ca.URL, + Options: ca.Options, + CACert: string(ca.CACert), + }) + } + + // Meta + swarm.Version.Index = 
c.Meta.Version.Index + swarm.CreatedAt, _ = gogotypes.TimestampFromProto(c.Meta.CreatedAt) + swarm.UpdatedAt, _ = gogotypes.TimestampFromProto(c.Meta.UpdatedAt) + + // Annotations + swarm.Spec.Annotations = annotationsFromGRPC(c.Spec.Annotations) + + return swarm +} + +// SwarmSpecToGRPC converts a Spec to a grpc ClusterSpec. +func SwarmSpecToGRPC(s types.Spec) (swarmapi.ClusterSpec, error) { + return MergeSwarmSpecToGRPC(s, swarmapi.ClusterSpec{}) +} + +// MergeSwarmSpecToGRPC merges a Spec with an initial grpc ClusterSpec +func MergeSwarmSpecToGRPC(s types.Spec, spec swarmapi.ClusterSpec) (swarmapi.ClusterSpec, error) { + // We take the initSpec (either created from scratch, or returned by swarmkit), + // and will only change the value if the one taken from types.Spec is not nil or 0. + // In other words, if the value taken from types.Spec is nil or 0, we will maintain the status quo. + if s.Annotations.Name != "" { + spec.Annotations.Name = s.Annotations.Name + } + if len(s.Annotations.Labels) != 0 { + spec.Annotations.Labels = s.Annotations.Labels + } + + if s.Orchestration.TaskHistoryRetentionLimit != nil { + spec.Orchestration.TaskHistoryRetentionLimit = *s.Orchestration.TaskHistoryRetentionLimit + } + if s.Raft.SnapshotInterval != 0 { + spec.Raft.SnapshotInterval = s.Raft.SnapshotInterval + } + if s.Raft.KeepOldSnapshots != nil { + spec.Raft.KeepOldSnapshots = *s.Raft.KeepOldSnapshots + } + if s.Raft.LogEntriesForSlowFollowers != 0 { + spec.Raft.LogEntriesForSlowFollowers = s.Raft.LogEntriesForSlowFollowers + } + if s.Raft.HeartbeatTick != 0 { + spec.Raft.HeartbeatTick = uint32(s.Raft.HeartbeatTick) + } + if s.Raft.ElectionTick != 0 { + spec.Raft.ElectionTick = uint32(s.Raft.ElectionTick) + } + if s.Dispatcher.HeartbeatPeriod != 0 { + spec.Dispatcher.HeartbeatPeriod = gogotypes.DurationProto(time.Duration(s.Dispatcher.HeartbeatPeriod)) + } + if s.CAConfig.NodeCertExpiry != 0 { + spec.CAConfig.NodeCertExpiry = gogotypes.DurationProto(s.CAConfig.NodeCertExpiry) + } + if s.CAConfig.SigningCACert != "" { + spec.CAConfig.SigningCACert = []byte(s.CAConfig.SigningCACert) + } + if s.CAConfig.SigningCAKey != "" { + // do propagate the signing CA key here because we want to provide it TO the swarm APIs + spec.CAConfig.SigningCAKey = []byte(s.CAConfig.SigningCAKey) + } + spec.CAConfig.ForceRotate = s.CAConfig.ForceRotate + + for _, ca := range s.CAConfig.ExternalCAs { + protocol, ok := swarmapi.ExternalCA_CAProtocol_value[strings.ToUpper(string(ca.Protocol))] + if !ok { + return swarmapi.ClusterSpec{}, fmt.Errorf("invalid protocol: %q", ca.Protocol) + } + spec.CAConfig.ExternalCAs = append(spec.CAConfig.ExternalCAs, &swarmapi.ExternalCA{ + Protocol: swarmapi.ExternalCA_CAProtocol(protocol), + URL: ca.URL, + Options: ca.Options, + CACert: []byte(ca.CACert), + }) + } + + spec.EncryptionConfig.AutoLockManagers = s.EncryptionConfig.AutoLockManagers + + return spec, nil +} diff --git a/components/engine/daemon/cluster/convert/task.go b/components/engine/daemon/cluster/convert/task.go new file mode 100644 index 0000000000..b90d24e359 --- /dev/null +++ b/components/engine/daemon/cluster/convert/task.go @@ -0,0 +1,66 @@ +package convert + +import ( + "strings" + + types "github.com/docker/docker/api/types/swarm" + swarmapi "github.com/docker/swarmkit/api" + gogotypes "github.com/gogo/protobuf/types" +) + +// TaskFromGRPC converts a grpc Task to a Task. 
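+//
+// Tasks whose spec is a network attachment (the internal tasks that back
+// unmanaged containers attached to swarm networks) are not exposed over the
+// API, so they convert to the zero types.Task; callers can detect this by the
+// empty ID:
+//
+//	task := TaskFromGRPC(t)
+//	if task.ID == "" {
+//		continue // attachment task, not an API-visible task (sketch)
+//	}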
+func TaskFromGRPC(t swarmapi.Task) types.Task { + if t.Spec.GetAttachment() != nil { + return types.Task{} + } + containerStatus := t.Status.GetContainer() + + task := types.Task{ + ID: t.ID, + Annotations: annotationsFromGRPC(t.Annotations), + ServiceID: t.ServiceID, + Slot: int(t.Slot), + NodeID: t.NodeID, + Spec: taskSpecFromGRPC(t.Spec), + Status: types.TaskStatus{ + State: types.TaskState(strings.ToLower(t.Status.State.String())), + Message: t.Status.Message, + Err: t.Status.Err, + }, + DesiredState: types.TaskState(strings.ToLower(t.DesiredState.String())), + } + + // Meta + task.Version.Index = t.Meta.Version.Index + task.CreatedAt, _ = gogotypes.TimestampFromProto(t.Meta.CreatedAt) + task.UpdatedAt, _ = gogotypes.TimestampFromProto(t.Meta.UpdatedAt) + + task.Status.Timestamp, _ = gogotypes.TimestampFromProto(t.Status.Timestamp) + + if containerStatus != nil { + task.Status.ContainerStatus.ContainerID = containerStatus.ContainerID + task.Status.ContainerStatus.PID = int(containerStatus.PID) + task.Status.ContainerStatus.ExitCode = int(containerStatus.ExitCode) + } + + // NetworksAttachments + for _, na := range t.Networks { + task.NetworksAttachments = append(task.NetworksAttachments, networkAttachmentFromGRPC(na)) + } + + if t.Status.PortStatus == nil { + return task + } + + for _, p := range t.Status.PortStatus.Ports { + task.Status.PortStatus.Ports = append(task.Status.PortStatus.Ports, types.PortConfig{ + Name: p.Name, + Protocol: types.PortConfigProtocol(strings.ToLower(swarmapi.PortConfig_Protocol_name[int32(p.Protocol)])), + PublishMode: types.PortConfigPublishMode(strings.ToLower(swarmapi.PortConfig_PublishMode_name[int32(p.PublishMode)])), + TargetPort: p.TargetPort, + PublishedPort: p.PublishedPort, + }) + } + + return task +} diff --git a/components/engine/daemon/cluster/executor/backend.go b/components/engine/daemon/cluster/executor/backend.go new file mode 100644 index 0000000000..2578a93c5a --- /dev/null +++ b/components/engine/daemon/cluster/executor/backend.go @@ -0,0 +1,64 @@ +package executor + +import ( + "io" + "time" + + "github.com/docker/distribution" + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/events" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/network" + swarmtypes "github.com/docker/docker/api/types/swarm" + containerpkg "github.com/docker/docker/container" + clustertypes "github.com/docker/docker/daemon/cluster/provider" + "github.com/docker/docker/plugin" + "github.com/docker/libnetwork" + "github.com/docker/libnetwork/cluster" + networktypes "github.com/docker/libnetwork/types" + "github.com/docker/swarmkit/agent/exec" + "golang.org/x/net/context" +) + +// Backend defines the executor component for a swarm agent. 
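+//
+// The interface is the executor's only window into the local engine. A
+// controller sketch that pulls an image and creates a container stays
+// entirely behind it (the helper and its arguments are hypothetical):
+//
+//	func prepare(ctx context.Context, b Backend, img string, cfg types.ContainerCreateConfig) error {
+//		if err := b.PullImage(ctx, img, "", nil, nil, ioutil.Discard); err != nil {
+//			return err
+//		}
+//		_, err := b.CreateManagedContainer(cfg)
+//		return err
+//	}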
+type Backend interface { + CreateManagedNetwork(clustertypes.NetworkCreateRequest) error + DeleteManagedNetwork(name string) error + FindNetwork(idName string) (libnetwork.Network, error) + SetupIngress(clustertypes.NetworkCreateRequest, string) (<-chan struct{}, error) + ReleaseIngress() (<-chan struct{}, error) + PullImage(ctx context.Context, image, tag string, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error + CreateManagedContainer(config types.ContainerCreateConfig) (container.ContainerCreateCreatedBody, error) + ContainerStart(name string, hostConfig *container.HostConfig, checkpoint string, checkpointDir string) error + ContainerStop(name string, seconds *int) error + ContainerLogs(context.Context, string, *types.ContainerLogsOptions) (<-chan *backend.LogMessage, error) + ConnectContainerToNetwork(containerName, networkName string, endpointConfig *network.EndpointSettings) error + ActivateContainerServiceBinding(containerName string) error + DeactivateContainerServiceBinding(containerName string) error + UpdateContainerServiceConfig(containerName string, serviceConfig *clustertypes.ServiceConfig) error + ContainerInspectCurrent(name string, size bool) (*types.ContainerJSON, error) + ContainerWait(ctx context.Context, name string, condition containerpkg.WaitCondition) (<-chan containerpkg.StateStatus, error) + ContainerRm(name string, config *types.ContainerRmConfig) error + ContainerKill(name string, sig uint64) error + SetContainerDependencyStore(name string, store exec.DependencyGetter) error + SetContainerSecretReferences(name string, refs []*swarmtypes.SecretReference) error + SetContainerConfigReferences(name string, refs []*swarmtypes.ConfigReference) error + SystemInfo() (*types.Info, error) + VolumeCreate(name, driverName string, opts, labels map[string]string) (*types.Volume, error) + Containers(config *types.ContainerListOptions) ([]*types.Container, error) + SetNetworkBootstrapKeys([]*networktypes.EncryptionKey) error + DaemonJoinsCluster(provider cluster.Provider) + DaemonLeavesCluster() + IsSwarmCompatible() error + SubscribeToEvents(since, until time.Time, filter filters.Args) ([]events.Message, chan interface{}) + UnsubscribeFromEvents(listener chan interface{}) + UpdateAttachment(string, string, string, *network.NetworkingConfig) error + WaitForDetachment(context.Context, string, string, string, string) error + GetRepository(context.Context, reference.Named, *types.AuthConfig) (distribution.Repository, bool, error) + LookupImage(name string) (*types.ImageInspect, error) + PluginManager() *plugin.Manager + PluginGetter() *plugin.Store +} diff --git a/components/engine/daemon/cluster/executor/container/adapter.go b/components/engine/daemon/cluster/executor/container/adapter.go new file mode 100644 index 0000000000..67d42c706e --- /dev/null +++ b/components/engine/daemon/cluster/executor/container/adapter.go @@ -0,0 +1,464 @@ +package container + +import ( + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "io" + "os" + "strings" + "syscall" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/backend" + containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/events" + containerpkg "github.com/docker/docker/container" + "github.com/docker/docker/daemon/cluster/convert" + executorpkg "github.com/docker/docker/daemon/cluster/executor" + "github.com/docker/libnetwork" + 
"github.com/docker/swarmkit/agent/exec" + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/log" + gogotypes "github.com/gogo/protobuf/types" + "github.com/opencontainers/go-digest" + "golang.org/x/net/context" + "golang.org/x/time/rate" +) + +// containerAdapter conducts remote operations for a container. All calls +// are mostly naked calls to the client API, seeded with information from +// containerConfig. +type containerAdapter struct { + backend executorpkg.Backend + container *containerConfig + dependencies exec.DependencyGetter +} + +func newContainerAdapter(b executorpkg.Backend, task *api.Task, dependencies exec.DependencyGetter) (*containerAdapter, error) { + ctnr, err := newContainerConfig(task) + if err != nil { + return nil, err + } + + return &containerAdapter{ + container: ctnr, + backend: b, + dependencies: dependencies, + }, nil +} + +func (c *containerAdapter) pullImage(ctx context.Context) error { + spec := c.container.spec() + + // Skip pulling if the image is referenced by image ID. + if _, err := digest.Parse(spec.Image); err == nil { + return nil + } + + // Skip pulling if the image is referenced by digest and already + // exists locally. + named, err := reference.ParseNormalizedNamed(spec.Image) + if err == nil { + if _, ok := named.(reference.Canonical); ok { + _, err := c.backend.LookupImage(spec.Image) + if err == nil { + return nil + } + } + } + + // if the image needs to be pulled, the auth config will be retrieved and updated + var encodedAuthConfig string + if spec.PullOptions != nil { + encodedAuthConfig = spec.PullOptions.RegistryAuth + } + + authConfig := &types.AuthConfig{} + if encodedAuthConfig != "" { + if err := json.NewDecoder(base64.NewDecoder(base64.URLEncoding, strings.NewReader(encodedAuthConfig))).Decode(authConfig); err != nil { + logrus.Warnf("invalid authconfig: %v", err) + } + } + + pr, pw := io.Pipe() + metaHeaders := map[string][]string{} + go func() { + err := c.backend.PullImage(ctx, c.container.image(), "", metaHeaders, authConfig, pw) + pw.CloseWithError(err) + }() + + dec := json.NewDecoder(pr) + dec.UseNumber() + m := map[string]interface{}{} + spamLimiter := rate.NewLimiter(rate.Every(time.Second), 1) + + lastStatus := "" + for { + if err := dec.Decode(&m); err != nil { + if err == io.EOF { + break + } + return err + } + l := log.G(ctx) + // limit pull progress logs unless the status changes + if spamLimiter.Allow() || lastStatus != m["status"] { + // if we have progress details, we have everything we need + if progress, ok := m["progressDetail"].(map[string]interface{}); ok { + // first, log the image and status + l = l.WithFields(logrus.Fields{ + "image": c.container.image(), + "status": m["status"], + }) + // then, if we have progress, log the progress + if progress["current"] != nil && progress["total"] != nil { + l = l.WithFields(logrus.Fields{ + "current": progress["current"], + "total": progress["total"], + }) + } + } + l.Debug("pull in progress") + } + // sometimes, we get no useful information at all, and add no fields + if status, ok := m["status"].(string); ok { + lastStatus = status + } + } + + // if the final stream object contained an error, return it + if errMsg, ok := m["error"]; ok { + return fmt.Errorf("%v", errMsg) + } + return nil +} + +func (c *containerAdapter) createNetworks(ctx context.Context) error { + for _, network := range c.container.networks() { + ncr, err := c.container.networkCreateRequest(network) + if err != nil { + return err + } + + if err := c.backend.CreateManagedNetwork(ncr); err 
!= nil { // TODO: include the network name in the error
+			if _, ok := err.(libnetwork.NetworkNameError); ok {
+				continue
+			}
+
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (c *containerAdapter) removeNetworks(ctx context.Context) error {
+	for _, nid := range c.container.networks() {
+		if err := c.backend.DeleteManagedNetwork(nid); err != nil {
+			switch err.(type) {
+			case *libnetwork.ActiveEndpointsError:
+				continue
+			case libnetwork.ErrNoSuchNetwork:
+				continue
+			default:
+				log.G(ctx).Errorf("network %s remove failed: %v", nid, err)
+				return err
+			}
+		}
+	}
+
+	return nil
+}
+
+func (c *containerAdapter) networkAttach(ctx context.Context) error {
+	config := c.container.createNetworkingConfig(c.backend)
+
+	var (
+		networkName string
+		networkID   string
+	)
+
+	if config != nil {
+		for n, epConfig := range config.EndpointsConfig {
+			networkName = n
+			networkID = epConfig.NetworkID
+			break
+		}
+	}
+
+	return c.backend.UpdateAttachment(networkName, networkID, c.container.id(), config)
+}
+
+func (c *containerAdapter) waitForDetach(ctx context.Context) error {
+	config := c.container.createNetworkingConfig(c.backend)
+
+	var (
+		networkName string
+		networkID   string
+	)
+
+	if config != nil {
+		for n, epConfig := range config.EndpointsConfig {
+			networkName = n
+			networkID = epConfig.NetworkID
+			break
+		}
+	}
+
+	return c.backend.WaitForDetachment(ctx, networkName, networkID, c.container.taskID(), c.container.id())
+}
+
+func (c *containerAdapter) create(ctx context.Context) error {
+	var cr containertypes.ContainerCreateCreatedBody
+	var err error
+	if cr, err = c.backend.CreateManagedContainer(types.ContainerCreateConfig{
+		Name:       c.container.name(),
+		Config:     c.container.config(),
+		HostConfig: c.container.hostConfig(),
+		// Use the first network in container create
+		NetworkingConfig: c.container.createNetworkingConfig(c.backend),
+	}); err != nil {
+		return err
+	}
+
+	// The Docker daemon currently doesn't support multiple networks in container create,
+	// so connect to all other networks afterwards
+	nc := c.container.connectNetworkingConfig(c.backend)
+
+	if nc != nil {
+		for n, ep := range nc.EndpointsConfig {
+			if err := c.backend.ConnectContainerToNetwork(cr.ID, n, ep); err != nil {
+				return err
+			}
+		}
+	}
+
+	container := c.container.task.Spec.GetContainer()
+	if container == nil {
+		return errors.New("unable to get container from task spec")
+	}
+
+	if err := c.backend.SetContainerDependencyStore(cr.ID, c.dependencies); err != nil {
+		return err
+	}
+
+	// configure secrets
+	secretRefs := convert.SecretReferencesFromGRPC(container.Secrets)
+	if err := c.backend.SetContainerSecretReferences(cr.ID, secretRefs); err != nil {
+		return err
+	}
+
+	configRefs := convert.ConfigReferencesFromGRPC(container.Configs)
+	if err := c.backend.SetContainerConfigReferences(cr.ID, configRefs); err != nil {
+		return err
+	}
+
+	if err := c.backend.UpdateContainerServiceConfig(cr.ID, c.container.serviceConfig()); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// checkMounts ensures that the provided mounts won't have any host-specific
+// problems at startup. For example, we disallow bind mounts without an
+// existing path, which is slightly different from the container API.
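+//
+// For instance, a bind mount whose Source is "/srv/data" fails here with
+// "invalid bind mount source, source path not found: /srv/data" when that
+// path does not exist on this node, rather than surfacing a less specific
+// error after the container has been created.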
+func (c *containerAdapter) checkMounts() error { + spec := c.container.spec() + for _, mount := range spec.Mounts { + switch mount.Type { + case api.MountTypeBind: + if _, err := os.Stat(mount.Source); os.IsNotExist(err) { + return fmt.Errorf("invalid bind mount source, source path not found: %s", mount.Source) + } + } + } + + return nil +} + +func (c *containerAdapter) start(ctx context.Context) error { + if err := c.checkMounts(); err != nil { + return err + } + + return c.backend.ContainerStart(c.container.name(), nil, "", "") +} + +func (c *containerAdapter) inspect(ctx context.Context) (types.ContainerJSON, error) { + cs, err := c.backend.ContainerInspectCurrent(c.container.name(), false) + if ctx.Err() != nil { + return types.ContainerJSON{}, ctx.Err() + } + if err != nil { + return types.ContainerJSON{}, err + } + return *cs, nil +} + +// events issues a call to the events API and returns a channel with all +// events. The stream of events can be shutdown by cancelling the context. +func (c *containerAdapter) events(ctx context.Context) <-chan events.Message { + log.G(ctx).Debugf("waiting on events") + buffer, l := c.backend.SubscribeToEvents(time.Time{}, time.Time{}, c.container.eventFilter()) + eventsq := make(chan events.Message, len(buffer)) + + for _, event := range buffer { + eventsq <- event + } + + go func() { + defer c.backend.UnsubscribeFromEvents(l) + + for { + select { + case ev := <-l: + jev, ok := ev.(events.Message) + if !ok { + log.G(ctx).Warnf("unexpected event message: %q", ev) + continue + } + select { + case eventsq <- jev: + case <-ctx.Done(): + return + } + case <-ctx.Done(): + return + } + } + }() + + return eventsq +} + +func (c *containerAdapter) wait(ctx context.Context) (<-chan containerpkg.StateStatus, error) { + return c.backend.ContainerWait(ctx, c.container.nameOrID(), containerpkg.WaitConditionNotRunning) +} + +func (c *containerAdapter) shutdown(ctx context.Context) error { + // Default stop grace period to nil (daemon will use the stopTimeout of the container) + var stopgrace *int + spec := c.container.spec() + if spec.StopGracePeriod != nil { + stopgraceValue := int(spec.StopGracePeriod.Seconds) + stopgrace = &stopgraceValue + } + return c.backend.ContainerStop(c.container.name(), stopgrace) +} + +func (c *containerAdapter) terminate(ctx context.Context) error { + return c.backend.ContainerKill(c.container.name(), uint64(syscall.SIGKILL)) +} + +func (c *containerAdapter) remove(ctx context.Context) error { + return c.backend.ContainerRm(c.container.name(), &types.ContainerRmConfig{ + RemoveVolume: true, + ForceRemove: true, + }) +} + +func (c *containerAdapter) createVolumes(ctx context.Context) error { + // Create plugin volumes that are embedded inside a Mount + for _, mount := range c.container.task.Spec.GetContainer().Mounts { + if mount.Type != api.MountTypeVolume { + continue + } + + if mount.VolumeOptions == nil { + continue + } + + if mount.VolumeOptions.DriverConfig == nil { + continue + } + + req := c.container.volumeCreateRequest(&mount) + + // Check if this volume exists on the engine + if _, err := c.backend.VolumeCreate(req.Name, req.Driver, req.DriverOpts, req.Labels); err != nil { + // TODO(amitshukla): Today, volume create through the engine api does not return an error + // when the named volume with the same parameters already exists. 
+ // It returns an error if the driver name is different - that is a valid error + return err + } + + } + + return nil +} + +func (c *containerAdapter) activateServiceBinding() error { + return c.backend.ActivateContainerServiceBinding(c.container.name()) +} + +func (c *containerAdapter) deactivateServiceBinding() error { + return c.backend.DeactivateContainerServiceBinding(c.container.name()) +} + +func (c *containerAdapter) logs(ctx context.Context, options api.LogSubscriptionOptions) (<-chan *backend.LogMessage, error) { + apiOptions := &types.ContainerLogsOptions{ + Follow: options.Follow, + + // Always say yes to Timestamps and Details. we make the decision + // of whether to return these to the user or not way higher up the + // stack. + Timestamps: true, + Details: true, + } + + if options.Since != nil { + since, err := gogotypes.TimestampFromProto(options.Since) + if err != nil { + return nil, err + } + // print since as this formatted string because the docker container + // logs interface expects it like this. + // see github.com/docker/docker/api/types/time.ParseTimestamps + apiOptions.Since = fmt.Sprintf("%d.%09d", since.Unix(), int64(since.Nanosecond())) + } + + if options.Tail < 0 { + // See protobuf documentation for details of how this works. + apiOptions.Tail = fmt.Sprint(-options.Tail - 1) + } else if options.Tail > 0 { + return nil, errors.New("tail relative to start of logs not supported via docker API") + } + + if len(options.Streams) == 0 { + // empty == all + apiOptions.ShowStdout, apiOptions.ShowStderr = true, true + } else { + for _, stream := range options.Streams { + switch stream { + case api.LogStreamStdout: + apiOptions.ShowStdout = true + case api.LogStreamStderr: + apiOptions.ShowStderr = true + } + } + } + msgs, err := c.backend.ContainerLogs(ctx, c.container.name(), apiOptions) + if err != nil { + return nil, err + } + return msgs, nil +} + +// todo: typed/wrapped errors +func isContainerCreateNameConflict(err error) bool { + return strings.Contains(err.Error(), "Conflict. The name") +} + +func isUnknownContainer(err error) bool { + return strings.Contains(err.Error(), "No such container:") +} + +func isStoppedContainer(err error) bool { + return strings.Contains(err.Error(), "is already stopped") +} diff --git a/components/engine/daemon/cluster/executor/container/attachment.go b/components/engine/daemon/cluster/executor/container/attachment.go new file mode 100644 index 0000000000..54f95a1fbf --- /dev/null +++ b/components/engine/daemon/cluster/executor/container/attachment.go @@ -0,0 +1,81 @@ +package container + +import ( + executorpkg "github.com/docker/docker/daemon/cluster/executor" + "github.com/docker/swarmkit/agent/exec" + "github.com/docker/swarmkit/api" + "golang.org/x/net/context" +) + +// networkAttacherController implements agent.Controller against docker's API. +// +// networkAttacherController manages the lifecycle of network +// attachment of a docker unmanaged container managed as a task from +// agent point of view. It provides network attachment information to +// the unmanaged container for it to attach to the network and run. 
+type networkAttacherController struct { + backend executorpkg.Backend + task *api.Task + adapter *containerAdapter + closed chan struct{} +} + +func newNetworkAttacherController(b executorpkg.Backend, task *api.Task, dependencies exec.DependencyGetter) (*networkAttacherController, error) { + adapter, err := newContainerAdapter(b, task, dependencies) + if err != nil { + return nil, err + } + + return &networkAttacherController{ + backend: b, + task: task, + adapter: adapter, + closed: make(chan struct{}), + }, nil +} + +func (nc *networkAttacherController) Update(ctx context.Context, t *api.Task) error { + return nil +} + +func (nc *networkAttacherController) Prepare(ctx context.Context) error { + // Make sure all the networks that the task needs are created. + if err := nc.adapter.createNetworks(ctx); err != nil { + return err + } + + return nil +} + +func (nc *networkAttacherController) Start(ctx context.Context) error { + return nc.adapter.networkAttach(ctx) +} + +func (nc *networkAttacherController) Wait(pctx context.Context) error { + ctx, cancel := context.WithCancel(pctx) + defer cancel() + + return nc.adapter.waitForDetach(ctx) +} + +func (nc *networkAttacherController) Shutdown(ctx context.Context) error { + return nil +} + +func (nc *networkAttacherController) Terminate(ctx context.Context) error { + return nil +} + +func (nc *networkAttacherController) Remove(ctx context.Context) error { + // Try removing the network referenced in this task in case this + // task is the last one referencing it + if err := nc.adapter.removeNetworks(ctx); err != nil { + return err + } + + return nil +} + +func (nc *networkAttacherController) Close() error { + return nil +} diff --git a/components/engine/daemon/cluster/executor/container/container.go b/components/engine/daemon/cluster/executor/container/container.go new file mode 100644 index 0000000000..bc98e53955 --- /dev/null +++ b/components/engine/daemon/cluster/executor/container/container.go @@ -0,0 +1,676 @@ +package container + +import ( + "errors" + "fmt" + "net" + "strconv" + "strings" + "time" + + "github.com/Sirupsen/logrus" + + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" + enginecontainer "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/events" + "github.com/docker/docker/api/types/filters" + enginemount "github.com/docker/docker/api/types/mount" + "github.com/docker/docker/api/types/network" + volumetypes "github.com/docker/docker/api/types/volume" + "github.com/docker/docker/daemon/cluster/convert" + executorpkg "github.com/docker/docker/daemon/cluster/executor" + clustertypes "github.com/docker/docker/daemon/cluster/provider" + "github.com/docker/go-connections/nat" + netconst "github.com/docker/libnetwork/datastore" + "github.com/docker/swarmkit/agent/exec" + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/template" + gogotypes "github.com/gogo/protobuf/types" +) + +const ( + // Explicitly use the kernel's default setting for CPU quota of 100ms. + // https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt + cpuQuotaPeriod = 100 * time.Millisecond + + // systemLabelPrefix represents the reserved namespace for system labels. + systemLabelPrefix = "com.docker.swarm" +) + +// containerConfig converts task properties into docker container compatible +// components. +type containerConfig struct { + task *api.Task + networksAttachments map[string]*api.NetworkAttachment +} + +// newContainerConfig returns a validated container config. 
No methods should +// return an error if this function returns without error. +func newContainerConfig(t *api.Task) (*containerConfig, error) { + var c containerConfig + return &c, c.setTask(t) +} + +func (c *containerConfig) setTask(t *api.Task) error { + if t.Spec.GetContainer() == nil && t.Spec.GetAttachment() == nil { + return exec.ErrRuntimeUnsupported + } + + container := t.Spec.GetContainer() + if container != nil { + if container.Image == "" { + return ErrImageRequired + } + + if err := validateMounts(container.Mounts); err != nil { + return err + } + } + + // index the networks by name + c.networksAttachments = make(map[string]*api.NetworkAttachment, len(t.Networks)) + for _, attachment := range t.Networks { + c.networksAttachments[attachment.Network.Spec.Annotations.Name] = attachment + } + + c.task = t + + if t.Spec.GetContainer() != nil { + preparedSpec, err := template.ExpandContainerSpec(t) + if err != nil { + return err + } + c.task.Spec.Runtime = &api.TaskSpec_Container{ + Container: preparedSpec, + } + } + + return nil +} + +func (c *containerConfig) id() string { + attachment := c.task.Spec.GetAttachment() + if attachment == nil { + return "" + } + + return attachment.ContainerID +} + +func (c *containerConfig) taskID() string { + return c.task.ID +} + +func (c *containerConfig) endpoint() *api.Endpoint { + return c.task.Endpoint +} + +func (c *containerConfig) spec() *api.ContainerSpec { + return c.task.Spec.GetContainer() +} + +func (c *containerConfig) nameOrID() string { + if c.task.Spec.GetContainer() != nil { + return c.name() + } + + return c.id() +} + +func (c *containerConfig) name() string { + if c.task.Annotations.Name != "" { + // if set, use the container Annotations.Name field, set in the orchestrator. + return c.task.Annotations.Name + } + + slot := fmt.Sprint(c.task.Slot) + if slot == "" || c.task.Slot == 0 { + slot = c.task.NodeID + } + + // fallback to service.slot.id. 
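+	// For illustration (hypothetical values): absent an orchestrator-set
+	// name, a task of service "web" in slot 3 with ID "abc123" is named
+	// "web.3.abc123"; for a global service task (Slot == 0) the node ID
+	// stands in for the slot.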
+ return fmt.Sprintf("%s.%s.%s", c.task.ServiceAnnotations.Name, slot, c.task.ID) +} + +func (c *containerConfig) image() string { + raw := c.spec().Image + ref, err := reference.ParseNormalizedNamed(raw) + if err != nil { + return raw + } + return reference.FamiliarString(reference.TagNameOnly(ref)) +} + +func (c *containerConfig) portBindings() nat.PortMap { + portBindings := nat.PortMap{} + if c.task.Endpoint == nil { + return portBindings + } + + for _, portConfig := range c.task.Endpoint.Ports { + if portConfig.PublishMode != api.PublishModeHost { + continue + } + + port := nat.Port(fmt.Sprintf("%d/%s", portConfig.TargetPort, strings.ToLower(portConfig.Protocol.String()))) + binding := []nat.PortBinding{ + {}, + } + + if portConfig.PublishedPort != 0 { + binding[0].HostPort = strconv.Itoa(int(portConfig.PublishedPort)) + } + portBindings[port] = binding + } + + return portBindings +} + +func (c *containerConfig) exposedPorts() map[nat.Port]struct{} { + exposedPorts := make(map[nat.Port]struct{}) + if c.task.Endpoint == nil { + return exposedPorts + } + + for _, portConfig := range c.task.Endpoint.Ports { + if portConfig.PublishMode != api.PublishModeHost { + continue + } + + port := nat.Port(fmt.Sprintf("%d/%s", portConfig.TargetPort, strings.ToLower(portConfig.Protocol.String()))) + exposedPorts[port] = struct{}{} + } + + return exposedPorts +} + +func (c *containerConfig) config() *enginecontainer.Config { + config := &enginecontainer.Config{ + Labels: c.labels(), + StopSignal: c.spec().StopSignal, + Tty: c.spec().TTY, + OpenStdin: c.spec().OpenStdin, + User: c.spec().User, + Env: c.spec().Env, + Hostname: c.spec().Hostname, + WorkingDir: c.spec().Dir, + Image: c.image(), + ExposedPorts: c.exposedPorts(), + Healthcheck: c.healthcheck(), + } + + if len(c.spec().Command) > 0 { + // If Command is provided, we replace the whole invocation with Command + // by replacing Entrypoint and specifying Cmd. Args is ignored in this + // case. + config.Entrypoint = append(config.Entrypoint, c.spec().Command...) + config.Cmd = append(config.Cmd, c.spec().Args...) + } else if len(c.spec().Args) > 0 { + // In this case, we assume the image has an Entrypoint and Args + // specifies the arguments for that entrypoint. + config.Cmd = c.spec().Args + } + + return config +} + +func (c *containerConfig) labels() map[string]string { + var ( + system = map[string]string{ + "task": "", // mark as cluster task + "task.id": c.task.ID, + "task.name": c.name(), + "node.id": c.task.NodeID, + "service.id": c.task.ServiceID, + "service.name": c.task.ServiceAnnotations.Name, + } + labels = make(map[string]string) + ) + + // base labels are those defined in the spec. + for k, v := range c.spec().Labels { + labels[k] = v + } + + // we then apply the overrides from the task, which may be set via the + // orchestrator. + for k, v := range c.task.Annotations.Labels { + labels[k] = v + } + + // finally, we apply the system labels, which override all labels. 
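+	// For illustration (hypothetical values): a task with ID "abc" of
+	// service "web" ends up carrying, among others:
+	//
+	//	com.docker.swarm.task.id      = "abc"
+	//	com.docker.swarm.service.name = "web"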
+ for k, v := range system { + labels[strings.Join([]string{systemLabelPrefix, k}, ".")] = v + } + + return labels +} + +func (c *containerConfig) mounts() []enginemount.Mount { + var r []enginemount.Mount + for _, mount := range c.spec().Mounts { + r = append(r, convertMount(mount)) + } + return r +} + +func convertMount(m api.Mount) enginemount.Mount { + mount := enginemount.Mount{ + Source: m.Source, + Target: m.Target, + ReadOnly: m.ReadOnly, + } + + switch m.Type { + case api.MountTypeBind: + mount.Type = enginemount.TypeBind + case api.MountTypeVolume: + mount.Type = enginemount.TypeVolume + case api.MountTypeTmpfs: + mount.Type = enginemount.TypeTmpfs + } + + if m.BindOptions != nil { + mount.BindOptions = &enginemount.BindOptions{} + switch m.BindOptions.Propagation { + case api.MountPropagationRPrivate: + mount.BindOptions.Propagation = enginemount.PropagationRPrivate + case api.MountPropagationPrivate: + mount.BindOptions.Propagation = enginemount.PropagationPrivate + case api.MountPropagationRSlave: + mount.BindOptions.Propagation = enginemount.PropagationRSlave + case api.MountPropagationSlave: + mount.BindOptions.Propagation = enginemount.PropagationSlave + case api.MountPropagationRShared: + mount.BindOptions.Propagation = enginemount.PropagationRShared + case api.MountPropagationShared: + mount.BindOptions.Propagation = enginemount.PropagationShared + } + } + + if m.VolumeOptions != nil { + mount.VolumeOptions = &enginemount.VolumeOptions{ + NoCopy: m.VolumeOptions.NoCopy, + } + if m.VolumeOptions.Labels != nil { + mount.VolumeOptions.Labels = make(map[string]string, len(m.VolumeOptions.Labels)) + for k, v := range m.VolumeOptions.Labels { + mount.VolumeOptions.Labels[k] = v + } + } + if m.VolumeOptions.DriverConfig != nil { + mount.VolumeOptions.DriverConfig = &enginemount.Driver{ + Name: m.VolumeOptions.DriverConfig.Name, + } + if m.VolumeOptions.DriverConfig.Options != nil { + mount.VolumeOptions.DriverConfig.Options = make(map[string]string, len(m.VolumeOptions.DriverConfig.Options)) + for k, v := range m.VolumeOptions.DriverConfig.Options { + mount.VolumeOptions.DriverConfig.Options[k] = v + } + } + } + } + + if m.TmpfsOptions != nil { + mount.TmpfsOptions = &enginemount.TmpfsOptions{ + SizeBytes: m.TmpfsOptions.SizeBytes, + Mode: m.TmpfsOptions.Mode, + } + } + + return mount +} + +func (c *containerConfig) healthcheck() *enginecontainer.HealthConfig { + hcSpec := c.spec().Healthcheck + if hcSpec == nil { + return nil + } + interval, _ := gogotypes.DurationFromProto(hcSpec.Interval) + timeout, _ := gogotypes.DurationFromProto(hcSpec.Timeout) + startPeriod, _ := gogotypes.DurationFromProto(hcSpec.StartPeriod) + return &enginecontainer.HealthConfig{ + Test: hcSpec.Test, + Interval: interval, + Timeout: timeout, + Retries: int(hcSpec.Retries), + StartPeriod: startPeriod, + } +} + +func (c *containerConfig) hostConfig() *enginecontainer.HostConfig { + hc := &enginecontainer.HostConfig{ + Resources: c.resources(), + GroupAdd: c.spec().Groups, + PortBindings: c.portBindings(), + Mounts: c.mounts(), + ReadonlyRootfs: c.spec().ReadOnly, + } + + if c.spec().DNSConfig != nil { + hc.DNS = c.spec().DNSConfig.Nameservers + hc.DNSSearch = c.spec().DNSConfig.Search + hc.DNSOptions = c.spec().DNSConfig.Options + } + + c.applyPrivileges(hc) + + // The format of extra hosts on swarmkit is specified in: + // http://man7.org/linux/man-pages/man5/hosts.5.html + // IP_address canonical_hostname [aliases...] 
+ // However, the format of ExtraHosts in HostConfig is + // <host>:<IP> + // We need to do the conversion here + // (Alias is ignored for now) + for _, entry := range c.spec().Hosts { + parts := strings.Fields(entry) + if len(parts) > 1 { + hc.ExtraHosts = append(hc.ExtraHosts, fmt.Sprintf("%s:%s", parts[1], parts[0])) + } + } + + if c.task.LogDriver != nil { + hc.LogConfig = enginecontainer.LogConfig{ + Type: c.task.LogDriver.Name, + Config: c.task.LogDriver.Options, + } + } + + if len(c.task.Networks) > 0 { + labels := c.task.Networks[0].Network.Spec.Annotations.Labels + name := c.task.Networks[0].Network.Spec.Annotations.Name + if v, ok := labels["com.docker.swarm.predefined"]; ok && v == "true" { + hc.NetworkMode = enginecontainer.NetworkMode(name) + } + } + + return hc +} + +// This handles the case of volumes that are defined inside a service Mount +func (c *containerConfig) volumeCreateRequest(mount *api.Mount) *volumetypes.VolumesCreateBody { + var ( + driverName string + driverOpts map[string]string + labels map[string]string + ) + + if mount.VolumeOptions != nil && mount.VolumeOptions.DriverConfig != nil { + driverName = mount.VolumeOptions.DriverConfig.Name + driverOpts = mount.VolumeOptions.DriverConfig.Options + labels = mount.VolumeOptions.Labels + } + + if mount.VolumeOptions != nil { + return &volumetypes.VolumesCreateBody{ + Name: mount.Source, + Driver: driverName, + DriverOpts: driverOpts, + Labels: labels, + } + } + return nil +} + +func (c *containerConfig) resources() enginecontainer.Resources { + resources := enginecontainer.Resources{} + + // If no limits are specified let the engine use its defaults. + // + // TODO(aluzzardi): We might want to set some limits anyway otherwise + // "unlimited" tasks will step over the reservation of other tasks. + r := c.task.Spec.Resources + if r == nil || r.Limits == nil { + return resources + } + + if r.Limits.MemoryBytes > 0 { + resources.Memory = r.Limits.MemoryBytes + } + + if r.Limits.NanoCPUs > 0 { + // CPU Period must be set in microseconds. + resources.CPUPeriod = int64(cpuQuotaPeriod / time.Microsecond) + resources.CPUQuota = r.Limits.NanoCPUs * resources.CPUPeriod / 1e9 + } + + return resources +} + +// Docker daemon supports just 1 network during container create. +func (c *containerConfig) createNetworkingConfig(b executorpkg.Backend) *network.NetworkingConfig { + var networks []*api.NetworkAttachment + if c.task.Spec.GetContainer() != nil || c.task.Spec.GetAttachment() != nil { + networks = c.task.Networks + } + + epConfig := make(map[string]*network.EndpointSettings) + if len(networks) > 0 { + epConfig[networks[0].Network.Spec.Annotations.Name] = getEndpointConfig(networks[0], b) + } + + return &network.NetworkingConfig{EndpointsConfig: epConfig} +} + +// TODO: Merge this function with createNetworkingConfig after daemon supports multiple networks in container create +func (c *containerConfig) connectNetworkingConfig(b executorpkg.Backend) *network.NetworkingConfig { + var networks []*api.NetworkAttachment + if c.task.Spec.GetContainer() != nil { + networks = c.task.Networks + } + // First network is used during container create.
Other networks are used in "docker network connect" + if len(networks) < 2 { + return nil + } + + epConfig := make(map[string]*network.EndpointSettings) + for _, na := range networks[1:] { + epConfig[na.Network.Spec.Annotations.Name] = getEndpointConfig(na, b) + } + return &network.NetworkingConfig{EndpointsConfig: epConfig} +} + +func getEndpointConfig(na *api.NetworkAttachment, b executorpkg.Backend) *network.EndpointSettings { + var ipv4, ipv6 string + for _, addr := range na.Addresses { + ip, _, err := net.ParseCIDR(addr) + if err != nil { + continue + } + + if ip.To4() != nil { + ipv4 = ip.String() + continue + } + + if ip.To16() != nil { + ipv6 = ip.String() + } + } + + n := &network.EndpointSettings{ + NetworkID: na.Network.ID, + IPAMConfig: &network.EndpointIPAMConfig{ + IPv4Address: ipv4, + IPv6Address: ipv6, + }, + Aliases: na.Aliases, + DriverOpts: na.DriverAttachmentOpts, + } + if v, ok := na.Network.Spec.Annotations.Labels["com.docker.swarm.predefined"]; ok && v == "true" { + if ln, err := b.FindNetwork(na.Network.Spec.Annotations.Name); err == nil { + n.NetworkID = ln.ID() + } + } + return n +} + +func (c *containerConfig) virtualIP(networkID string) string { + if c.task.Endpoint == nil { + return "" + } + + for _, eVip := range c.task.Endpoint.VirtualIPs { + // We only support IPv4 VIPs for now. + if eVip.NetworkID == networkID { + vip, _, err := net.ParseCIDR(eVip.Addr) + if err != nil { + return "" + } + + return vip.String() + } + } + + return "" +} + +func (c *containerConfig) serviceConfig() *clustertypes.ServiceConfig { + if len(c.task.Networks) == 0 { + return nil + } + + logrus.Debugf("Creating service config in agent for t = %+v", c.task) + svcCfg := &clustertypes.ServiceConfig{ + Name: c.task.ServiceAnnotations.Name, + Aliases: make(map[string][]string), + ID: c.task.ServiceID, + VirtualAddresses: make(map[string]*clustertypes.VirtualAddress), + } + + for _, na := range c.task.Networks { + svcCfg.VirtualAddresses[na.Network.ID] = &clustertypes.VirtualAddress{ + // We support only IPv4 virtual IP for now. + IPv4: c.virtualIP(na.Network.ID), + } + if len(na.Aliases) > 0 { + svcCfg.Aliases[na.Network.ID] = na.Aliases + } + } + + if c.task.Endpoint != nil { + for _, ePort := range c.task.Endpoint.Ports { + if ePort.PublishMode != api.PublishModeIngress { + continue + } + + svcCfg.ExposedPorts = append(svcCfg.ExposedPorts, &clustertypes.PortConfig{ + Name: ePort.Name, + Protocol: int32(ePort.Protocol), + TargetPort: ePort.TargetPort, + PublishedPort: ePort.PublishedPort, + }) + } + } + + return svcCfg +} + +// networks returns a list of network names attached to the container. The +// returned name can be used to lookup the corresponding network create +// options. 
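+//
+// A typical pairing, as in the adapter's createNetworks above (sketch):
+//
+//	for _, name := range c.networks() {
+//		ncr, err := c.networkCreateRequest(name)
+//		// ... pass ncr to backend.CreateManagedNetwork ...
+//	}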
+func (c *containerConfig) networks() []string { + var networks []string + + for name := range c.networksAttachments { + networks = append(networks, name) + } + + return networks +} + +func (c *containerConfig) networkCreateRequest(name string) (clustertypes.NetworkCreateRequest, error) { + na, ok := c.networksAttachments[name] + if !ok { + return clustertypes.NetworkCreateRequest{}, errors.New("container: unknown network referenced") + } + + options := types.NetworkCreate{ + // ID: na.Network.ID, + Labels: na.Network.Spec.Annotations.Labels, + Internal: na.Network.Spec.Internal, + Attachable: na.Network.Spec.Attachable, + Ingress: convert.IsIngressNetwork(na.Network), + EnableIPv6: na.Network.Spec.Ipv6Enabled, + CheckDuplicate: true, + Scope: netconst.SwarmScope, + } + + if na.Network.Spec.ConfigFrom != "" { + options.ConfigFrom = &network.ConfigReference{ + Network: na.Network.Spec.ConfigFrom, + } + } + + if na.Network.DriverState != nil { + options.Driver = na.Network.DriverState.Name + options.Options = na.Network.DriverState.Options + } + if na.Network.IPAM != nil { + options.IPAM = &network.IPAM{ + Driver: na.Network.IPAM.Driver.Name, + Options: na.Network.IPAM.Driver.Options, + } + for _, ic := range na.Network.IPAM.Configs { + c := network.IPAMConfig{ + Subnet: ic.Subnet, + IPRange: ic.Range, + Gateway: ic.Gateway, + } + options.IPAM.Config = append(options.IPAM.Config, c) + } + } + + return clustertypes.NetworkCreateRequest{ + ID: na.Network.ID, + NetworkCreateRequest: types.NetworkCreateRequest{ + Name: name, + NetworkCreate: options, + }, + }, nil +} + +func (c *containerConfig) applyPrivileges(hc *enginecontainer.HostConfig) { + privileges := c.spec().Privileges + if privileges == nil { + return + } + + credentials := privileges.CredentialSpec + if credentials != nil { + switch credentials.Source.(type) { + case *api.Privileges_CredentialSpec_File: + hc.SecurityOpt = append(hc.SecurityOpt, "credentialspec=file://"+credentials.GetFile()) + case *api.Privileges_CredentialSpec_Registry: + hc.SecurityOpt = append(hc.SecurityOpt, "credentialspec=registry://"+credentials.GetRegistry()) + } + } + + selinux := privileges.SELinuxContext + if selinux != nil { + if selinux.Disable { + hc.SecurityOpt = append(hc.SecurityOpt, "label=disable") + } + if selinux.User != "" { + hc.SecurityOpt = append(hc.SecurityOpt, "label=user:"+selinux.User) + } + if selinux.Role != "" { + hc.SecurityOpt = append(hc.SecurityOpt, "label=role:"+selinux.Role) + } + if selinux.Level != "" { + hc.SecurityOpt = append(hc.SecurityOpt, "label=level:"+selinux.Level) + } + if selinux.Type != "" { + hc.SecurityOpt = append(hc.SecurityOpt, "label=type:"+selinux.Type) + } + } +} + +func (c containerConfig) eventFilter() filters.Args { + filter := filters.NewArgs() + filter.Add("type", events.ContainerEventType) + filter.Add("name", c.name()) + filter.Add("label", fmt.Sprintf("%v.task.id=%v", systemLabelPrefix, c.task.ID)) + return filter +} diff --git a/components/engine/daemon/cluster/executor/container/controller.go b/components/engine/daemon/cluster/executor/container/controller.go new file mode 100644 index 0000000000..8e95816138 --- /dev/null +++ b/components/engine/daemon/cluster/executor/container/controller.go @@ -0,0 +1,697 @@ +package container + +import ( + "fmt" + "os" + "strconv" + "strings" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/events" + executorpkg "github.com/docker/docker/daemon/cluster/executor" + "github.com/docker/go-connections/nat" + 
"github.com/docker/libnetwork" + "github.com/docker/swarmkit/agent/exec" + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/log" + gogotypes "github.com/gogo/protobuf/types" + "github.com/pkg/errors" + "golang.org/x/net/context" + "golang.org/x/time/rate" +) + +const defaultGossipConvergeDelay = 2 * time.Second + +// controller implements agent.Controller against docker's API. +// +// Most operations against docker's API are done through the container name, +// which is unique to the task. +type controller struct { + task *api.Task + adapter *containerAdapter + closed chan struct{} + err error + + pulled chan struct{} // closed after pull + cancelPull func() // cancels pull context if not nil + pullErr error // pull error, only read after pulled closed +} + +var _ exec.Controller = &controller{} + +// NewController returns a docker exec runner for the provided task. +func newController(b executorpkg.Backend, task *api.Task, dependencies exec.DependencyGetter) (*controller, error) { + adapter, err := newContainerAdapter(b, task, dependencies) + if err != nil { + return nil, err + } + + return &controller{ + task: task, + adapter: adapter, + closed: make(chan struct{}), + }, nil +} + +func (r *controller) Task() (*api.Task, error) { + return r.task, nil +} + +// ContainerStatus returns the container-specific status for the task. +func (r *controller) ContainerStatus(ctx context.Context) (*api.ContainerStatus, error) { + ctnr, err := r.adapter.inspect(ctx) + if err != nil { + if isUnknownContainer(err) { + return nil, nil + } + return nil, err + } + return parseContainerStatus(ctnr) +} + +func (r *controller) PortStatus(ctx context.Context) (*api.PortStatus, error) { + ctnr, err := r.adapter.inspect(ctx) + if err != nil { + if isUnknownContainer(err) { + return nil, nil + } + + return nil, err + } + + return parsePortStatus(ctnr) +} + +// Update tasks a recent task update and applies it to the container. +func (r *controller) Update(ctx context.Context, t *api.Task) error { + // TODO(stevvooe): While assignment of tasks is idempotent, we do allow + // updates of metadata, such as labelling, as well as any other properties + // that make sense. + return nil +} + +// Prepare creates a container and ensures the image is pulled. +// +// If the container has already be created, exec.ErrTaskPrepared is returned. +func (r *controller) Prepare(ctx context.Context) error { + if err := r.checkClosed(); err != nil { + return err + } + + // Make sure all the networks that the task needs are created. + if err := r.adapter.createNetworks(ctx); err != nil { + return err + } + + // Make sure all the volumes that the task needs are created. + if err := r.adapter.createVolumes(ctx); err != nil { + return err + } + + if os.Getenv("DOCKER_SERVICE_PREFER_OFFLINE_IMAGE") != "1" { + if r.pulled == nil { + // Fork the pull to a different context to allow pull to continue + // on re-entrant calls to Prepare. This ensures that Prepare can be + // idempotent and not incur the extra cost of pulling when + // cancelled on updates. + var pctx context.Context + + r.pulled = make(chan struct{}) + pctx, r.cancelPull = context.WithCancel(context.Background()) // TODO(stevvooe): Bind a context to the entire controller. 
+ + go func() { + defer close(r.pulled) + r.pullErr = r.adapter.pullImage(pctx) // protected by closing r.pulled + }() + } + + select { + case <-ctx.Done(): + return ctx.Err() + case <-r.pulled: + if r.pullErr != nil { + // NOTE(stevvooe): We always try to pull the image to make sure we have + // the most up to date version. This will return an error, but we only + // log it. If the image truly doesn't exist, the create below will + // error out. + // + // This gives us some nice behavior where we use up to date versions of + // mutable tags, but will still run if the old image is available but a + // registry is down. + // + // If you don't want this behavior, lock down your image to an + // immutable tag or digest. + log.G(ctx).WithError(r.pullErr).Error("pulling image failed") + } + } + } + + if err := r.adapter.create(ctx); err != nil { + if isContainerCreateNameConflict(err) { + if _, err := r.adapter.inspect(ctx); err != nil { + return err + } + + // container is already created. success! + return exec.ErrTaskPrepared + } + + return err + } + + return nil +} + +// Start the container. An error will be returned if the container is already started. +func (r *controller) Start(ctx context.Context) error { + if err := r.checkClosed(); err != nil { + return err + } + + ctnr, err := r.adapter.inspect(ctx) + if err != nil { + return err + } + + // Detect whether the container has *ever* been started. If so, we don't + // issue the start. + // + // TODO(stevvooe): This is very racy. While reading inspect, another could + // start the process and we could end up starting it twice. + if ctnr.State.Status != "created" { + return exec.ErrTaskStarted + } + + for { + if err := r.adapter.start(ctx); err != nil { + if _, ok := err.(libnetwork.ErrNoSuchNetwork); ok { + // Retry network creation again if we + // failed because some of the networks + // were not found. + if err := r.adapter.createNetworks(ctx); err != nil { + return err + } + + continue + } + + return errors.Wrap(err, "starting container failed") + } + + break + } + + // no health check + if ctnr.Config == nil || ctnr.Config.Healthcheck == nil || len(ctnr.Config.Healthcheck.Test) == 0 || ctnr.Config.Healthcheck.Test[0] == "NONE" { + if err := r.adapter.activateServiceBinding(); err != nil { + log.G(ctx).WithError(err).Errorf("failed to activate service binding for container %s which has no healthcheck config", r.adapter.container.name()) + return err + } + return nil + } + + // wait for container to be healthy + eventq := r.adapter.events(ctx) + + var healthErr error + for { + select { + case event := <-eventq: + if !r.matchevent(event) { + continue + } + + switch event.Action { + case "die": // exit on terminal events + ctnr, err := r.adapter.inspect(ctx) + if err != nil { + return errors.Wrap(err, "die event received") + } else if ctnr.State.ExitCode != 0 { + return &exitError{code: ctnr.State.ExitCode, cause: healthErr} + } + + return nil + case "destroy": + // If we get here, something has gone wrong but we want to exit + // and report anyways. 
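+			// For reference, the terminal transitions handled by this loop:
+			//
+			//	"die" (non-zero exit)      → *exitError wrapping healthErr
+			//	"destroy"                  → ErrContainerDestroyed
+			//	"health_status: unhealthy" → Shutdown, then wait for "die"
+			//	"health_status: healthy"   → activate binding, return nil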
+ return ErrContainerDestroyed + case "health_status: unhealthy": + // in this case, we stop the container and report unhealthy status + if err := r.Shutdown(ctx); err != nil { + return errors.Wrap(err, "unhealthy container shutdown failed") + } + // set health check error, and wait for container to fully exit ("die" event) + healthErr = ErrContainerUnhealthy + case "health_status: healthy": + if err := r.adapter.activateServiceBinding(); err != nil { + log.G(ctx).WithError(err).Errorf("failed to activate service binding for container %s after healthy event", r.adapter.container.name()) + return err + } + return nil + } + case <-ctx.Done(): + return ctx.Err() + case <-r.closed: + return r.err + } + } +} + +// Wait on the container to exit. +func (r *controller) Wait(pctx context.Context) error { + if err := r.checkClosed(); err != nil { + return err + } + + ctx, cancel := context.WithCancel(pctx) + defer cancel() + + healthErr := make(chan error, 1) + go func() { + ectx, cancel := context.WithCancel(ctx) // cancel event context on first event + defer cancel() + if err := r.checkHealth(ectx); err == ErrContainerUnhealthy { + healthErr <- ErrContainerUnhealthy + if err := r.Shutdown(ectx); err != nil { + log.G(ectx).WithError(err).Debug("shutdown failed on unhealthy") + } + } + }() + + waitC, err := r.adapter.wait(ctx) + if err != nil { + return err + } + + if status := <-waitC; status.ExitCode() != 0 { + exitErr := &exitError{ + code: status.ExitCode(), + } + + // Set the cause if it is knowable. + select { + case e := <-healthErr: + exitErr.cause = e + default: + if status.Err() != nil { + exitErr.cause = status.Err() + } + } + + return exitErr + } + + return nil +} + +func (r *controller) hasServiceBinding() bool { + if r.task == nil { + return false + } + + // service is attached to a network besides the default bridge + for _, na := range r.task.Networks { + if na.Network == nil || + na.Network.DriverState == nil || + na.Network.DriverState.Name == "bridge" && na.Network.Spec.Annotations.Name == "bridge" { + continue + } + return true + } + + return false +} + +// Shutdown the container cleanly. +func (r *controller) Shutdown(ctx context.Context) error { + if err := r.checkClosed(); err != nil { + return err + } + + if r.cancelPull != nil { + r.cancelPull() + } + + if r.hasServiceBinding() { + // remove container from service binding + if err := r.adapter.deactivateServiceBinding(); err != nil { + log.G(ctx).WithError(err).Warningf("failed to deactivate service binding for container %s", r.adapter.container.name()) + // Don't return an error here, because failure to deactivate + // the service binding is expected if the container was never + // started. + } + + // add a delay for gossip converge + // TODO(dongluochen): this delay shoud be configurable to fit different cluster size and network delay. + time.Sleep(defaultGossipConvergeDelay) + } + + if err := r.adapter.shutdown(ctx); err != nil { + if isUnknownContainer(err) || isStoppedContainer(err) { + return nil + } + + return err + } + + return nil +} + +// Terminate the container, with force. +func (r *controller) Terminate(ctx context.Context) error { + if err := r.checkClosed(); err != nil { + return err + } + + if r.cancelPull != nil { + r.cancelPull() + } + + if err := r.adapter.terminate(ctx); err != nil { + if isUnknownContainer(err) { + return nil + } + + return err + } + + return nil +} + +// Remove the container and its resources. 
+func (r *controller) Remove(ctx context.Context) error { + if err := r.checkClosed(); err != nil { + return err + } + + if r.cancelPull != nil { + r.cancelPull() + } + + // It may be necessary to shut down the task before removing it. + if err := r.Shutdown(ctx); err != nil { + if isUnknownContainer(err) { + return nil + } + // This may fail if the task was already shut down. + log.G(ctx).WithError(err).Debug("shutdown failed on removal") + } + + // Try removing networks referenced in this task in case this + // task is the last one referencing it + if err := r.adapter.removeNetworks(ctx); err != nil { + if isUnknownContainer(err) { + return nil + } + return err + } + + if err := r.adapter.remove(ctx); err != nil { + if isUnknownContainer(err) { + return nil + } + + return err + } + return nil +} + +// waitReady waits for a container to be "ready". +// Ready means it's past the started state. +func (r *controller) waitReady(pctx context.Context) error { + if err := r.checkClosed(); err != nil { + return err + } + + ctx, cancel := context.WithCancel(pctx) + defer cancel() + + eventq := r.adapter.events(ctx) + + ctnr, err := r.adapter.inspect(ctx) + if err != nil { + if !isUnknownContainer(err) { + return errors.Wrap(err, "inspect container failed") + } + } else { + switch ctnr.State.Status { + case "running", "exited", "dead": + return nil + } + } + + for { + select { + case event := <-eventq: + if !r.matchevent(event) { + continue + } + + switch event.Action { + case "start": + return nil + } + case <-ctx.Done(): + return ctx.Err() + case <-r.closed: + return r.err + } + } +} + +func (r *controller) Logs(ctx context.Context, publisher exec.LogPublisher, options api.LogSubscriptionOptions) error { + if err := r.checkClosed(); err != nil { + return err + } + + // if we're following, wait for this container to be ready. there is a + // problem here: if the container will never be ready (for example, it has + // been totally deleted) then this will wait forever. however, this doesn't + // actually cause any UI issues, and shouldn't be a problem. the stuck wait + // will go away when the follow (context) is canceled. + if options.Follow { + if err := r.waitReady(ctx); err != nil { + return errors.Wrap(err, "container not ready for logs") + } + } + // if we're not following, we're not gonna wait for the container to be + // ready. just call logs. if the container isn't ready, the call will fail + // and return an error. no big deal, we don't care, we only want the logs + // we can get RIGHT NOW with no follow + + logsContext, cancel := context.WithCancel(ctx) + msgs, err := r.adapter.logs(logsContext, options) + defer cancel() + if err != nil { + return errors.Wrap(err, "failed getting container logs") + } + + var ( + // use a rate limiter to keep things under control but also provides some + // ability coalesce messages. 
+ limiter = rate.NewLimiter(rate.Every(time.Second), 10<<20) // 10 MB/s + msgctx = api.LogContext{ + NodeID: r.task.NodeID, + ServiceID: r.task.ServiceID, + TaskID: r.task.ID, + } + ) + + for { + msg, ok := <-msgs + if !ok { + // we're done here, no more messages + return nil + } + + if msg.Err != nil { + // the defered cancel closes the adapter's log stream + return msg.Err + } + + // wait here for the limiter to catch up + if err := limiter.WaitN(ctx, len(msg.Line)); err != nil { + return errors.Wrap(err, "failed rate limiter") + } + tsp, err := gogotypes.TimestampProto(msg.Timestamp) + if err != nil { + return errors.Wrap(err, "failed to convert timestamp") + } + var stream api.LogStream + if msg.Source == "stdout" { + stream = api.LogStreamStdout + } else if msg.Source == "stderr" { + stream = api.LogStreamStderr + } + + // parse the details out of the Attrs map + attrs := []api.LogAttr{} + for k, v := range msg.Attrs { + attr := api.LogAttr{Key: k, Value: v} + attrs = append(attrs, attr) + } + + if err := publisher.Publish(ctx, api.LogMessage{ + Context: msgctx, + Timestamp: tsp, + Stream: stream, + Attrs: attrs, + Data: msg.Line, + }); err != nil { + return errors.Wrap(err, "failed to publish log message") + } + } +} + +// Close the runner and clean up any ephemeral resources. +func (r *controller) Close() error { + select { + case <-r.closed: + return r.err + default: + if r.cancelPull != nil { + r.cancelPull() + } + + r.err = exec.ErrControllerClosed + close(r.closed) + } + return nil +} + +func (r *controller) matchevent(event events.Message) bool { + if event.Type != events.ContainerEventType { + return false + } + + // TODO(stevvooe): Filter based on ID matching, in addition to name. + + // Make sure the events are for this container. + if event.Actor.Attributes["name"] != r.adapter.container.name() { + return false + } + + return true +} + +func (r *controller) checkClosed() error { + select { + case <-r.closed: + return r.err + default: + return nil + } +} + +func parseContainerStatus(ctnr types.ContainerJSON) (*api.ContainerStatus, error) { + status := &api.ContainerStatus{ + ContainerID: ctnr.ID, + PID: int32(ctnr.State.Pid), + ExitCode: int32(ctnr.State.ExitCode), + } + + return status, nil +} + +func parsePortStatus(ctnr types.ContainerJSON) (*api.PortStatus, error) { + status := &api.PortStatus{} + + if ctnr.NetworkSettings != nil && len(ctnr.NetworkSettings.Ports) > 0 { + exposedPorts, err := parsePortMap(ctnr.NetworkSettings.Ports) + if err != nil { + return nil, err + } + status.Ports = exposedPorts + } + + return status, nil +} + +func parsePortMap(portMap nat.PortMap) ([]*api.PortConfig, error) { + exposedPorts := make([]*api.PortConfig, 0, len(portMap)) + + for portProtocol, mapping := range portMap { + parts := strings.SplitN(string(portProtocol), "/", 2) + if len(parts) != 2 { + return nil, fmt.Errorf("invalid port mapping: %s", portProtocol) + } + + port, err := strconv.ParseUint(parts[0], 10, 16) + if err != nil { + return nil, err + } + + protocol := api.ProtocolTCP + switch strings.ToLower(parts[1]) { + case "tcp": + protocol = api.ProtocolTCP + case "udp": + protocol = api.ProtocolUDP + default: + return nil, fmt.Errorf("invalid protocol: %s", parts[1]) + } + + for _, binding := range mapping { + hostPort, err := strconv.ParseUint(binding.HostPort, 10, 16) + if err != nil { + return nil, err + } + + // TODO(aluzzardi): We're losing the port `name` here since + // there's no way to retrieve it back from the Engine. 
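+			// For illustration (hypothetical values): an entry
+			// "8080/tcp" → [{HostPort: "80"}] becomes
+			// PortConfig{Protocol: ProtocolTCP, TargetPort: 8080,
+			// PublishedPort: 80, PublishMode: PublishModeHost}.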
+ exposedPorts = append(exposedPorts, &api.PortConfig{ + PublishMode: api.PublishModeHost, + Protocol: protocol, + TargetPort: uint32(port), + PublishedPort: uint32(hostPort), + }) + } + } + + return exposedPorts, nil +} + +type exitError struct { + code int + cause error +} + +func (e *exitError) Error() string { + if e.cause != nil { + return fmt.Sprintf("task: non-zero exit (%v): %v", e.code, e.cause) + } + + return fmt.Sprintf("task: non-zero exit (%v)", e.code) +} + +func (e *exitError) ExitCode() int { + return int(e.code) +} + +func (e *exitError) Cause() error { + return e.cause +} + +// checkHealth blocks until unhealthy container is detected or ctx exits +func (r *controller) checkHealth(ctx context.Context) error { + eventq := r.adapter.events(ctx) + + for { + select { + case <-ctx.Done(): + return nil + case <-r.closed: + return nil + case event := <-eventq: + if !r.matchevent(event) { + continue + } + + switch event.Action { + case "health_status: unhealthy": + return ErrContainerUnhealthy + } + } + } +} diff --git a/components/engine/daemon/cluster/executor/container/errors.go b/components/engine/daemon/cluster/executor/container/errors.go new file mode 100644 index 0000000000..535d9b5706 --- /dev/null +++ b/components/engine/daemon/cluster/executor/container/errors.go @@ -0,0 +1,17 @@ +package container + +import ( + "errors" +) + +var ( + // ErrImageRequired returned if a task is missing the image definition. + ErrImageRequired = errors.New("dockerexec: image required") + + // ErrContainerDestroyed returned when a container is prematurely destroyed + // during a wait call. + ErrContainerDestroyed = errors.New("dockerexec: container destroyed") + + // ErrContainerUnhealthy returned if controller detects the health check failure + ErrContainerUnhealthy = errors.New("dockerexec: unhealthy container") +) diff --git a/components/engine/daemon/cluster/executor/container/executor.go b/components/engine/daemon/cluster/executor/container/executor.go new file mode 100644 index 0000000000..03a00cc87b --- /dev/null +++ b/components/engine/daemon/cluster/executor/container/executor.go @@ -0,0 +1,241 @@ +package container + +import ( + "fmt" + "sort" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/network" + swarmtypes "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/daemon/cluster/controllers/plugin" + executorpkg "github.com/docker/docker/daemon/cluster/executor" + clustertypes "github.com/docker/docker/daemon/cluster/provider" + networktypes "github.com/docker/libnetwork/types" + "github.com/docker/swarmkit/agent" + "github.com/docker/swarmkit/agent/exec" + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/api/naming" + "golang.org/x/net/context" +) + +type executor struct { + backend executorpkg.Backend + dependencies exec.DependencyManager +} + +// NewExecutor returns an executor from the docker client. +func NewExecutor(b executorpkg.Backend) exec.Executor { + return &executor{ + backend: b, + dependencies: agent.NewDependencyManager(), + } +} + +// Describe returns the underlying node description from the docker client. 
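+// Among other things it merges v1 and enabled v2 plugins, parses engine
+// labels of the form "key=value" (the last value wins on duplicates, e.g.
+// ["a=b", "a=c"] yields a=c), and reports CPU capacity as
+// info.NCPU * 1e9 NanoCPUs.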
+func (e *executor) Describe(ctx context.Context) (*api.NodeDescription, error) { + info, err := e.backend.SystemInfo() + if err != nil { + return nil, err + } + + plugins := map[api.PluginDescription]struct{}{} + addPlugins := func(typ string, names []string) { + for _, name := range names { + plugins[api.PluginDescription{ + Type: typ, + Name: name, + }] = struct{}{} + } + } + + // add v1 plugins + addPlugins("Volume", info.Plugins.Volume) + // Add builtin driver "overlay" (the only builtin multi-host driver) to + // the plugin list by default. + addPlugins("Network", append([]string{"overlay"}, info.Plugins.Network...)) + addPlugins("Authorization", info.Plugins.Authorization) + addPlugins("Log", info.Plugins.Log) + + // add v2 plugins + v2Plugins, err := e.backend.PluginManager().List(filters.NewArgs()) + if err == nil { + for _, plgn := range v2Plugins { + for _, typ := range plgn.Config.Interface.Types { + if typ.Prefix != "docker" || !plgn.Enabled { + continue + } + plgnTyp := typ.Capability + switch typ.Capability { + case "volumedriver": + plgnTyp = "Volume" + case "networkdriver": + plgnTyp = "Network" + case "logdriver": + plgnTyp = "Log" + } + + plugins[api.PluginDescription{ + Type: plgnTyp, + Name: plgn.Name, + }] = struct{}{} + } + } + } + + pluginFields := make([]api.PluginDescription, 0, len(plugins)) + for k := range plugins { + pluginFields = append(pluginFields, k) + } + + sort.Sort(sortedPlugins(pluginFields)) + + // parse []string labels into a map[string]string + labels := map[string]string{} + for _, l := range info.Labels { + stringSlice := strings.SplitN(l, "=", 2) + // this will take the last value in the list for a given key + // ideally, one shouldn't assign multiple values to the same key + if len(stringSlice) > 1 { + labels[stringSlice[0]] = stringSlice[1] + } + } + + description := &api.NodeDescription{ + Hostname: info.Name, + Platform: &api.Platform{ + Architecture: info.Architecture, + OS: info.OSType, + }, + Engine: &api.EngineDescription{ + EngineVersion: info.ServerVersion, + Labels: labels, + Plugins: pluginFields, + }, + Resources: &api.Resources{ + NanoCPUs: int64(info.NCPU) * 1e9, + MemoryBytes: info.MemTotal, + }, + } + + return description, nil +} + +func (e *executor) Configure(ctx context.Context, node *api.Node) error { + na := node.Attachment + if na == nil { + e.backend.ReleaseIngress() + return nil + } + + options := types.NetworkCreate{ + Driver: na.Network.DriverState.Name, + IPAM: &network.IPAM{ + Driver: na.Network.IPAM.Driver.Name, + }, + Options: na.Network.DriverState.Options, + Ingress: true, + CheckDuplicate: true, + } + + for _, ic := range na.Network.IPAM.Configs { + c := network.IPAMConfig{ + Subnet: ic.Subnet, + IPRange: ic.Range, + Gateway: ic.Gateway, + } + options.IPAM.Config = append(options.IPAM.Config, c) + } + + _, err := e.backend.SetupIngress(clustertypes.NetworkCreateRequest{ + ID: na.Network.ID, + NetworkCreateRequest: types.NetworkCreateRequest{ + Name: na.Network.Spec.Annotations.Name, + NetworkCreate: options, + }, + }, na.Addresses[0]) + + return err +} + +// Controller returns a docker container runner. 
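+// Dispatch, for reference: attachment tasks get a networkAttacherController;
+// generic runtimes resolve through naming.Runtime, with RuntimePlugin handled
+// by the plugin controller; container tasks get the container controller
+// defined in this package.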
+func (e *executor) Controller(t *api.Task) (exec.Controller, error) { + dependencyGetter := agent.Restrict(e.dependencies, t) + + if t.Spec.GetAttachment() != nil { + return newNetworkAttacherController(e.backend, t, dependencyGetter) + } + + var ctlr exec.Controller + switch r := t.Spec.GetRuntime().(type) { + case *api.TaskSpec_Generic: + logrus.WithFields(logrus.Fields{ + "kind": r.Generic.Kind, + "type_url": r.Generic.Payload.TypeUrl, + }).Debug("custom runtime requested") + runtimeKind, err := naming.Runtime(t.Spec) + if err != nil { + return ctlr, err + } + switch runtimeKind { + case string(swarmtypes.RuntimePlugin): + c, err := plugin.NewController() + if err != nil { + return ctlr, err + } + ctlr = c + default: + return ctlr, fmt.Errorf("unsupported runtime type: %q", r.Generic.Kind) + } + case *api.TaskSpec_Container: + c, err := newController(e.backend, t, dependencyGetter) + if err != nil { + return ctlr, err + } + ctlr = c + default: + return ctlr, fmt.Errorf("unsupported runtime: %q", r) + } + + return ctlr, nil +} + +func (e *executor) SetNetworkBootstrapKeys(keys []*api.EncryptionKey) error { + nwKeys := []*networktypes.EncryptionKey{} + for _, key := range keys { + nwKey := &networktypes.EncryptionKey{ + Subsystem: key.Subsystem, + Algorithm: int32(key.Algorithm), + Key: make([]byte, len(key.Key)), + LamportTime: key.LamportTime, + } + copy(nwKey.Key, key.Key) + nwKeys = append(nwKeys, nwKey) + } + e.backend.SetNetworkBootstrapKeys(nwKeys) + + return nil +} + +func (e *executor) Secrets() exec.SecretsManager { + return e.dependencies.Secrets() +} + +func (e *executor) Configs() exec.ConfigsManager { + return e.dependencies.Configs() +} + +type sortedPlugins []api.PluginDescription + +func (sp sortedPlugins) Len() int { return len(sp) } + +func (sp sortedPlugins) Swap(i, j int) { sp[i], sp[j] = sp[j], sp[i] } + +func (sp sortedPlugins) Less(i, j int) bool { + if sp[i].Type != sp[j].Type { + return sp[i].Type < sp[j].Type + } + return sp[i].Name < sp[j].Name +} diff --git a/components/engine/daemon/cluster/executor/container/health_test.go b/components/engine/daemon/cluster/executor/container/health_test.go new file mode 100644 index 0000000000..4abf0999b4 --- /dev/null +++ b/components/engine/daemon/cluster/executor/container/health_test.go @@ -0,0 +1,102 @@ +// +build !windows + +package container + +import ( + "testing" + "time" + + containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/docker/container" + "github.com/docker/docker/daemon" + "github.com/docker/docker/daemon/events" + "github.com/docker/swarmkit/api" + "golang.org/x/net/context" +) + +func TestHealthStates(t *testing.T) { + + // set up environment: events, task, container .... 
+ e := events.New() + _, l, _ := e.Subscribe() + defer e.Evict(l) + + task := &api.Task{ + ID: "id", + ServiceID: "sid", + Spec: api.TaskSpec{ + Runtime: &api.TaskSpec_Container{ + Container: &api.ContainerSpec{ + Image: "image_name", + Labels: map[string]string{ + "com.docker.swarm.task.id": "id", + }, + }, + }, + }, + Annotations: api.Annotations{Name: "name"}, + } + + c := &container.Container{ + CommonContainer: container.CommonContainer{ + ID: "id", + Name: "name", + Config: &containertypes.Config{ + Image: "image_name", + Labels: map[string]string{ + "com.docker.swarm.task.id": "id", + }, + }, + }, + } + + daemon := &daemon.Daemon{ + EventsService: e, + } + + controller, err := newController(daemon, task, nil) + if err != nil { + t.Fatalf("create controller fail %v", err) + } + + errChan := make(chan error, 1) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + // fire checkHealth + go func() { + err := controller.checkHealth(ctx) + select { + case errChan <- err: + case <-ctx.Done(): + } + }() + + // send an event and expect to get expectedErr + // if expectedErr is nil, shouldn't get any error + logAndExpect := func(msg string, expectedErr error) { + daemon.LogContainerEvent(c, msg) + + timer := time.NewTimer(1 * time.Second) + defer timer.Stop() + + select { + case err := <-errChan: + if err != expectedErr { + t.Fatalf("expect error %v, but get %v", expectedErr, err) + } + case <-timer.C: + if expectedErr != nil { + t.Fatal("time limit exceeded, didn't get expected error") + } + } + } + + // events that are ignored by checkHealth + logAndExpect("health_status: running", nil) + logAndExpect("health_status: healthy", nil) + logAndExpect("die", nil) + + // unhealthy event will be caught by checkHealth + logAndExpect("health_status: unhealthy", ErrContainerUnhealthy) +} diff --git a/components/engine/daemon/cluster/executor/container/validate.go b/components/engine/daemon/cluster/executor/container/validate.go new file mode 100644 index 0000000000..af17f5b810 --- /dev/null +++ b/components/engine/daemon/cluster/executor/container/validate.go @@ -0,0 +1,40 @@ +package container + +import ( + "errors" + "fmt" + "path/filepath" + + "github.com/docker/swarmkit/api" +) + +func validateMounts(mounts []api.Mount) error { + for _, mount := range mounts { + // Target must always be absolute + if !filepath.IsAbs(mount.Target) { + return fmt.Errorf("invalid mount target, must be an absolute path: %s", mount.Target) + } + + switch mount.Type { + // The checks on abs paths are required due to the container API confusing + // volume mounts as bind mounts when the source is absolute (and vice-versa) + // See #25253 + // TODO: This is probably not necessary once #22373 is merged + case api.MountTypeBind: + if !filepath.IsAbs(mount.Source) { + return fmt.Errorf("invalid bind mount source, must be an absolute path: %s", mount.Source) + } + case api.MountTypeVolume: + if filepath.IsAbs(mount.Source) { + return fmt.Errorf("invalid volume mount source, must not be an absolute path: %s", mount.Source) + } + case api.MountTypeTmpfs: + if mount.Source != "" { + return errors.New("invalid tmpfs source, source must be empty") + } + default: + return fmt.Errorf("invalid mount type: %s", mount.Type) + } + } + return nil +} diff --git a/components/engine/daemon/cluster/executor/container/validate_test.go b/components/engine/daemon/cluster/executor/container/validate_test.go new file mode 100644 index 0000000000..9d98e2c008 --- /dev/null +++ 
b/components/engine/daemon/cluster/executor/container/validate_test.go @@ -0,0 +1,141 @@ +package container + +import ( + "io/ioutil" + "os" + "strings" + "testing" + + "github.com/docker/docker/daemon" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/swarmkit/api" +) + +func newTestControllerWithMount(m api.Mount) (*controller, error) { + return newController(&daemon.Daemon{}, &api.Task{ + ID: stringid.GenerateRandomID(), + ServiceID: stringid.GenerateRandomID(), + Spec: api.TaskSpec{ + Runtime: &api.TaskSpec_Container{ + Container: &api.ContainerSpec{ + Image: "image_name", + Labels: map[string]string{ + "com.docker.swarm.task.id": "id", + }, + Mounts: []api.Mount{m}, + }, + }, + }, + }, nil) +} + +func TestControllerValidateMountBind(t *testing.T) { + // with improper source + if _, err := newTestControllerWithMount(api.Mount{ + Type: api.MountTypeBind, + Source: "foo", + Target: testAbsPath, + }); err == nil || !strings.Contains(err.Error(), "invalid bind mount source") { + t.Fatalf("expected error, got: %v", err) + } + + // with non-existing source + if _, err := newTestControllerWithMount(api.Mount{ + Type: api.MountTypeBind, + Source: testAbsNonExistent, + Target: testAbsPath, + }); err != nil { + t.Fatalf("controller should not error at creation: %v", err) + } + + // with proper source + tmpdir, err := ioutil.TempDir("", "TestControllerValidateMountBind") + if err != nil { + t.Fatalf("failed to create temp dir: %v", err) + } + defer os.Remove(tmpdir) + + if _, err := newTestControllerWithMount(api.Mount{ + Type: api.MountTypeBind, + Source: tmpdir, + Target: testAbsPath, + }); err != nil { + t.Fatalf("expected no error, got: %v", err) + } +} + +func TestControllerValidateMountVolume(t *testing.T) { + // with improper source + if _, err := newTestControllerWithMount(api.Mount{ + Type: api.MountTypeVolume, + Source: testAbsPath, + Target: testAbsPath, + }); err == nil || !strings.Contains(err.Error(), "invalid volume mount source") { + t.Fatalf("expected error, got: %v", err) + } + + // with proper source + if _, err := newTestControllerWithMount(api.Mount{ + Type: api.MountTypeVolume, + Source: "foo", + Target: testAbsPath, + }); err != nil { + t.Fatalf("expected no error, got: %v", err) + } +} + +func TestControllerValidateMountTarget(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestControllerValidateMountTarget") + if err != nil { + t.Fatalf("failed to create temp dir: %v", err) + } + defer os.Remove(tmpdir) + + // with improper target + if _, err := newTestControllerWithMount(api.Mount{ + Type: api.MountTypeBind, + Source: testAbsPath, + Target: "foo", + }); err == nil || !strings.Contains(err.Error(), "invalid mount target") { + t.Fatalf("expected error, got: %v", err) + } + + // with proper target + if _, err := newTestControllerWithMount(api.Mount{ + Type: api.MountTypeBind, + Source: tmpdir, + Target: testAbsPath, + }); err != nil { + t.Fatalf("expected no error, got: %v", err) + } +} + +func TestControllerValidateMountTmpfs(t *testing.T) { + // with improper target + if _, err := newTestControllerWithMount(api.Mount{ + Type: api.MountTypeTmpfs, + Source: "foo", + Target: testAbsPath, + }); err == nil || !strings.Contains(err.Error(), "invalid tmpfs source") { + t.Fatalf("expected error, got: %v", err) + } + + // with proper target + if _, err := newTestControllerWithMount(api.Mount{ + Type: api.MountTypeTmpfs, + Target: testAbsPath, + }); err != nil { + t.Fatalf("expected no error, got: %v", err) + } +} + +func TestControllerValidateMountInvalidType(t
*testing.T) { + // with improper target + if _, err := newTestControllerWithMount(api.Mount{ + Type: api.Mount_MountType(9999), + Source: "foo", + Target: testAbsPath, + }); err == nil || !strings.Contains(err.Error(), "invalid mount type") { + t.Fatalf("expected error, got: %v", err) + } +} diff --git a/components/engine/daemon/cluster/executor/container/validate_unix_test.go b/components/engine/daemon/cluster/executor/container/validate_unix_test.go new file mode 100644 index 0000000000..c616eeef93 --- /dev/null +++ b/components/engine/daemon/cluster/executor/container/validate_unix_test.go @@ -0,0 +1,8 @@ +// +build !windows + +package container + +const ( + testAbsPath = "/foo" + testAbsNonExistent = "/some-non-existing-host-path/" +) diff --git a/components/engine/daemon/cluster/executor/container/validate_windows_test.go b/components/engine/daemon/cluster/executor/container/validate_windows_test.go new file mode 100644 index 0000000000..c346451d3d --- /dev/null +++ b/components/engine/daemon/cluster/executor/container/validate_windows_test.go @@ -0,0 +1,8 @@ +// +build windows + +package container + +const ( + testAbsPath = `c:\foo` + testAbsNonExistent = `c:\some-non-existing-host-path\` +) diff --git a/components/engine/daemon/cluster/filters.go b/components/engine/daemon/cluster/filters.go new file mode 100644 index 0000000000..0a004af223 --- /dev/null +++ b/components/engine/daemon/cluster/filters.go @@ -0,0 +1,121 @@ +package cluster + +import ( + "fmt" + "strings" + + "github.com/docker/docker/api/types/filters" + runconfigopts "github.com/docker/docker/runconfig/opts" + swarmapi "github.com/docker/swarmkit/api" +) + +func newListNodesFilters(filter filters.Args) (*swarmapi.ListNodesRequest_Filters, error) { + accepted := map[string]bool{ + "name": true, + "id": true, + "label": true, + "role": true, + "membership": true, + } + if err := filter.Validate(accepted); err != nil { + return nil, err + } + f := &swarmapi.ListNodesRequest_Filters{ + NamePrefixes: filter.Get("name"), + IDPrefixes: filter.Get("id"), + Labels: runconfigopts.ConvertKVStringsToMap(filter.Get("label")), + } + + for _, r := range filter.Get("role") { + if role, ok := swarmapi.NodeRole_value[strings.ToUpper(r)]; ok { + f.Roles = append(f.Roles, swarmapi.NodeRole(role)) + } else if r != "" { + return nil, fmt.Errorf("Invalid role filter: '%s'", r) + } + } + + for _, a := range filter.Get("membership") { + if membership, ok := swarmapi.NodeSpec_Membership_value[strings.ToUpper(a)]; ok { + f.Memberships = append(f.Memberships, swarmapi.NodeSpec_Membership(membership)) + } else if a != "" { + return nil, fmt.Errorf("Invalid membership filter: '%s'", a) + } + } + + return f, nil +} + +func newListTasksFilters(filter filters.Args, transformFunc func(filters.Args) error) (*swarmapi.ListTasksRequest_Filters, error) { + accepted := map[string]bool{ + "name": true, + "id": true, + "label": true, + "service": true, + "node": true, + "desired-state": true, + // UpToDate is not meant to be exposed to users. It's for + // internal use in checking create/update progress. Therefore, + // we prefix it with a '_'. 
+ "_up-to-date": true, + } + if err := filter.Validate(accepted); err != nil { + return nil, err + } + if transformFunc != nil { + if err := transformFunc(filter); err != nil { + return nil, err + } + } + f := &swarmapi.ListTasksRequest_Filters{ + NamePrefixes: filter.Get("name"), + IDPrefixes: filter.Get("id"), + Labels: runconfigopts.ConvertKVStringsToMap(filter.Get("label")), + ServiceIDs: filter.Get("service"), + NodeIDs: filter.Get("node"), + UpToDate: len(filter.Get("_up-to-date")) != 0, + } + + for _, s := range filter.Get("desired-state") { + if state, ok := swarmapi.TaskState_value[strings.ToUpper(s)]; ok { + f.DesiredStates = append(f.DesiredStates, swarmapi.TaskState(state)) + } else if s != "" { + return nil, fmt.Errorf("Invalid desired-state filter: '%s'", s) + } + } + + return f, nil +} + +func newListSecretsFilters(filter filters.Args) (*swarmapi.ListSecretsRequest_Filters, error) { + accepted := map[string]bool{ + "names": true, + "name": true, + "id": true, + "label": true, + } + if err := filter.Validate(accepted); err != nil { + return nil, err + } + return &swarmapi.ListSecretsRequest_Filters{ + Names: filter.Get("names"), + NamePrefixes: filter.Get("name"), + IDPrefixes: filter.Get("id"), + Labels: runconfigopts.ConvertKVStringsToMap(filter.Get("label")), + }, nil +} + +func newListConfigsFilters(filter filters.Args) (*swarmapi.ListConfigsRequest_Filters, error) { + accepted := map[string]bool{ + "name": true, + "id": true, + "label": true, + } + if err := filter.Validate(accepted); err != nil { + return nil, err + } + return &swarmapi.ListConfigsRequest_Filters{ + NamePrefixes: filter.Get("name"), + IDPrefixes: filter.Get("id"), + Labels: runconfigopts.ConvertKVStringsToMap(filter.Get("label")), + }, nil +} diff --git a/components/engine/daemon/cluster/filters_test.go b/components/engine/daemon/cluster/filters_test.go new file mode 100644 index 0000000000..fd0c8c3692 --- /dev/null +++ b/components/engine/daemon/cluster/filters_test.go @@ -0,0 +1,102 @@ +package cluster + +import ( + "testing" + + "github.com/docker/docker/api/types/filters" +) + +func TestNewListSecretsFilters(t *testing.T) { + validNameFilter := filters.NewArgs() + validNameFilter.Add("name", "test_name") + + validIDFilter := filters.NewArgs() + validIDFilter.Add("id", "7c9009d6720f6de3b492f5") + + validLabelFilter := filters.NewArgs() + validLabelFilter.Add("label", "type=test") + validLabelFilter.Add("label", "storage=ssd") + validLabelFilter.Add("label", "memory") + + validNamesFilter := filters.NewArgs() + validNamesFilter.Add("names", "test_name") + + validAllFilter := filters.NewArgs() + validAllFilter.Add("name", "nodeName") + validAllFilter.Add("id", "7c9009d6720f6de3b492f5") + validAllFilter.Add("label", "type=test") + validAllFilter.Add("label", "memory") + validAllFilter.Add("names", "test_name") + + validFilters := []filters.Args{ + validNameFilter, + validIDFilter, + validLabelFilter, + validNamesFilter, + validAllFilter, + } + + invalidTypeFilter := filters.NewArgs() + invalidTypeFilter.Add("nonexist", "aaaa") + + invalidFilters := []filters.Args{ + invalidTypeFilter, + } + + for _, filter := range validFilters { + if _, err := newListSecretsFilters(filter); err != nil { + t.Fatalf("Should get no error, got %v", err) + } + } + + for _, filter := range invalidFilters { + if _, err := newListSecretsFilters(filter); err == nil { + t.Fatalf("Should get an error for filter %v, while got nil", filter) + } + } +} + +func TestNewListConfigsFilters(t *testing.T) { + validNameFilter := 
filters.NewArgs() + validNameFilter.Add("name", "test_name") + + validIDFilter := filters.NewArgs() + validIDFilter.Add("id", "7c9009d6720f6de3b492f5") + + validLabelFilter := filters.NewArgs() + validLabelFilter.Add("label", "type=test") + validLabelFilter.Add("label", "storage=ssd") + validLabelFilter.Add("label", "memory") + + validAllFilter := filters.NewArgs() + validAllFilter.Add("name", "nodeName") + validAllFilter.Add("id", "7c9009d6720f6de3b492f5") + validAllFilter.Add("label", "type=test") + validAllFilter.Add("label", "memory") + + validFilters := []filters.Args{ + validNameFilter, + validIDFilter, + validLabelFilter, + validAllFilter, + } + + invalidTypeFilter := filters.NewArgs() + invalidTypeFilter.Add("nonexist", "aaaa") + + invalidFilters := []filters.Args{ + invalidTypeFilter, + } + + for _, filter := range validFilters { + if _, err := newListConfigsFilters(filter); err != nil { + t.Fatalf("Should get no error, got %v", err) + } + } + + for _, filter := range invalidFilters { + if _, err := newListConfigsFilters(filter); err == nil { + t.Fatalf("Should get an error for filter %v, while got nil", filter) + } + } +} diff --git a/components/engine/daemon/cluster/helpers.go b/components/engine/daemon/cluster/helpers.go new file mode 100644 index 0000000000..a74118c422 --- /dev/null +++ b/components/engine/daemon/cluster/helpers.go @@ -0,0 +1,245 @@ +package cluster + +import ( + "fmt" + + "github.com/docker/docker/api/errors" + swarmapi "github.com/docker/swarmkit/api" + "golang.org/x/net/context" +) + +func getSwarm(ctx context.Context, c swarmapi.ControlClient) (*swarmapi.Cluster, error) { + rl, err := c.ListClusters(ctx, &swarmapi.ListClustersRequest{}) + if err != nil { + return nil, err + } + + if len(rl.Clusters) == 0 { + return nil, errors.NewRequestNotFoundError(errNoSwarm) + } + + // TODO: assume one cluster only + return rl.Clusters[0], nil +} + +func getNode(ctx context.Context, c swarmapi.ControlClient, input string) (*swarmapi.Node, error) { + // GetNode to match via full ID. + if rg, err := c.GetNode(ctx, &swarmapi.GetNodeRequest{NodeID: input}); err == nil { + return rg.Node, nil + } + + // If any error (including NotFound), ListNodes to match via full name. + rl, err := c.ListNodes(ctx, &swarmapi.ListNodesRequest{ + Filters: &swarmapi.ListNodesRequest_Filters{ + Names: []string{input}, + }, + }) + if err != nil || len(rl.Nodes) == 0 { + // If any error or 0 result, ListNodes to match via ID prefix. + rl, err = c.ListNodes(ctx, &swarmapi.ListNodesRequest{ + Filters: &swarmapi.ListNodesRequest_Filters{ + IDPrefixes: []string{input}, + }, + }) + } + if err != nil { + return nil, err + } + + if len(rl.Nodes) == 0 { + err := fmt.Errorf("node %s not found", input) + return nil, errors.NewRequestNotFoundError(err) + } + + if l := len(rl.Nodes); l > 1 { + return nil, fmt.Errorf("node %s is ambiguous (%d matches found)", input, l) + } + + return rl.Nodes[0], nil +} + +func getService(ctx context.Context, c swarmapi.ControlClient, input string, insertDefaults bool) (*swarmapi.Service, error) { + // GetService to match via full ID. + if rg, err := c.GetService(ctx, &swarmapi.GetServiceRequest{ServiceID: input, InsertDefaults: insertDefaults}); err == nil { + return rg.Service, nil + } + + // If any error (including NotFound), ListServices to match via full name. 
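The lookup helpers in helpers.go all follow the same three-step resolution strategy: try the input as a full ID, fall back to an exact-name filter, then fall back to an ID-prefix filter, failing when the result is missing or ambiguous. A minimal, generic sketch of that strategy follows; `resolveOne` and its function parameters are illustrative stand-ins for the swarmkit control-API calls, not part of the engine.

```go
package sketch

import "fmt"

// resolveOne sketches the ID -> name -> ID-prefix lookup used by getNode,
// getService, getTask, getSecret, getConfig and getNetwork.
func resolveOne(
	input string,
	byID func(string) (string, error),
	byName, byPrefix func(string) ([]string, error),
) (string, error) {
	if id, err := byID(input); err == nil {
		return id, nil // a full-ID match wins immediately
	}
	matches, err := byName(input) // any error (including NotFound) falls back to exact name
	if err != nil || len(matches) == 0 {
		matches, err = byPrefix(input) // any error or empty result falls back to ID prefix
	}
	if err != nil {
		return "", err
	}
	switch len(matches) {
	case 0:
		return "", fmt.Errorf("%s not found", input)
	case 1:
		return matches[0], nil
	default:
		return "", fmt.Errorf("%s is ambiguous (%d matches found)", input, len(matches))
	}
}
```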
+ rl, err := c.ListServices(ctx, &swarmapi.ListServicesRequest{ + Filters: &swarmapi.ListServicesRequest_Filters{ + Names: []string{input}, + }, + }) + if err != nil || len(rl.Services) == 0 { + // If any error or 0 result, ListServices to match via ID prefix. + rl, err = c.ListServices(ctx, &swarmapi.ListServicesRequest{ + Filters: &swarmapi.ListServicesRequest_Filters{ + IDPrefixes: []string{input}, + }, + }) + } + if err != nil { + return nil, err + } + + if len(rl.Services) == 0 { + err := fmt.Errorf("service %s not found", input) + return nil, errors.NewRequestNotFoundError(err) + } + + if l := len(rl.Services); l > 1 { + return nil, fmt.Errorf("service %s is ambiguous (%d matches found)", input, l) + } + + if !insertDefaults { + return rl.Services[0], nil + } + + rg, err := c.GetService(ctx, &swarmapi.GetServiceRequest{ServiceID: rl.Services[0].ID, InsertDefaults: true}) + if err == nil { + return rg.Service, nil + } + return nil, err +} + +func getTask(ctx context.Context, c swarmapi.ControlClient, input string) (*swarmapi.Task, error) { + // GetTask to match via full ID. + if rg, err := c.GetTask(ctx, &swarmapi.GetTaskRequest{TaskID: input}); err == nil { + return rg.Task, nil + } + + // If any error (including NotFound), ListTasks to match via full name. + rl, err := c.ListTasks(ctx, &swarmapi.ListTasksRequest{ + Filters: &swarmapi.ListTasksRequest_Filters{ + Names: []string{input}, + }, + }) + if err != nil || len(rl.Tasks) == 0 { + // If any error or 0 result, ListTasks to match via ID prefix. + rl, err = c.ListTasks(ctx, &swarmapi.ListTasksRequest{ + Filters: &swarmapi.ListTasksRequest_Filters{ + IDPrefixes: []string{input}, + }, + }) + } + if err != nil { + return nil, err + } + + if len(rl.Tasks) == 0 { + err := fmt.Errorf("task %s not found", input) + return nil, errors.NewRequestNotFoundError(err) + } + + if l := len(rl.Tasks); l > 1 { + return nil, fmt.Errorf("task %s is ambiguous (%d matches found)", input, l) + } + + return rl.Tasks[0], nil +} + +func getSecret(ctx context.Context, c swarmapi.ControlClient, input string) (*swarmapi.Secret, error) { + // attempt to lookup secret by full ID + if rg, err := c.GetSecret(ctx, &swarmapi.GetSecretRequest{SecretID: input}); err == nil { + return rg.Secret, nil + } + + // If any error (including NotFound), ListSecrets to match via full name. + rl, err := c.ListSecrets(ctx, &swarmapi.ListSecretsRequest{ + Filters: &swarmapi.ListSecretsRequest_Filters{ + Names: []string{input}, + }, + }) + if err != nil || len(rl.Secrets) == 0 { + // If any error or 0 result, ListSecrets to match via ID prefix. + rl, err = c.ListSecrets(ctx, &swarmapi.ListSecretsRequest{ + Filters: &swarmapi.ListSecretsRequest_Filters{ + IDPrefixes: []string{input}, + }, + }) + } + if err != nil { + return nil, err + } + + if len(rl.Secrets) == 0 { + err := fmt.Errorf("secret %s not found", input) + return nil, errors.NewRequestNotFoundError(err) + } + + if l := len(rl.Secrets); l > 1 { + return nil, fmt.Errorf("secret %s is ambiguous (%d matches found)", input, l) + } + + return rl.Secrets[0], nil +} + +func getConfig(ctx context.Context, c swarmapi.ControlClient, input string) (*swarmapi.Config, error) { + // attempt to lookup config by full ID + if rg, err := c.GetConfig(ctx, &swarmapi.GetConfigRequest{ConfigID: input}); err == nil { + return rg.Config, nil + } + + // If any error (including NotFound), ListConfigs to match via full name. 
+ rl, err := c.ListConfigs(ctx, &swarmapi.ListConfigsRequest{ + Filters: &swarmapi.ListConfigsRequest_Filters{ + Names: []string{input}, + }, + }) + if err != nil || len(rl.Configs) == 0 { + // If any error or 0 result, ListConfigs to match via ID prefix. + rl, err = c.ListConfigs(ctx, &swarmapi.ListConfigsRequest{ + Filters: &swarmapi.ListConfigsRequest_Filters{ + IDPrefixes: []string{input}, + }, + }) + } + if err != nil { + return nil, err + } + + if len(rl.Configs) == 0 { + err := fmt.Errorf("config %s not found", input) + return nil, errors.NewRequestNotFoundError(err) + } + + if l := len(rl.Configs); l > 1 { + return nil, fmt.Errorf("config %s is ambiguous (%d matches found)", input, l) + } + + return rl.Configs[0], nil +} + +func getNetwork(ctx context.Context, c swarmapi.ControlClient, input string) (*swarmapi.Network, error) { + // GetNetwork to match via full ID. + if rg, err := c.GetNetwork(ctx, &swarmapi.GetNetworkRequest{NetworkID: input}); err == nil { + return rg.Network, nil + } + + // If any error (including NotFound), ListNetworks to match via ID prefix and full name. + rl, err := c.ListNetworks(ctx, &swarmapi.ListNetworksRequest{ + Filters: &swarmapi.ListNetworksRequest_Filters{ + Names: []string{input}, + }, + }) + if err != nil || len(rl.Networks) == 0 { + rl, err = c.ListNetworks(ctx, &swarmapi.ListNetworksRequest{ + Filters: &swarmapi.ListNetworksRequest_Filters{ + IDPrefixes: []string{input}, + }, + }) + } + if err != nil { + return nil, err + } + + if len(rl.Networks) == 0 { + return nil, fmt.Errorf("network %s not found", input) + } + + if l := len(rl.Networks); l > 1 { + return nil, fmt.Errorf("network %s is ambiguous (%d matches found)", input, l) + } + + return rl.Networks[0], nil +} diff --git a/components/engine/daemon/cluster/listen_addr.go b/components/engine/daemon/cluster/listen_addr.go new file mode 100644 index 0000000000..993ccb62ad --- /dev/null +++ b/components/engine/daemon/cluster/listen_addr.go @@ -0,0 +1,302 @@ +package cluster + +import ( + "errors" + "fmt" + "net" +) + +var ( + errNoSuchInterface = errors.New("no such interface") + errNoIP = errors.New("could not find the system's IP address") + errMustSpecifyListenAddr = errors.New("must specify a listening address because the address to advertise is not recognized as a system address, and a system's IP address to use could not be uniquely identified") + errBadNetworkIdentifier = errors.New("must specify a valid IP address or interface name") + errBadListenAddr = errors.New("listen address must be an IP address or network interface (with optional port number)") + errBadAdvertiseAddr = errors.New("advertise address must be a non-zero IP address or network interface (with optional port number)") + errBadDataPathAddr = errors.New("data path address must be a non-zero IP address or network interface (without a port number)") + errBadDefaultAdvertiseAddr = errors.New("default advertise address must be a non-zero IP address or network interface (without a port number)") +) + +func resolveListenAddr(specifiedAddr string) (string, string, error) { + specifiedHost, specifiedPort, err := net.SplitHostPort(specifiedAddr) + if err != nil { + return "", "", fmt.Errorf("could not parse listen address %s", specifiedAddr) + } + // Does the host component match any of the interface names on the + // system? If so, use the address from that interface. 
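resolveListenAddr above requires the listen address to carry a port, then treats the host part as either an interface name or a literal IP. A small standard-library illustration of the split it relies on (the values are examples only):

```go
package sketch

import (
	"fmt"
	"net"
)

func splitListenAddr() {
	// The host part may be an interface name; the port must be present.
	host, port, err := net.SplitHostPort("eth0:2377")
	fmt.Println(host, port, err) // eth0 2377 <nil>

	// Without a port, SplitHostPort fails, which resolveListenAddr reports
	// as "could not parse listen address".
	_, _, err = net.SplitHostPort("192.168.1.1")
	fmt.Println(err != nil) // true
}
```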
+ specifiedIP, err := resolveInputIPAddr(specifiedHost, true) + if err != nil { + if err == errBadNetworkIdentifier { + err = errBadListenAddr + } + return "", "", err + } + + return specifiedIP.String(), specifiedPort, nil +} + +func (c *Cluster) resolveAdvertiseAddr(advertiseAddr, listenAddrPort string) (string, string, error) { + // Approach: + // - If an advertise address is specified, use that. Resolve the + // interface's address if an interface was specified in + // advertiseAddr. Fill in the port from listenAddrPort if necessary. + // - If DefaultAdvertiseAddr is not empty, use that with the port from + // listenAddrPort. Resolve the interface's address from + // if an interface name was specified in DefaultAdvertiseAddr. + // - Otherwise, try to autodetect the system's address. Use the port in + // listenAddrPort with this address if autodetection succeeds. + + if advertiseAddr != "" { + advertiseHost, advertisePort, err := net.SplitHostPort(advertiseAddr) + if err != nil { + // Not a host:port specification + advertiseHost = advertiseAddr + advertisePort = listenAddrPort + } + // Does the host component match any of the interface names on the + // system? If so, use the address from that interface. + advertiseIP, err := resolveInputIPAddr(advertiseHost, false) + if err != nil { + if err == errBadNetworkIdentifier { + err = errBadAdvertiseAddr + } + return "", "", err + } + + return advertiseIP.String(), advertisePort, nil + } + + if c.config.DefaultAdvertiseAddr != "" { + // Does the default advertise address component match any of the + // interface names on the system? If so, use the address from + // that interface. + defaultAdvertiseIP, err := resolveInputIPAddr(c.config.DefaultAdvertiseAddr, false) + if err != nil { + if err == errBadNetworkIdentifier { + err = errBadDefaultAdvertiseAddr + } + return "", "", err + } + + return defaultAdvertiseIP.String(), listenAddrPort, nil + } + + systemAddr, err := c.resolveSystemAddr() + if err != nil { + return "", "", err + } + return systemAddr.String(), listenAddrPort, nil +} + +func resolveDataPathAddr(dataPathAddr string) (string, error) { + if dataPathAddr == "" { + // dataPathAddr is not defined + return "", nil + } + // If a data path flag is specified try to resolve the IP address. + dataPathIP, err := resolveInputIPAddr(dataPathAddr, false) + if err != nil { + if err == errBadNetworkIdentifier { + err = errBadDataPathAddr + } + return "", err + } + return dataPathIP.String(), nil +} + +func resolveInterfaceAddr(specifiedInterface string) (net.IP, error) { + // Use a specific interface's IP address. 
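The comment block at the top of resolveAdvertiseAddr describes a strict precedence: an explicit advertise address, then the daemon-wide default, then autodetection. Condensed into one function, with `resolve` and `autodetect` as placeholders for the helpers in this file, the chain looks roughly like this:

```go
package sketch

// pickAdvertise is a hedged sketch of resolveAdvertiseAddr's precedence;
// resolve turns an IP or interface name into a host (port handling elided),
// autodetect stands in for resolveSystemAddr.
func pickAdvertise(
	advertiseAddr, defaultAdvertise, listenPort string,
	resolve func(string) (string, error),
	autodetect func() (string, error),
) (host, port string, err error) {
	if advertiseAddr != "" { // 1. explicit flag wins
		host, err = resolve(advertiseAddr)
		return host, listenPort, err
	}
	if defaultAdvertise != "" { // 2. daemon default
		host, err = resolve(defaultAdvertise)
		return host, listenPort, err
	}
	host, err = autodetect() // 3. system autodetection
	return host, listenPort, err
}
```

Note the real code also takes the port from the advertise address itself when one is given; the sketch keeps only the listen port for brevity.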
+ intf, err := net.InterfaceByName(specifiedInterface) + if err != nil { + return nil, errNoSuchInterface + } + + addrs, err := intf.Addrs() + if err != nil { + return nil, err + } + + var interfaceAddr4, interfaceAddr6 net.IP + + for _, addr := range addrs { + ipAddr, ok := addr.(*net.IPNet) + + if ok { + if ipAddr.IP.To4() != nil { + // IPv4 + if interfaceAddr4 != nil { + return nil, fmt.Errorf("interface %s has more than one IPv4 address (%s and %s)", specifiedInterface, interfaceAddr4, ipAddr.IP) + } + interfaceAddr4 = ipAddr.IP + } else { + // IPv6 + if interfaceAddr6 != nil { + return nil, fmt.Errorf("interface %s has more than one IPv6 address (%s and %s)", specifiedInterface, interfaceAddr6, ipAddr.IP) + } + interfaceAddr6 = ipAddr.IP + } + } + } + + if interfaceAddr4 == nil && interfaceAddr6 == nil { + return nil, fmt.Errorf("interface %s has no usable IPv4 or IPv6 address", specifiedInterface) + } + + // In the case that there's exactly one IPv4 address + // and exactly one IPv6 address, favor IPv4 over IPv6. + if interfaceAddr4 != nil { + return interfaceAddr4, nil + } + return interfaceAddr6, nil +} + +// resolveInputIPAddr tries to resolve the IP address from the string passed as input +// - tries to match the string as an interface name, if so returns the IP address associated with it +// - on failure of previous step tries to parse the string as an IP address itself +// if succeeds returns the IP address +func resolveInputIPAddr(input string, isUnspecifiedValid bool) (net.IP, error) { + // Try to see if it is an interface name + interfaceAddr, err := resolveInterfaceAddr(input) + if err == nil { + return interfaceAddr, nil + } + // String matched interface but there is a potential ambiguity to be resolved + if err != errNoSuchInterface { + return nil, err + } + + // String is not an interface check if it is a valid IP + if ip := net.ParseIP(input); ip != nil && (isUnspecifiedValid || !ip.IsUnspecified()) { + return ip, nil + } + + // Not valid IP found + return nil, errBadNetworkIdentifier +} + +func (c *Cluster) resolveSystemAddrViaSubnetCheck() (net.IP, error) { + // Use the system's only IP address, or fail if there are + // multiple addresses to choose from. Skip interfaces which + // are managed by docker via subnet check. + interfaces, err := net.Interfaces() + if err != nil { + return nil, err + } + + var systemAddr net.IP + var systemInterface string + + // List Docker-managed subnets + v4Subnets, v6Subnets := c.config.NetworkSubnetsProvider.Subnets() + +ifaceLoop: + for _, intf := range interfaces { + // Skip inactive interfaces and loopback interfaces + if (intf.Flags&net.FlagUp == 0) || (intf.Flags&net.FlagLoopback) != 0 { + continue + } + + addrs, err := intf.Addrs() + if err != nil { + continue + } + + var interfaceAddr4, interfaceAddr6 net.IP + + for _, addr := range addrs { + ipAddr, ok := addr.(*net.IPNet) + + // Skip loopback and link-local addresses + if !ok || !ipAddr.IP.IsGlobalUnicast() { + continue + } + + if ipAddr.IP.To4() != nil { + // IPv4 + + // Ignore addresses in subnets that are managed by Docker. + for _, subnet := range v4Subnets { + if subnet.Contains(ipAddr.IP) { + continue ifaceLoop + } + } + + if interfaceAddr4 != nil { + return nil, errMultipleIPs(intf.Name, intf.Name, interfaceAddr4, ipAddr.IP) + } + + interfaceAddr4 = ipAddr.IP + } else { + // IPv6 + + // Ignore addresses in subnets that are managed by Docker. 
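Autodetection via subnet check skips any candidate address that falls inside a Docker-managed subnet, using nothing more than net.IPNet.Contains against the provider's subnet list. In isolation (addresses here are illustrative):

```go
package sketch

import (
	"fmt"
	"net"
)

func skipManagedSubnet() {
	// e.g. the default docker0 bridge network
	_, managed, _ := net.ParseCIDR("172.17.0.0/16")
	candidate := net.ParseIP("172.17.0.5")
	if managed.Contains(candidate) {
		fmt.Println("skip: candidate is on a docker-managed subnet")
	}
}
```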
+ for _, subnet := range v6Subnets { + if subnet.Contains(ipAddr.IP) { + continue ifaceLoop + } + } + + if interfaceAddr6 != nil { + return nil, errMultipleIPs(intf.Name, intf.Name, interfaceAddr6, ipAddr.IP) + } + + interfaceAddr6 = ipAddr.IP + } + } + + // In the case that this interface has exactly one IPv4 address + // and exactly one IPv6 address, favor IPv4 over IPv6. + if interfaceAddr4 != nil { + if systemAddr != nil { + return nil, errMultipleIPs(systemInterface, intf.Name, systemAddr, interfaceAddr4) + } + systemAddr = interfaceAddr4 + systemInterface = intf.Name + } else if interfaceAddr6 != nil { + if systemAddr != nil { + return nil, errMultipleIPs(systemInterface, intf.Name, systemAddr, interfaceAddr6) + } + systemAddr = interfaceAddr6 + systemInterface = intf.Name + } + } + + if systemAddr == nil { + return nil, errNoIP + } + + return systemAddr, nil +} + +func listSystemIPs() []net.IP { + interfaces, err := net.Interfaces() + if err != nil { + return nil + } + + var systemAddrs []net.IP + + for _, intf := range interfaces { + addrs, err := intf.Addrs() + if err != nil { + continue + } + + for _, addr := range addrs { + ipAddr, ok := addr.(*net.IPNet) + + if ok { + systemAddrs = append(systemAddrs, ipAddr.IP) + } + } + } + + return systemAddrs +} + +func errMultipleIPs(interfaceA, interfaceB string, addrA, addrB net.IP) error { + if interfaceA == interfaceB { + return fmt.Errorf("could not choose an IP address to advertise since this system has multiple addresses on interface %s (%s and %s)", interfaceA, addrA, addrB) + } + return fmt.Errorf("could not choose an IP address to advertise since this system has multiple addresses on different interfaces (%s on %s and %s on %s)", addrA, interfaceA, addrB, interfaceB) +} diff --git a/components/engine/daemon/cluster/listen_addr_linux.go b/components/engine/daemon/cluster/listen_addr_linux.go new file mode 100644 index 0000000000..3d4f239bda --- /dev/null +++ b/components/engine/daemon/cluster/listen_addr_linux.go @@ -0,0 +1,91 @@ +// +build linux + +package cluster + +import ( + "net" + + "github.com/vishvananda/netlink" +) + +func (c *Cluster) resolveSystemAddr() (net.IP, error) { + // Use the system's only device IP address, or fail if there are + // multiple addresses to choose from. + interfaces, err := netlink.LinkList() + if err != nil { + return nil, err + } + + var ( + systemAddr net.IP + systemInterface string + deviceFound bool + ) + + for _, intf := range interfaces { + // Skip non device or inactive interfaces + if intf.Type() != "device" || intf.Attrs().Flags&net.FlagUp == 0 { + continue + } + + addrs, err := netlink.AddrList(intf, netlink.FAMILY_ALL) + if err != nil { + continue + } + + var interfaceAddr4, interfaceAddr6 net.IP + + for _, addr := range addrs { + ipAddr := addr.IPNet.IP + + // Skip loopback and link-local addresses + if !ipAddr.IsGlobalUnicast() { + continue + } + + // At least one non-loopback device is found and it is administratively up + deviceFound = true + + if ipAddr.To4() != nil { + if interfaceAddr4 != nil { + return nil, errMultipleIPs(intf.Attrs().Name, intf.Attrs().Name, interfaceAddr4, ipAddr) + } + interfaceAddr4 = ipAddr + } else { + if interfaceAddr6 != nil { + return nil, errMultipleIPs(intf.Attrs().Name, intf.Attrs().Name, interfaceAddr6, ipAddr) + } + interfaceAddr6 = ipAddr + } + } + + // In the case that this interface has exactly one IPv4 address + // and exactly one IPv6 address, favor IPv4 over IPv6. 
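On Linux, resolveSystemAddr first narrows the candidate set to links whose netlink type is "device", so veths, bridges, and other virtual links never compete for the advertise address. A sketch of just that filter, assuming the same github.com/vishvananda/netlink dependency the file already uses:

```go
package sketch

import (
	"net"

	"github.com/vishvananda/netlink"
)

// physicalUpLinks returns the names of administratively-up physical links,
// mirroring the filter at the top of the Linux resolveSystemAddr.
func physicalUpLinks() ([]string, error) {
	links, err := netlink.LinkList()
	if err != nil {
		return nil, err
	}
	var names []string
	for _, l := range links {
		if l.Type() == "device" && l.Attrs().Flags&net.FlagUp != 0 {
			names = append(names, l.Attrs().Name)
		}
	}
	return names, nil
}
```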
+ if interfaceAddr4 != nil { + if systemAddr != nil { + return nil, errMultipleIPs(systemInterface, intf.Attrs().Name, systemAddr, interfaceAddr4) + } + systemAddr = interfaceAddr4 + systemInterface = intf.Attrs().Name + } else if interfaceAddr6 != nil { + if systemAddr != nil { + return nil, errMultipleIPs(systemInterface, intf.Attrs().Name, systemAddr, interfaceAddr6) + } + systemAddr = interfaceAddr6 + systemInterface = intf.Attrs().Name + } + } + + if systemAddr == nil { + if !deviceFound { + // If no non-loopback device type interface is found, + // fall back to the regular auto-detection mechanism. + // This is to cover the case where docker is running + // inside a container (eths are in fact veths). + return c.resolveSystemAddrViaSubnetCheck() + } + return nil, errNoIP + } + + return systemAddr, nil +} diff --git a/components/engine/daemon/cluster/listen_addr_others.go b/components/engine/daemon/cluster/listen_addr_others.go new file mode 100644 index 0000000000..4e845f5c8f --- /dev/null +++ b/components/engine/daemon/cluster/listen_addr_others.go @@ -0,0 +1,9 @@ +// +build !linux,!solaris + +package cluster + +import "net" + +func (c *Cluster) resolveSystemAddr() (net.IP, error) { + return c.resolveSystemAddrViaSubnetCheck() +} diff --git a/components/engine/daemon/cluster/listen_addr_solaris.go b/components/engine/daemon/cluster/listen_addr_solaris.go new file mode 100644 index 0000000000..57a894b251 --- /dev/null +++ b/components/engine/daemon/cluster/listen_addr_solaris.go @@ -0,0 +1,57 @@ +package cluster + +import ( + "bufio" + "fmt" + "net" + "os/exec" + "strings" +) + +func (c *Cluster) resolveSystemAddr() (net.IP, error) { + defRouteCmd := "/usr/sbin/ipadm show-addr -p -o addr " + + "`/usr/sbin/route get default | /usr/bin/grep interface | " + + "/usr/bin/awk '{print $2}'`" + out, err := exec.Command("/usr/bin/bash", "-c", defRouteCmd).Output() + if err != nil { + return nil, fmt.Errorf("cannot get default route: %v", err) + } + + defInterface := strings.SplitN(string(out), "/", 2) + defInterfaceIP := net.ParseIP(defInterface[0]) + + return defInterfaceIP, nil +} + +func listSystemIPs() []net.IP { + var systemAddrs []net.IP + cmd := exec.Command("/usr/sbin/ipadm", "show-addr", "-p", "-o", "addr") + cmdReader, err := cmd.StdoutPipe() + if err != nil { + return nil + } + + if err := cmd.Start(); err != nil { + return nil + } + + scanner := bufio.NewScanner(cmdReader) + go func() { + for scanner.Scan() { + text := scanner.Text() + nameAddrPair := strings.SplitN(text, "/", 2) + // Let go of loopback interfaces and docker interfaces + systemAddrs = append(systemAddrs, net.ParseIP(nameAddrPair[0])) + } + }() + + if err := scanner.Err(); err != nil { + fmt.Printf("scan underwent err: %+v\n", err) + } + + if err := cmd.Wait(); err != nil { + fmt.Printf("run command wait: %+v\n", err) + } + + return systemAddrs +} diff --git a/components/engine/daemon/cluster/networks.go b/components/engine/daemon/cluster/networks.go new file mode 100644 index 0000000000..1906c37bd2 --- /dev/null +++ b/components/engine/daemon/cluster/networks.go @@ -0,0 +1,317 @@ +package cluster + +import ( + "fmt" + + "github.com/Sirupsen/logrus" + apierrors "github.com/docker/docker/api/errors" + apitypes "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/network" + types "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/daemon/cluster/convert" + "github.com/docker/docker/runconfig" + swarmapi "github.com/docker/swarmkit/api" + "github.com/pkg/errors" + 
"golang.org/x/net/context" +) + +// GetNetworks returns all current cluster managed networks. +func (c *Cluster) GetNetworks() ([]apitypes.NetworkResource, error) { + list, err := c.getNetworks(nil) + if err != nil { + return nil, err + } + removePredefinedNetworks(&list) + return list, nil +} + +func removePredefinedNetworks(networks *[]apitypes.NetworkResource) { + if networks == nil { + return + } + var idxs []int + for i, n := range *networks { + if v, ok := n.Labels["com.docker.swarm.predefined"]; ok && v == "true" { + idxs = append(idxs, i) + } + } + for i, idx := range idxs { + idx -= i + *networks = append((*networks)[:idx], (*networks)[idx+1:]...) + } +} + +func (c *Cluster) getNetworks(filters *swarmapi.ListNetworksRequest_Filters) ([]apitypes.NetworkResource, error) { + c.mu.RLock() + defer c.mu.RUnlock() + + state := c.currentNodeState() + if !state.IsActiveManager() { + return nil, c.errNoManager(state) + } + + ctx, cancel := c.getRequestContext() + defer cancel() + + r, err := state.controlClient.ListNetworks(ctx, &swarmapi.ListNetworksRequest{Filters: filters}) + if err != nil { + return nil, err + } + + networks := make([]apitypes.NetworkResource, 0, len(r.Networks)) + + for _, network := range r.Networks { + networks = append(networks, convert.BasicNetworkFromGRPC(*network)) + } + + return networks, nil +} + +// GetNetwork returns a cluster network by an ID. +func (c *Cluster) GetNetwork(input string) (apitypes.NetworkResource, error) { + var network *swarmapi.Network + + if err := c.lockedManagerAction(func(ctx context.Context, state nodeState) error { + n, err := getNetwork(ctx, state.controlClient, input) + if err != nil { + return err + } + network = n + return nil + }); err != nil { + return apitypes.NetworkResource{}, err + } + return convert.BasicNetworkFromGRPC(*network), nil +} + +// GetNetworksByName returns cluster managed networks by name. +// It is ok to have multiple networks here. #18864 +func (c *Cluster) GetNetworksByName(name string) ([]apitypes.NetworkResource, error) { + // Note that swarmapi.GetNetworkRequest.Name is not functional. + // So we cannot just use that with c.GetNetwork. + return c.getNetworks(&swarmapi.ListNetworksRequest_Filters{ + Names: []string{name}, + }) +} + +func attacherKey(target, containerID string) string { + return containerID + ":" + target +} + +// UpdateAttachment signals the attachment config to the attachment +// waiter who is trying to start or attach the container to the +// network. +func (c *Cluster) UpdateAttachment(target, containerID string, config *network.NetworkingConfig) error { + c.mu.Lock() + attacher, ok := c.attachers[attacherKey(target, containerID)] + if !ok || attacher == nil { + c.mu.Unlock() + return fmt.Errorf("could not find attacher for container %s to network %s", containerID, target) + } + if attacher.inProgress { + logrus.Debugf("Discarding redundant notice of resource allocation on network %s for task id %s", target, attacher.taskID) + c.mu.Unlock() + return nil + } + attacher.inProgress = true + c.mu.Unlock() + + attacher.attachWaitCh <- config + + return nil +} + +// WaitForDetachment waits for the container to stop or detach from +// the network. 
+func (c *Cluster) WaitForDetachment(ctx context.Context, networkName, networkID, taskID, containerID string) error { + c.mu.RLock() + attacher, ok := c.attachers[attacherKey(networkName, containerID)] + if !ok { + attacher, ok = c.attachers[attacherKey(networkID, containerID)] + } + state := c.currentNodeState() + if state.swarmNode == nil || state.swarmNode.Agent() == nil { + c.mu.RUnlock() + return errors.New("invalid cluster node while waiting for detachment") + } + + c.mu.RUnlock() + agent := state.swarmNode.Agent() + if ok && attacher != nil && + attacher.detachWaitCh != nil && + attacher.attachCompleteCh != nil { + // Attachment may be in progress still so wait for + // attachment to complete. + select { + case <-attacher.attachCompleteCh: + case <-ctx.Done(): + return ctx.Err() + } + + if attacher.taskID == taskID { + select { + case <-attacher.detachWaitCh: + case <-ctx.Done(): + return ctx.Err() + } + } + } + + return agent.ResourceAllocator().DetachNetwork(ctx, taskID) +} + +// AttachNetwork generates an attachment request towards the manager. +func (c *Cluster) AttachNetwork(target string, containerID string, addresses []string) (*network.NetworkingConfig, error) { + aKey := attacherKey(target, containerID) + c.mu.Lock() + state := c.currentNodeState() + if state.swarmNode == nil || state.swarmNode.Agent() == nil { + c.mu.Unlock() + return nil, errors.New("invalid cluster node while attaching to network") + } + if attacher, ok := c.attachers[aKey]; ok { + c.mu.Unlock() + return attacher.config, nil + } + + agent := state.swarmNode.Agent() + attachWaitCh := make(chan *network.NetworkingConfig) + detachWaitCh := make(chan struct{}) + attachCompleteCh := make(chan struct{}) + c.attachers[aKey] = &attacher{ + attachWaitCh: attachWaitCh, + attachCompleteCh: attachCompleteCh, + detachWaitCh: detachWaitCh, + } + c.mu.Unlock() + + ctx, cancel := c.getRequestContext() + defer cancel() + + taskID, err := agent.ResourceAllocator().AttachNetwork(ctx, containerID, target, addresses) + if err != nil { + c.mu.Lock() + delete(c.attachers, aKey) + c.mu.Unlock() + return nil, fmt.Errorf("Could not attach to network %s: %v", target, err) + } + + c.mu.Lock() + c.attachers[aKey].taskID = taskID + close(attachCompleteCh) + c.mu.Unlock() + + logrus.Debugf("Successfully attached to network %s with task id %s", target, taskID) + + release := func() { + ctx, cancel := c.getRequestContext() + defer cancel() + if err := agent.ResourceAllocator().DetachNetwork(ctx, taskID); err != nil { + logrus.Errorf("Failed remove network attachment %s to network %s on allocation failure: %v", + taskID, target, err) + } + } + + var config *network.NetworkingConfig + select { + case config = <-attachWaitCh: + case <-ctx.Done(): + release() + return nil, fmt.Errorf("attaching to network failed, make sure your network options are correct and check manager logs: %v", ctx.Err()) + } + + c.mu.Lock() + c.attachers[aKey].config = config + c.mu.Unlock() + + logrus.Debugf("Successfully allocated resources on network %s for task id %s", target, taskID) + + return config, nil +} + +// DetachNetwork unblocks the waiters waiting on WaitForDetachment so +// that a request to detach can be generated towards the manager. 
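The select at the end of AttachNetwork is a standard Go shape worth noting: wait on a result channel and the request context simultaneously, and roll back the half-made allocation if the context wins. Distilled (standard library only; names are illustrative):

```go
package sketch

import (
	"context"
	"fmt"
)

func waitOrRelease(ctx context.Context, configCh <-chan string, release func()) (string, error) {
	select {
	case cfg := <-configCh:
		return cfg, nil
	case <-ctx.Done():
		release() // detach so the manager doesn't leak the allocation
		return "", fmt.Errorf("attaching to network failed: %v", ctx.Err())
	}
}
```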
+func (c *Cluster) DetachNetwork(target string, containerID string) error { + aKey := attacherKey(target, containerID) + + c.mu.Lock() + attacher, ok := c.attachers[aKey] + delete(c.attachers, aKey) + c.mu.Unlock() + + if !ok { + return fmt.Errorf("could not find network attachment for container %s to network %s", containerID, target) + } + + close(attacher.detachWaitCh) + return nil +} + +// CreateNetwork creates a new cluster managed network. +func (c *Cluster) CreateNetwork(s apitypes.NetworkCreateRequest) (string, error) { + if runconfig.IsPreDefinedNetwork(s.Name) { + err := fmt.Errorf("%s is a pre-defined network and cannot be created", s.Name) + return "", apierrors.NewRequestForbiddenError(err) + } + + var resp *swarmapi.CreateNetworkResponse + if err := c.lockedManagerAction(func(ctx context.Context, state nodeState) error { + networkSpec := convert.BasicNetworkCreateToGRPC(s) + r, err := state.controlClient.CreateNetwork(ctx, &swarmapi.CreateNetworkRequest{Spec: &networkSpec}) + if err != nil { + return err + } + resp = r + return nil + }); err != nil { + return "", err + } + + return resp.Network.ID, nil +} + +// RemoveNetwork removes a cluster network. +func (c *Cluster) RemoveNetwork(input string) error { + return c.lockedManagerAction(func(ctx context.Context, state nodeState) error { + network, err := getNetwork(ctx, state.controlClient, input) + if err != nil { + return err + } + + _, err = state.controlClient.RemoveNetwork(ctx, &swarmapi.RemoveNetworkRequest{NetworkID: network.ID}) + return err + }) +} + +func (c *Cluster) populateNetworkID(ctx context.Context, client swarmapi.ControlClient, s *types.ServiceSpec) error { + // Always prefer NetworkAttachmentConfigs from TaskTemplate + // but fallback to service spec for backward compatibility + networks := s.TaskTemplate.Networks + if len(networks) == 0 { + networks = s.Networks + } + for i, n := range networks { + apiNetwork, err := getNetwork(ctx, client, n.Target) + if err != nil { + ln, _ := c.config.Backend.FindNetwork(n.Target) + if ln != nil && runconfig.IsPreDefinedNetwork(ln.Name()) { + // Need to retrieve the corresponding predefined swarm network + // and use its id for the request. + apiNetwork, err = getNetwork(ctx, client, ln.Name()) + if err != nil { + err = fmt.Errorf("could not find the corresponding predefined swarm network: %v", err) + return apierrors.NewRequestNotFoundError(err) + } + goto setid + } + if ln != nil && !ln.Info().Dynamic() { + err = fmt.Errorf("The network %s cannot be used with services. 
Only networks scoped to the swarm can be used, such as those created with the overlay driver.", ln.Name()) + return apierrors.NewRequestForbiddenError(err) + } + return err + } + setid: + networks[i].Target = apiNetwork.ID + } + return nil +} diff --git a/components/engine/daemon/cluster/noderunner.go b/components/engine/daemon/cluster/noderunner.go new file mode 100644 index 0000000000..2ec13b4639 --- /dev/null +++ b/components/engine/daemon/cluster/noderunner.go @@ -0,0 +1,362 @@ +package cluster + +import ( + "fmt" + "path/filepath" + "runtime" + "strings" + "sync" + "time" + + "github.com/Sirupsen/logrus" + types "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/daemon/cluster/executor/container" + lncluster "github.com/docker/libnetwork/cluster" + swarmapi "github.com/docker/swarmkit/api" + swarmnode "github.com/docker/swarmkit/node" + "github.com/pkg/errors" + "golang.org/x/net/context" + "google.golang.org/grpc" +) + +// nodeRunner implements a manager for continuously running swarmkit node, restarting them with backoff delays if needed. +type nodeRunner struct { + nodeState + mu sync.RWMutex + done chan struct{} // closed when swarmNode exits + ready chan struct{} // closed when swarmNode becomes active + reconnectDelay time.Duration + config nodeStartConfig + + repeatedRun bool + cancelReconnect func() + stopping bool + cluster *Cluster // only for accessing config helpers, never call any methods. TODO: change to config struct +} + +// nodeStartConfig holds configuration needed to start a new node. Exported +// fields of this structure are saved to disk in json. Unexported fields +// contain data that shouldn't be persisted between daemon reloads. +type nodeStartConfig struct { + // LocalAddr is this machine's local IP or hostname, if specified. + LocalAddr string + // RemoteAddr is the address that was given to "swarm join". It is used + // to find LocalAddr if necessary. + RemoteAddr string + // ListenAddr is the address we bind to, including a port. + ListenAddr string + // AdvertiseAddr is the address other nodes should connect to, + // including a port. + AdvertiseAddr string + // DataPathAddr is the address that has to be used for the data path + DataPathAddr string + + joinAddr string + forceNewCluster bool + joinToken string + lockKey []byte + autolock bool + availability types.NodeAvailability +} + +func (n *nodeRunner) Ready() chan error { + c := make(chan error, 1) + n.mu.RLock() + ready, done := n.ready, n.done + n.mu.RUnlock() + go func() { + select { + case <-ready: + case <-done: + } + select { + case <-ready: + default: + n.mu.RLock() + c <- n.err + n.mu.RUnlock() + } + close(c) + }() + return c +} + +func (n *nodeRunner) Start(conf nodeStartConfig) error { + n.mu.Lock() + defer n.mu.Unlock() + + n.reconnectDelay = initialReconnectDelay + + return n.start(conf) +} + +func (n *nodeRunner) start(conf nodeStartConfig) error { + var control string + if runtime.GOOS == "windows" { + control = `\\.\pipe\` + controlSocket + } else { + control = filepath.Join(n.cluster.runtimeRoot, controlSocket) + } + + // Hostname is not set here. 
Instead, it is obtained from + // the node description that is reported periodically + swarmnodeConfig := swarmnode.Config{ + ForceNewCluster: conf.forceNewCluster, + ListenControlAPI: control, + ListenRemoteAPI: conf.ListenAddr, + AdvertiseRemoteAPI: conf.AdvertiseAddr, + JoinAddr: conf.joinAddr, + StateDir: n.cluster.root, + JoinToken: conf.joinToken, + Executor: container.NewExecutor(n.cluster.config.Backend), + HeartbeatTick: 1, + ElectionTick: 3, + UnlockKey: conf.lockKey, + AutoLockManagers: conf.autolock, + PluginGetter: n.cluster.config.Backend.PluginGetter(), + } + if conf.availability != "" { + avail, ok := swarmapi.NodeSpec_Availability_value[strings.ToUpper(string(conf.availability))] + if !ok { + return fmt.Errorf("invalid Availability: %q", conf.availability) + } + swarmnodeConfig.Availability = swarmapi.NodeSpec_Availability(avail) + } + node, err := swarmnode.New(&swarmnodeConfig) + if err != nil { + return err + } + if err := node.Start(context.Background()); err != nil { + return err + } + + n.done = make(chan struct{}) + n.ready = make(chan struct{}) + n.swarmNode = node + n.config = conf + savePersistentState(n.cluster.root, conf) + + ctx, cancel := context.WithCancel(context.Background()) + + go func() { + n.handleNodeExit(node) + cancel() + }() + + go n.handleReadyEvent(ctx, node, n.ready) + go n.handleControlSocketChange(ctx, node) + + return nil +} + +func (n *nodeRunner) handleControlSocketChange(ctx context.Context, node *swarmnode.Node) { + for conn := range node.ListenControlSocket(ctx) { + n.mu.Lock() + if n.grpcConn != conn { + if conn == nil { + n.controlClient = nil + n.logsClient = nil + } else { + n.controlClient = swarmapi.NewControlClient(conn) + n.logsClient = swarmapi.NewLogsClient(conn) + // push store changes to daemon + go n.watchClusterEvents(ctx, conn) + } + } + n.grpcConn = conn + n.mu.Unlock() + n.cluster.SendClusterEvent(lncluster.EventSocketChange) + } +} + +func (n *nodeRunner) watchClusterEvents(ctx context.Context, conn *grpc.ClientConn) { + client := swarmapi.NewWatchClient(conn) + watch, err := client.Watch(ctx, &swarmapi.WatchRequest{ + Entries: []*swarmapi.WatchRequest_WatchEntry{ + { + Kind: "node", + Action: swarmapi.WatchActionKindCreate | swarmapi.WatchActionKindUpdate | swarmapi.WatchActionKindRemove, + }, + { + Kind: "service", + Action: swarmapi.WatchActionKindCreate | swarmapi.WatchActionKindUpdate | swarmapi.WatchActionKindRemove, + }, + { + Kind: "network", + Action: swarmapi.WatchActionKindCreate | swarmapi.WatchActionKindUpdate | swarmapi.WatchActionKindRemove, + }, + { + Kind: "secret", + Action: swarmapi.WatchActionKindCreate | swarmapi.WatchActionKindUpdate | swarmapi.WatchActionKindRemove, + }, + }, + IncludeOldObject: true, + }) + if err != nil { + logrus.WithError(err).Error("failed to watch cluster store") + return + } + for { + msg, err := watch.Recv() + if err != nil { + // store watch is broken + logrus.WithError(err).Error("failed to receive changes from store watch API") + return + } + select { + case <-ctx.Done(): + return + case n.cluster.watchStream <- msg: + } + } +} + +func (n *nodeRunner) handleReadyEvent(ctx context.Context, node *swarmnode.Node, ready chan struct{}) { + select { + case <-node.Ready(): + n.mu.Lock() + n.err = nil + n.mu.Unlock() + close(ready) + case <-ctx.Done(): + } + n.cluster.SendClusterEvent(lncluster.EventNodeReady) +} + +func (n *nodeRunner) handleNodeExit(node *swarmnode.Node) { + err := detectLockedError(node.Err(context.Background())) + if err != nil { + logrus.Errorf("cluster 
exited with error: %v", err) + } + n.mu.Lock() + n.swarmNode = nil + n.err = err + close(n.done) + select { + case <-n.ready: + n.enableReconnectWatcher() + default: + if n.repeatedRun { + n.enableReconnectWatcher() + } + } + n.repeatedRun = true + n.mu.Unlock() +} + +// Stop stops the current swarm node if it is running. +func (n *nodeRunner) Stop() error { + n.mu.Lock() + if n.cancelReconnect != nil { // between restarts + n.cancelReconnect() + n.cancelReconnect = nil + } + if n.swarmNode == nil { + n.mu.Unlock() + return nil + } + n.stopping = true + ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second) + defer cancel() + n.mu.Unlock() + if err := n.swarmNode.Stop(ctx); err != nil && !strings.Contains(err.Error(), "context canceled") { + return err + } + n.cluster.SendClusterEvent(lncluster.EventNodeLeave) + <-n.done + return nil +} + +func (n *nodeRunner) State() nodeState { + if n == nil { + return nodeState{status: types.LocalNodeStateInactive} + } + n.mu.RLock() + defer n.mu.RUnlock() + + ns := n.nodeState + + if ns.err != nil || n.cancelReconnect != nil { + if errors.Cause(ns.err) == errSwarmLocked { + ns.status = types.LocalNodeStateLocked + } else { + ns.status = types.LocalNodeStateError + } + } else { + select { + case <-n.ready: + ns.status = types.LocalNodeStateActive + default: + ns.status = types.LocalNodeStatePending + } + } + + return ns +} + +func (n *nodeRunner) enableReconnectWatcher() { + if n.stopping { + return + } + n.reconnectDelay *= 2 + if n.reconnectDelay > maxReconnectDelay { + n.reconnectDelay = maxReconnectDelay + } + logrus.Warnf("Restarting swarm in %.2f seconds", n.reconnectDelay.Seconds()) + delayCtx, cancel := context.WithTimeout(context.Background(), n.reconnectDelay) + n.cancelReconnect = cancel + + config := n.config + go func() { + <-delayCtx.Done() + if delayCtx.Err() != context.DeadlineExceeded { + return + } + n.mu.Lock() + defer n.mu.Unlock() + if n.stopping { + return + } + remotes := n.cluster.getRemoteAddressList() + if len(remotes) > 0 { + config.RemoteAddr = remotes[0] + } else { + config.RemoteAddr = "" + } + + config.joinAddr = config.RemoteAddr + if err := n.start(config); err != nil { + n.err = err + } + }() +} + +// nodeState represents information about the current state of the cluster and +// provides access to the grpc clients. +type nodeState struct { + swarmNode *swarmnode.Node + grpcConn *grpc.ClientConn + controlClient swarmapi.ControlClient + logsClient swarmapi.LogsClient + status types.LocalNodeState + actualLocalAddr string + err error +} + +// IsActiveManager returns true if node is a manager ready to accept control requests. It is safe to access the client properties if this returns true. +func (ns nodeState) IsActiveManager() bool { + return ns.controlClient != nil +} + +// IsManager returns true if node is a manager. +func (ns nodeState) IsManager() bool { + return ns.swarmNode != nil && ns.swarmNode.Manager() != nil +} + +// NodeID returns node's ID or empty string if node is inactive. 
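enableReconnectWatcher above is a capped exponential backoff, driven by a context deadline rather than a bare timer so that Stop can cancel a pending restart through cancelReconnect. The schedule itself, in isolation (the initial and maximum delays below are illustrative; the real constants live elsewhere in this package):

```go
package sketch

import "time"

type backoff struct {
	delay, max time.Duration
}

// next doubles the delay up to the cap, as enableReconnectWatcher does
// with reconnectDelay and maxReconnectDelay.
func (b *backoff) next() time.Duration {
	b.delay *= 2
	if b.delay > b.max {
		b.delay = b.max
	}
	return b.delay
}
```

A caller would seed `backoff{delay: time.Second, max: 30 * time.Second}` and wait `next()` before each restart attempt.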
+func (ns nodeState) NodeID() string { + if ns.swarmNode != nil { + return ns.swarmNode.NodeID() + } + return "" +} diff --git a/components/engine/daemon/cluster/nodes.go b/components/engine/daemon/cluster/nodes.go new file mode 100644 index 0000000000..839c8f78e5 --- /dev/null +++ b/components/engine/daemon/cluster/nodes.go @@ -0,0 +1,104 @@ +package cluster + +import ( + apierrors "github.com/docker/docker/api/errors" + apitypes "github.com/docker/docker/api/types" + types "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/daemon/cluster/convert" + swarmapi "github.com/docker/swarmkit/api" + "golang.org/x/net/context" +) + +// GetNodes returns a list of all nodes known to a cluster. +func (c *Cluster) GetNodes(options apitypes.NodeListOptions) ([]types.Node, error) { + c.mu.RLock() + defer c.mu.RUnlock() + + state := c.currentNodeState() + if !state.IsActiveManager() { + return nil, c.errNoManager(state) + } + + filters, err := newListNodesFilters(options.Filters) + if err != nil { + return nil, err + } + + ctx, cancel := c.getRequestContext() + defer cancel() + + r, err := state.controlClient.ListNodes( + ctx, + &swarmapi.ListNodesRequest{Filters: filters}) + if err != nil { + return nil, err + } + + nodes := make([]types.Node, 0, len(r.Nodes)) + + for _, node := range r.Nodes { + nodes = append(nodes, convert.NodeFromGRPC(*node)) + } + return nodes, nil +} + +// GetNode returns a node based on an ID. +func (c *Cluster) GetNode(input string) (types.Node, error) { + var node *swarmapi.Node + + if err := c.lockedManagerAction(func(ctx context.Context, state nodeState) error { + n, err := getNode(ctx, state.controlClient, input) + if err != nil { + return err + } + node = n + return nil + }); err != nil { + return types.Node{}, err + } + + return convert.NodeFromGRPC(*node), nil +} + +// UpdateNode updates existing nodes properties. +func (c *Cluster) UpdateNode(input string, version uint64, spec types.NodeSpec) error { + return c.lockedManagerAction(func(ctx context.Context, state nodeState) error { + nodeSpec, err := convert.NodeSpecToGRPC(spec) + if err != nil { + return apierrors.NewBadRequestError(err) + } + + ctx, cancel := c.getRequestContext() + defer cancel() + + currentNode, err := getNode(ctx, state.controlClient, input) + if err != nil { + return err + } + + _, err = state.controlClient.UpdateNode( + ctx, + &swarmapi.UpdateNodeRequest{ + NodeID: currentNode.ID, + Spec: &nodeSpec, + NodeVersion: &swarmapi.Version{ + Index: version, + }, + }, + ) + return err + }) +} + +// RemoveNode removes a node from a cluster +func (c *Cluster) RemoveNode(input string, force bool) error { + return c.lockedManagerAction(func(ctx context.Context, state nodeState) error { + node, err := getNode(ctx, state.controlClient, input) + if err != nil { + return err + } + + _, err = state.controlClient.RemoveNode(ctx, &swarmapi.RemoveNodeRequest{NodeID: node.ID, Force: force}) + return err + }) +} diff --git a/components/engine/daemon/cluster/provider/network.go b/components/engine/daemon/cluster/provider/network.go new file mode 100644 index 0000000000..f4c72ae13b --- /dev/null +++ b/components/engine/daemon/cluster/provider/network.go @@ -0,0 +1,37 @@ +package provider + +import "github.com/docker/docker/api/types" + +// NetworkCreateRequest is a request when creating a network. +type NetworkCreateRequest struct { + ID string + types.NetworkCreateRequest +} + +// NetworkCreateResponse is a response when creating a network. 
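UpdateNode, like the other mutating calls in this package, sends the object's last-read version back with the new spec so SwarmKit can reject writes based on stale state (optimistic concurrency). The caller-side contract is a read-modify-write; sketched generically, with `get` and `update` standing in for the Get/Update control-API pair:

```go
package sketch

import "fmt"

// updateWithVersion reads the current spec and its version, applies mutate,
// and submits the result; a stale version makes update fail rather than
// silently overwrite a concurrent change.
func updateWithVersion(
	get func() (spec string, version uint64, err error),
	update func(spec string, version uint64) error,
	mutate func(string) string,
) error {
	spec, version, err := get()
	if err != nil {
		return err
	}
	if err := update(mutate(spec), version); err != nil {
		return fmt.Errorf("update rejected (version may be stale): %w", err)
	}
	return nil
}
```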
+type NetworkCreateResponse struct { + ID string `json:"Id"` +} + +// VirtualAddress represents a virtual address. +type VirtualAddress struct { + IPv4 string + IPv6 string +} + +// PortConfig represents a port configuration. +type PortConfig struct { + Name string + Protocol int32 + TargetPort uint32 + PublishedPort uint32 +} + +// ServiceConfig represents a service configuration. +type ServiceConfig struct { + ID string + Name string + Aliases map[string][]string + VirtualAddresses map[string]*VirtualAddress + ExposedPorts []*PortConfig +} diff --git a/components/engine/daemon/cluster/secrets.go b/components/engine/daemon/cluster/secrets.go new file mode 100644 index 0000000000..3947286cba --- /dev/null +++ b/components/engine/daemon/cluster/secrets.go @@ -0,0 +1,117 @@ +package cluster + +import ( + apitypes "github.com/docker/docker/api/types" + types "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/daemon/cluster/convert" + swarmapi "github.com/docker/swarmkit/api" + "golang.org/x/net/context" +) + +// GetSecret returns a secret from a managed swarm cluster +func (c *Cluster) GetSecret(input string) (types.Secret, error) { + var secret *swarmapi.Secret + + if err := c.lockedManagerAction(func(ctx context.Context, state nodeState) error { + s, err := getSecret(ctx, state.controlClient, input) + if err != nil { + return err + } + secret = s + return nil + }); err != nil { + return types.Secret{}, err + } + return convert.SecretFromGRPC(secret), nil +} + +// GetSecrets returns all secrets of a managed swarm cluster. +func (c *Cluster) GetSecrets(options apitypes.SecretListOptions) ([]types.Secret, error) { + c.mu.RLock() + defer c.mu.RUnlock() + + state := c.currentNodeState() + if !state.IsActiveManager() { + return nil, c.errNoManager(state) + } + + filters, err := newListSecretsFilters(options.Filters) + if err != nil { + return nil, err + } + ctx, cancel := c.getRequestContext() + defer cancel() + + r, err := state.controlClient.ListSecrets(ctx, + &swarmapi.ListSecretsRequest{Filters: filters}) + if err != nil { + return nil, err + } + + secrets := make([]types.Secret, 0, len(r.Secrets)) + + for _, secret := range r.Secrets { + secrets = append(secrets, convert.SecretFromGRPC(secret)) + } + + return secrets, nil +} + +// CreateSecret creates a new secret in a managed swarm cluster. +func (c *Cluster) CreateSecret(s types.SecretSpec) (string, error) { + var resp *swarmapi.CreateSecretResponse + if err := c.lockedManagerAction(func(ctx context.Context, state nodeState) error { + secretSpec := convert.SecretSpecToGRPC(s) + + r, err := state.controlClient.CreateSecret(ctx, + &swarmapi.CreateSecretRequest{Spec: &secretSpec}) + if err != nil { + return err + } + resp = r + return nil + }); err != nil { + return "", err + } + return resp.Secret.ID, nil +} + +// RemoveSecret removes a secret from a managed swarm cluster. +func (c *Cluster) RemoveSecret(input string) error { + return c.lockedManagerAction(func(ctx context.Context, state nodeState) error { + secret, err := getSecret(ctx, state.controlClient, input) + if err != nil { + return err + } + + req := &swarmapi.RemoveSecretRequest{ + SecretID: secret.ID, + } + + _, err = state.controlClient.RemoveSecret(ctx, req) + return err + }) +} + +// UpdateSecret updates a secret in a managed swarm cluster. 
+// Note: this is not exposed to the CLI but is available from the API only +func (c *Cluster) UpdateSecret(input string, version uint64, spec types.SecretSpec) error { + return c.lockedManagerAction(func(ctx context.Context, state nodeState) error { + secret, err := getSecret(ctx, state.controlClient, input) + if err != nil { + return err + } + + secretSpec := convert.SecretSpecToGRPC(spec) + + _, err = state.controlClient.UpdateSecret(ctx, + &swarmapi.UpdateSecretRequest{ + SecretID: secret.ID, + SecretVersion: &swarmapi.Version{ + Index: version, + }, + Spec: &secretSpec, + }) + return err + }) +} diff --git a/components/engine/daemon/cluster/services.go b/components/engine/daemon/cluster/services.go new file mode 100644 index 0000000000..f4416c24c3 --- /dev/null +++ b/components/engine/daemon/cluster/services.go @@ -0,0 +1,552 @@ +package cluster + +import ( + "encoding/base64" + "encoding/json" + "fmt" + "io" + "os" + "strconv" + "strings" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/reference" + apierrors "github.com/docker/docker/api/errors" + apitypes "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/backend" + types "github.com/docker/docker/api/types/swarm" + timetypes "github.com/docker/docker/api/types/time" + "github.com/docker/docker/daemon/cluster/convert" + runconfigopts "github.com/docker/docker/runconfig/opts" + swarmapi "github.com/docker/swarmkit/api" + gogotypes "github.com/gogo/protobuf/types" + "github.com/pkg/errors" + "golang.org/x/net/context" +) + +// GetServices returns all services of a managed swarm cluster. +func (c *Cluster) GetServices(options apitypes.ServiceListOptions) ([]types.Service, error) { + c.mu.RLock() + defer c.mu.RUnlock() + + state := c.currentNodeState() + if !state.IsActiveManager() { + return nil, c.errNoManager(state) + } + + // We move the accepted filter check here as "mode" filter + // is processed in the daemon, not in SwarmKit. So it might + // be good to have accepted file check in the same file as + // the filter processing (in the for loop below). + accepted := map[string]bool{ + "name": true, + "id": true, + "label": true, + "mode": true, + "runtime": true, + } + if err := options.Filters.Validate(accepted); err != nil { + return nil, err + } + + filters := &swarmapi.ListServicesRequest_Filters{ + NamePrefixes: options.Filters.Get("name"), + IDPrefixes: options.Filters.Get("id"), + Labels: runconfigopts.ConvertKVStringsToMap(options.Filters.Get("label")), + // (ehazlett): hardcode runtime for now. eventually we will + // be able to filter for the desired runtimes once more + // are supported. + Runtimes: []string{string(types.RuntimeContainer)}, + } + + ctx, cancel := c.getRequestContext() + defer cancel() + + r, err := state.controlClient.ListServices( + ctx, + &swarmapi.ListServicesRequest{Filters: filters}) + if err != nil { + return nil, err + } + + services := make([]types.Service, 0, len(r.Services)) + + for _, service := range r.Services { + if options.Filters.Include("mode") { + var mode string + switch service.Spec.GetMode().(type) { + case *swarmapi.ServiceSpec_Global: + mode = "global" + case *swarmapi.ServiceSpec_Replicated: + mode = "replicated" + } + + if !options.Filters.ExactMatch("mode", mode) { + continue + } + } + svcs, err := convert.ServiceFromGRPC(*service) + if err != nil { + return nil, err + } + services = append(services, svcs) + } + + return services, nil +} + +// GetService returns a service based on an ID or name. 
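On the calling side, the accepted map in GetServices corresponds to filters.Args built key by key; any key outside the map fails Validate, exactly as the filter tests earlier in this diff exercise. A client-side construction example using the same filters package:

```go
package sketch

import "github.com/docker/docker/api/types/filters"

// buildServiceFilters shows the caller's half of the accepted-map contract;
// the key/value choices are examples only.
func buildServiceFilters() filters.Args {
	f := filters.NewArgs()
	f.Add("name", "web")        // matched as a name prefix
	f.Add("label", "env=prod")  // repeatable
	f.Add("mode", "replicated") // filtered in the daemon, not in SwarmKit
	return f
}
```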
+func (c *Cluster) GetService(input string, insertDefaults bool) (types.Service, error) { + var service *swarmapi.Service + if err := c.lockedManagerAction(func(ctx context.Context, state nodeState) error { + s, err := getService(ctx, state.controlClient, input, insertDefaults) + if err != nil { + return err + } + service = s + return nil + }); err != nil { + return types.Service{}, err + } + svc, err := convert.ServiceFromGRPC(*service) + if err != nil { + return types.Service{}, err + } + return svc, nil +} + +// CreateService creates a new service in a managed swarm cluster. +func (c *Cluster) CreateService(s types.ServiceSpec, encodedAuth string, queryRegistry bool) (*apitypes.ServiceCreateResponse, error) { + var resp *apitypes.ServiceCreateResponse + err := c.lockedManagerAction(func(ctx context.Context, state nodeState) error { + err := c.populateNetworkID(ctx, state.controlClient, &s) + if err != nil { + return err + } + + serviceSpec, err := convert.ServiceSpecToGRPC(s) + if err != nil { + return apierrors.NewBadRequestError(err) + } + + resp = &apitypes.ServiceCreateResponse{} + + switch serviceSpec.Task.Runtime.(type) { + // handle other runtimes here + case *swarmapi.TaskSpec_Container: + ctnr := serviceSpec.Task.GetContainer() + if ctnr == nil { + return errors.New("service does not use container tasks") + } + if encodedAuth != "" { + ctnr.PullOptions = &swarmapi.ContainerSpec_PullOptions{RegistryAuth: encodedAuth} + } + + // retrieve auth config from encoded auth + authConfig := &apitypes.AuthConfig{} + if encodedAuth != "" { + if err := json.NewDecoder(base64.NewDecoder(base64.URLEncoding, strings.NewReader(encodedAuth))).Decode(authConfig); err != nil { + logrus.Warnf("invalid authconfig: %v", err) + } + } + + // pin image by digest for API versions < 1.30 + // TODO(nishanttotla): The check on "DOCKER_SERVICE_PREFER_OFFLINE_IMAGE" + // should be removed in the future. Since integration tests only use the + // latest API version, so this is no longer required. + if os.Getenv("DOCKER_SERVICE_PREFER_OFFLINE_IMAGE") != "1" && queryRegistry { + digestImage, err := c.imageWithDigestString(ctx, ctnr.Image, authConfig) + if err != nil { + logrus.Warnf("unable to pin image %s to digest: %s", ctnr.Image, err.Error()) + // warning in the client response should be concise + resp.Warnings = append(resp.Warnings, digestWarning(ctnr.Image)) + + } else if ctnr.Image != digestImage { + logrus.Debugf("pinning image %s by digest: %s", ctnr.Image, digestImage) + ctnr.Image = digestImage + + } else { + logrus.Debugf("creating service using supplied digest reference %s", ctnr.Image) + + } + + // Replace the context with a fresh one. + // If we timed out while communicating with the + // registry, then "ctx" will already be expired, which + // would cause UpdateService below to fail. Reusing + // "ctx" could make it impossible to create a service + // if the registry is slow or unresponsive. + var cancel func() + ctx, cancel = c.getRequestContext() + defer cancel() + } + + r, err := state.controlClient.CreateService(ctx, &swarmapi.CreateServiceRequest{Spec: &serviceSpec}) + if err != nil { + return err + } + + resp.ID = r.Service.ID + } + return nil + }) + + return resp, err +} + +// UpdateService updates existing service to match new properties. 
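The encodedAuth that CreateService threads into PullOptions is the X-Registry-Auth wire format: URL-safe base64 over a JSON auth config, decoded above by a base64.NewDecoder feeding json.NewDecoder. Standalone, with a minimal stand-in struct instead of apitypes.AuthConfig:

```go
package sketch

import (
	"encoding/base64"
	"encoding/json"
	"strings"
)

type authConfig struct {
	Username string `json:"username"`
	Password string `json:"password"`
}

// decodeAuth mirrors the decoding chain used when pinning images by digest.
func decodeAuth(encoded string) (authConfig, error) {
	var ac authConfig
	dec := json.NewDecoder(base64.NewDecoder(base64.URLEncoding, strings.NewReader(encoded)))
	err := dec.Decode(&ac)
	return ac, err
}
```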
+func (c *Cluster) UpdateService(serviceIDOrName string, version uint64, spec types.ServiceSpec, flags apitypes.ServiceUpdateOptions, queryRegistry bool) (*apitypes.ServiceUpdateResponse, error) { + var resp *apitypes.ServiceUpdateResponse + + err := c.lockedManagerAction(func(ctx context.Context, state nodeState) error { + + err := c.populateNetworkID(ctx, state.controlClient, &spec) + if err != nil { + return err + } + + serviceSpec, err := convert.ServiceSpecToGRPC(spec) + if err != nil { + return apierrors.NewBadRequestError(err) + } + + currentService, err := getService(ctx, state.controlClient, serviceIDOrName, false) + if err != nil { + return err + } + + newCtnr := serviceSpec.Task.GetContainer() + if newCtnr == nil { + return errors.New("service does not use container tasks") + } + + encodedAuth := flags.EncodedRegistryAuth + if encodedAuth != "" { + newCtnr.PullOptions = &swarmapi.ContainerSpec_PullOptions{RegistryAuth: encodedAuth} + } else { + // this is needed because if the encodedAuth isn't being updated then we + // shouldn't lose it, and continue to use the one that was already present + var ctnr *swarmapi.ContainerSpec + switch flags.RegistryAuthFrom { + case apitypes.RegistryAuthFromSpec, "": + ctnr = currentService.Spec.Task.GetContainer() + case apitypes.RegistryAuthFromPreviousSpec: + if currentService.PreviousSpec == nil { + return errors.New("service does not have a previous spec") + } + ctnr = currentService.PreviousSpec.Task.GetContainer() + default: + return errors.New("unsupported registryAuthFrom value") + } + if ctnr == nil { + return errors.New("service does not use container tasks") + } + newCtnr.PullOptions = ctnr.PullOptions + // update encodedAuth so it can be used to pin image by digest + if ctnr.PullOptions != nil { + encodedAuth = ctnr.PullOptions.RegistryAuth + } + } + + // retrieve auth config from encoded auth + authConfig := &apitypes.AuthConfig{} + if encodedAuth != "" { + if err := json.NewDecoder(base64.NewDecoder(base64.URLEncoding, strings.NewReader(encodedAuth))).Decode(authConfig); err != nil { + logrus.Warnf("invalid authconfig: %v", err) + } + } + + resp = &apitypes.ServiceUpdateResponse{} + + // pin image by digest for API versions < 1.30 + // TODO(nishanttotla): The check on "DOCKER_SERVICE_PREFER_OFFLINE_IMAGE" + // should be removed in the future. Since integration tests only use the + // latest API version, so this is no longer required. + if os.Getenv("DOCKER_SERVICE_PREFER_OFFLINE_IMAGE") != "1" && queryRegistry { + digestImage, err := c.imageWithDigestString(ctx, newCtnr.Image, authConfig) + if err != nil { + logrus.Warnf("unable to pin image %s to digest: %s", newCtnr.Image, err.Error()) + // warning in the client response should be concise + resp.Warnings = append(resp.Warnings, digestWarning(newCtnr.Image)) + } else if newCtnr.Image != digestImage { + logrus.Debugf("pinning image %s by digest: %s", newCtnr.Image, digestImage) + newCtnr.Image = digestImage + } else { + logrus.Debugf("updating service using supplied digest reference %s", newCtnr.Image) + } + + // Replace the context with a fresh one. + // If we timed out while communicating with the + // registry, then "ctx" will already be expired, which + // would cause UpdateService below to fail. Reusing + // "ctx" could make it impossible to update a service + // if the registry is slow or unresponsive. 
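+			// Note: the separate "var cancel func()" declaration below,
+			// followed by plain assignment, is deliberate; using ":=" would
+			// declare a new "ctx" that shadows the outer one, and the
+			// expired context would still be used by the calls that follow.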
+ var cancel func() + ctx, cancel = c.getRequestContext() + defer cancel() + } + + var rollback swarmapi.UpdateServiceRequest_Rollback + switch flags.Rollback { + case "", "none": + rollback = swarmapi.UpdateServiceRequest_NONE + case "previous": + rollback = swarmapi.UpdateServiceRequest_PREVIOUS + default: + return fmt.Errorf("unrecognized rollback option %s", flags.Rollback) + } + + _, err = state.controlClient.UpdateService( + ctx, + &swarmapi.UpdateServiceRequest{ + ServiceID: currentService.ID, + Spec: &serviceSpec, + ServiceVersion: &swarmapi.Version{ + Index: version, + }, + Rollback: rollback, + }, + ) + return err + }) + return resp, err +} + +// RemoveService removes a service from a managed swarm cluster. +func (c *Cluster) RemoveService(input string) error { + return c.lockedManagerAction(func(ctx context.Context, state nodeState) error { + service, err := getService(ctx, state.controlClient, input, false) + if err != nil { + return err + } + + _, err = state.controlClient.RemoveService(ctx, &swarmapi.RemoveServiceRequest{ServiceID: service.ID}) + return err + }) +} + +// ServiceLogs collects service logs and writes them back to `config.OutStream` +func (c *Cluster) ServiceLogs(ctx context.Context, selector *backend.LogSelector, config *apitypes.ContainerLogsOptions) (<-chan *backend.LogMessage, error) { + c.mu.RLock() + defer c.mu.RUnlock() + + state := c.currentNodeState() + if !state.IsActiveManager() { + return nil, c.errNoManager(state) + } + + swarmSelector, err := convertSelector(ctx, state.controlClient, selector) + if err != nil { + return nil, errors.Wrap(err, "error making log selector") + } + + // set the streams we'll use + stdStreams := []swarmapi.LogStream{} + if config.ShowStdout { + stdStreams = append(stdStreams, swarmapi.LogStreamStdout) + } + if config.ShowStderr { + stdStreams = append(stdStreams, swarmapi.LogStreamStderr) + } + + // Get tail value squared away - the number of previous log lines we look at + var tail int64 + // in ContainerLogs, if the tail value is ANYTHING non-integer, we just set + // it to -1 (all). i don't agree with that, but i also think no tail value + // should be legitimate. if you don't pass tail, we assume you want "all" + if config.Tail == "all" || config.Tail == "" { + // tail of 0 means send all logs on the swarmkit side + tail = 0 + } else { + t, err := strconv.Atoi(config.Tail) + if err != nil { + return nil, errors.New("tail value must be a positive integer or \"all\"") + } + if t < 0 { + return nil, errors.New("negative tail values not supported") + } + // we actually use negative tail in swarmkit to represent messages + // backwards starting from the beginning. also, -1 means no logs. so, + // basically, for api compat with docker container logs, add one and + // flip the sign. 
+		// (For example, a requested tail of 100 is sent as -101.) Negative
+		// tail values were already rejected above, since docker does not
+		// support them (and they would fail deeper in the stack anyway).
+		//
+		// See the logs protobuf for more information
+		tail = int64(-(t + 1))
+	}
+
+	// get the since value - the point in time from which to start returning logs
+	var sinceProto *gogotypes.Timestamp
+	if config.Since != "" {
+		s, n, err := timetypes.ParseTimestamps(config.Since, 0)
+		if err != nil {
+			return nil, errors.Wrap(err, "could not parse since timestamp")
+		}
+		since := time.Unix(s, n)
+		sinceProto, err = gogotypes.TimestampProto(since)
+		if err != nil {
+			return nil, errors.Wrap(err, "could not parse timestamp to proto")
+		}
+	}
+
+	stream, err := state.logsClient.SubscribeLogs(ctx, &swarmapi.SubscribeLogsRequest{
+		Selector: swarmSelector,
+		Options: &swarmapi.LogSubscriptionOptions{
+			Follow:  config.Follow,
+			Streams: stdStreams,
+			Tail:    tail,
+			Since:   sinceProto,
+		},
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	messageChan := make(chan *backend.LogMessage, 1)
+	go func() {
+		defer close(messageChan)
+		for {
+			// Check the context before doing anything.
+			select {
+			case <-ctx.Done():
+				return
+			default:
+			}
+			subscribeMsg, err := stream.Recv()
+			if err == io.EOF {
+				return
+			}
+			// any error other than io.EOF is pushed to the channel before returning
+			if err != nil {
+				select {
+				case <-ctx.Done():
+				case messageChan <- &backend.LogMessage{Err: err}:
+				}
+				return
+			}
+
+			for _, msg := range subscribeMsg.Messages {
+				// make a new message
+				m := new(backend.LogMessage)
+				m.Attrs = make(backend.LogAttributes)
+				// add the timestamp, recording the error on the message if
+				// the conversion fails
+				m.Timestamp, err = gogotypes.TimestampFromProto(msg.Timestamp)
+				if err != nil {
+					m.Err = err
+				}
+				// copy over all of the details
+				for _, d := range msg.Attrs {
+					m.Attrs[d.Key] = d.Value
+				}
+				// the context keys take precedence over user-supplied
+				// details, in case a user-added detail collides with one
+				// of them
+				m.Attrs[contextPrefix+".node.id"] = msg.Context.NodeID
+				m.Attrs[contextPrefix+".service.id"] = msg.Context.ServiceID
+				m.Attrs[contextPrefix+".task.id"] = msg.Context.TaskID
+
+				switch msg.Stream {
+				case swarmapi.LogStreamStdout:
+					m.Source = "stdout"
+				case swarmapi.LogStreamStderr:
+					m.Source = "stderr"
+				}
+				m.Line = msg.Data
+
+				// there could be a case where the reader stops accepting
+				// messages and the context is canceled. we need to check that
+				// here, or otherwise we risk blocking forever on the message
+				// send.
+				select {
+				case <-ctx.Done():
+					return
+				case messageChan <- m:
+				}
+			}
+		}
+	}()
+	return messageChan, nil
+}
+
+// convertSelector takes a backend.LogSelector, which contains raw names that
+// may or may not be valid, and converts them to an api.LogSelector proto.
+// It returns an error if any of the names fails to resolve.
+func convertSelector(ctx context.Context, cc swarmapi.ControlClient, selector *backend.LogSelector) (*swarmapi.LogSelector, error) {
+	// don't rely on swarmkit to resolve IDs, do it ourselves
+	swarmSelector := &swarmapi.LogSelector{}
+	for _, s := range selector.Services {
+		service, err := getService(ctx, cc, s, false)
+		if err != nil {
+			return nil, err
+		}
+		c := service.Spec.Task.GetContainer()
+		if c == nil {
+			return nil, errors.New("logs only supported on container tasks")
+		}
+		swarmSelector.ServiceIDs = append(swarmSelector.ServiceIDs, service.ID)
+	}
+	for _, t := range selector.Tasks {
+		task, err := getTask(ctx, cc, t)
+		if err != nil {
+			return nil, err
+		}
+		c := task.Spec.GetContainer()
+		if c == nil {
+			return nil, errors.New("logs only supported on container tasks")
+		}
+		swarmSelector.TaskIDs = append(swarmSelector.TaskIDs, task.ID)
+	}
+	return swarmSelector, nil
+}
+
+// imageWithDigestString takes an image such as name or name:tag
+// and returns the image pinned to a digest, such as name@sha256:34234
+func (c *Cluster) imageWithDigestString(ctx context.Context, image string, authConfig *apitypes.AuthConfig) (string, error) {
+	ref, err := reference.ParseAnyReference(image)
+	if err != nil {
+		return "", err
+	}
+	namedRef, ok := ref.(reference.Named)
+	if !ok {
+		if _, ok := ref.(reference.Digested); ok {
+			return image, nil
+		}
+		return "", errors.Errorf("unknown image reference format: %s", image)
+	}
+	// only query the registry if the reference is not already canonical
+	// (i.e. does not carry a digest)
+	if _, ok := namedRef.(reference.Canonical); !ok {
+		namedRef = reference.TagNameOnly(namedRef)
+
+		taggedRef, ok := namedRef.(reference.NamedTagged)
+		if !ok {
+			return "", errors.Errorf("image reference not tagged: %s", image)
+		}
+
+		repo, _, err := c.config.Backend.GetRepository(ctx, taggedRef, authConfig)
+		if err != nil {
+			return "", err
+		}
+		dscrptr, err := repo.Tags(ctx).Get(ctx, taggedRef.Tag())
+		if err != nil {
+			return "", err
+		}
+
+		namedDigestedRef, err := reference.WithDigest(taggedRef, dscrptr.Digest)
+		if err != nil {
+			return "", err
+		}
+		// return the familiar form until the interface is updated to return a type
+		return reference.FamiliarString(namedDigestedRef), nil
+	}
+	// reference already contains a digest, so just return it
+	return reference.FamiliarString(ref), nil
+}
+
+// digestWarning constructs a formatted warning string
+// using the image name that could not be pinned by digest. The
+// formatting is hardcoded, but could be made smarter in the future
+func digestWarning(image string) string {
+	return fmt.Sprintf("image %s could not be accessed on a registry to record\nits digest. 
Each node will access %s independently,\npossibly leading to different nodes running different\nversions of the image.\n", image, image) +} diff --git a/components/engine/daemon/cluster/swarm.go b/components/engine/daemon/cluster/swarm.go new file mode 100644 index 0000000000..8a8d5bf808 --- /dev/null +++ b/components/engine/daemon/cluster/swarm.go @@ -0,0 +1,554 @@ +package cluster + +import ( + "fmt" + "net" + "strings" + "time" + + "github.com/Sirupsen/logrus" + apierrors "github.com/docker/docker/api/errors" + apitypes "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + types "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/daemon/cluster/convert" + "github.com/docker/docker/opts" + "github.com/docker/docker/pkg/signal" + swarmapi "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/manager/encryption" + swarmnode "github.com/docker/swarmkit/node" + "github.com/pkg/errors" + "golang.org/x/net/context" +) + +// Init initializes new cluster from user provided request. +func (c *Cluster) Init(req types.InitRequest) (string, error) { + c.controlMutex.Lock() + defer c.controlMutex.Unlock() + if c.nr != nil { + if req.ForceNewCluster { + // Take c.mu temporarily to wait for presently running + // API handlers to finish before shutting down the node. + c.mu.Lock() + c.mu.Unlock() + + if err := c.nr.Stop(); err != nil { + return "", err + } + } else { + return "", errSwarmExists + } + } + + if err := validateAndSanitizeInitRequest(&req); err != nil { + return "", apierrors.NewBadRequestError(err) + } + + listenHost, listenPort, err := resolveListenAddr(req.ListenAddr) + if err != nil { + return "", err + } + + advertiseHost, advertisePort, err := c.resolveAdvertiseAddr(req.AdvertiseAddr, listenPort) + if err != nil { + return "", err + } + + dataPathAddr, err := resolveDataPathAddr(req.DataPathAddr) + if err != nil { + return "", err + } + + localAddr := listenHost + + // If the local address is undetermined, the advertise address + // will be used as local address, if it belongs to this system. + // If the advertise address is not local, then we try to find + // a system address to use as local address. If this fails, + // we give up and ask the user to pass the listen address. 
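+	//
+	// For example (illustrative values, not taken from this change): with
+	// ListenAddr "0.0.0.0:2377" and AdvertiseAddr "192.168.1.10:2377",
+	// localAddr becomes "192.168.1.10", provided that address is assigned
+	// to one of this machine's interfaces.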
+ if net.ParseIP(localAddr).IsUnspecified() { + advertiseIP := net.ParseIP(advertiseHost) + + found := false + for _, systemIP := range listSystemIPs() { + if systemIP.Equal(advertiseIP) { + localAddr = advertiseIP.String() + found = true + break + } + } + + if !found { + ip, err := c.resolveSystemAddr() + if err != nil { + logrus.Warnf("Could not find a local address: %v", err) + return "", errMustSpecifyListenAddr + } + localAddr = ip.String() + } + } + + if !req.ForceNewCluster { + clearPersistentState(c.root) + } + + nr, err := c.newNodeRunner(nodeStartConfig{ + forceNewCluster: req.ForceNewCluster, + autolock: req.AutoLockManagers, + LocalAddr: localAddr, + ListenAddr: net.JoinHostPort(listenHost, listenPort), + AdvertiseAddr: net.JoinHostPort(advertiseHost, advertisePort), + DataPathAddr: dataPathAddr, + availability: req.Availability, + }) + if err != nil { + return "", err + } + c.mu.Lock() + c.nr = nr + c.mu.Unlock() + + if err := <-nr.Ready(); err != nil { + if !req.ForceNewCluster { // if failure on first attempt don't keep state + if err := clearPersistentState(c.root); err != nil { + return "", err + } + } + if err != nil { + c.mu.Lock() + c.nr = nil + c.mu.Unlock() + } + return "", err + } + state := nr.State() + if state.swarmNode == nil { // should never happen but protect from panic + return "", errors.New("invalid cluster state for spec initialization") + } + if err := initClusterSpec(state.swarmNode, req.Spec); err != nil { + return "", err + } + return state.NodeID(), nil +} + +// Join makes current Cluster part of an existing swarm cluster. +func (c *Cluster) Join(req types.JoinRequest) error { + c.controlMutex.Lock() + defer c.controlMutex.Unlock() + c.mu.Lock() + if c.nr != nil { + c.mu.Unlock() + return errSwarmExists + } + c.mu.Unlock() + + if err := validateAndSanitizeJoinRequest(&req); err != nil { + return apierrors.NewBadRequestError(err) + } + + listenHost, listenPort, err := resolveListenAddr(req.ListenAddr) + if err != nil { + return err + } + + var advertiseAddr string + if req.AdvertiseAddr != "" { + advertiseHost, advertisePort, err := c.resolveAdvertiseAddr(req.AdvertiseAddr, listenPort) + // For joining, we don't need to provide an advertise address, + // since the remote side can detect it. + if err == nil { + advertiseAddr = net.JoinHostPort(advertiseHost, advertisePort) + } + } + + dataPathAddr, err := resolveDataPathAddr(req.DataPathAddr) + if err != nil { + return err + } + + clearPersistentState(c.root) + + nr, err := c.newNodeRunner(nodeStartConfig{ + RemoteAddr: req.RemoteAddrs[0], + ListenAddr: net.JoinHostPort(listenHost, listenPort), + AdvertiseAddr: advertiseAddr, + DataPathAddr: dataPathAddr, + joinAddr: req.RemoteAddrs[0], + joinToken: req.JoinToken, + availability: req.Availability, + }) + if err != nil { + return err + } + + c.mu.Lock() + c.nr = nr + c.mu.Unlock() + + select { + case <-time.After(swarmConnectTimeout): + return errSwarmJoinTimeoutReached + case err := <-nr.Ready(): + if err != nil { + c.mu.Lock() + c.nr = nil + c.mu.Unlock() + } + return err + } +} + +// Inspect retrieves the configuration properties of a managed swarm cluster. 
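+//
+// Illustrative use (a sketch, not part of this change):
+//
+//	sw, err := c.Inspect()
+//	if err == nil {
+//		fmt.Println(sw.ID, sw.Meta.Version.Index)
+//	}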
+func (c *Cluster) Inspect() (types.Swarm, error) {
+	var swarm *swarmapi.Cluster
+	if err := c.lockedManagerAction(func(ctx context.Context, state nodeState) error {
+		s, err := getSwarm(ctx, state.controlClient)
+		if err != nil {
+			return err
+		}
+		swarm = s
+		return nil
+	}); err != nil {
+		return types.Swarm{}, err
+	}
+	return convert.SwarmFromGRPC(*swarm), nil
+}
+
+// Update updates configuration of a managed swarm cluster.
+func (c *Cluster) Update(version uint64, spec types.Spec, flags types.UpdateFlags) error {
+	return c.lockedManagerAction(func(ctx context.Context, state nodeState) error {
+		swarm, err := getSwarm(ctx, state.controlClient)
+		if err != nil {
+			return err
+		}
+
+		// In update, the client should provide the complete spec of the
+		// swarm, including Name and Labels. For any field left as 0 or nil,
+		// the swarmkit default value for that field is applied.
+		clusterSpec, err := convert.SwarmSpecToGRPC(spec)
+		if err != nil {
+			return apierrors.NewBadRequestError(err)
+		}
+
+		_, err = state.controlClient.UpdateCluster(
+			ctx,
+			&swarmapi.UpdateClusterRequest{
+				ClusterID: swarm.ID,
+				Spec:      &clusterSpec,
+				ClusterVersion: &swarmapi.Version{
+					Index: version,
+				},
+				Rotation: swarmapi.KeyRotation{
+					WorkerJoinToken:  flags.RotateWorkerToken,
+					ManagerJoinToken: flags.RotateManagerToken,
+					ManagerUnlockKey: flags.RotateManagerUnlockKey,
+				},
+			},
+		)
+		return err
+	})
+}
+
+// GetUnlockKey returns the unlock key for the swarm.
+func (c *Cluster) GetUnlockKey() (string, error) {
+	var resp *swarmapi.GetUnlockKeyResponse
+	if err := c.lockedManagerAction(func(ctx context.Context, state nodeState) error {
+		client := swarmapi.NewCAClient(state.grpcConn)
+
+		r, err := client.GetUnlockKey(ctx, &swarmapi.GetUnlockKeyRequest{})
+		if err != nil {
+			return err
+		}
+		resp = r
+		return nil
+	}); err != nil {
+		return "", err
+	}
+	if len(resp.UnlockKey) == 0 {
+		// no key
+		return "", nil
+	}
+	return encryption.HumanReadableKey(resp.UnlockKey), nil
+}
+
+// UnlockSwarm provides a key to decrypt data that is encrypted at rest.
+func (c *Cluster) UnlockSwarm(req types.UnlockRequest) error {
+	c.controlMutex.Lock()
+	defer c.controlMutex.Unlock()
+
+	c.mu.RLock()
+	state := c.currentNodeState()
+
+	if !state.IsActiveManager() {
+		// when the manager is not active, return an error unless the
+		// error is "swarm is locked", which is the one case that can be
+		// recovered from here
+		if err := c.errNoManager(state); err != errSwarmLocked {
+			c.mu.RUnlock()
+			return err
+		}
+	} else {
+		// when the manager is active, the swarm cannot be locked
+		c.mu.RUnlock()
+		return errors.New("swarm is not locked")
+	}
+
+	// execution only reaches this point when the swarm is locked
+	nr := c.nr
+	c.mu.RUnlock()
+
+	key, err := encryption.ParseHumanReadableKey(req.UnlockKey)
+	if err != nil {
+		return err
+	}
+
+	config := nr.config
+	config.lockKey = key
+	if err := nr.Stop(); err != nil {
+		return err
+	}
+	nr, err = c.newNodeRunner(config)
+	if err != nil {
+		return err
+	}
+
+	c.mu.Lock()
+	c.nr = nr
+	c.mu.Unlock()
+
+	if err := <-nr.Ready(); err != nil {
+		if errors.Cause(err) == errSwarmLocked {
+			return errors.New("swarm could not be unlocked: invalid key provided")
+		}
+		return fmt.Errorf("swarm component could not be started: %v", err)
+	}
+	return nil
+}
+
+// Leave shuts down Cluster and removes current state.
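+// With force=false the call refuses to proceed when this node is a manager
+// whose departure could cost the swarm its quorum, or when the swarm is
+// locked; force=true skips those safety checks.
+//
+// Illustrative only:
+//
+//	if err := c.Leave(false); err != nil {
+//		// quorum or locked-swarm warning; retry with force once the
+//		// consequences are understood
+//	}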
+func (c *Cluster) Leave(force bool) error { + c.controlMutex.Lock() + defer c.controlMutex.Unlock() + + c.mu.Lock() + nr := c.nr + if nr == nil { + c.mu.Unlock() + return errNoSwarm + } + + state := c.currentNodeState() + + c.mu.Unlock() + + if errors.Cause(state.err) == errSwarmLocked && !force { + // leave a locked swarm without --force is not allowed + return errors.New("Swarm is encrypted and locked. Please unlock it first or use `--force` to ignore this message.") + } + + if state.IsManager() && !force { + msg := "You are attempting to leave the swarm on a node that is participating as a manager. " + if state.IsActiveManager() { + active, reachable, unreachable, err := managerStats(state.controlClient, state.NodeID()) + if err == nil { + if active && removingManagerCausesLossOfQuorum(reachable, unreachable) { + if isLastManager(reachable, unreachable) { + msg += "Removing the last manager erases all current state of the swarm. Use `--force` to ignore this message. " + return errors.New(msg) + } + msg += fmt.Sprintf("Removing this node leaves %v managers out of %v. Without a Raft quorum your swarm will be inaccessible. ", reachable-1, reachable+unreachable) + } + } + } else { + msg += "Doing so may lose the consensus of your cluster. " + } + + msg += "The only way to restore a swarm that has lost consensus is to reinitialize it with `--force-new-cluster`. Use `--force` to suppress this message." + return errors.New(msg) + } + // release readers in here + if err := nr.Stop(); err != nil { + logrus.Errorf("failed to shut down cluster node: %v", err) + signal.DumpStacks("") + return err + } + + c.mu.Lock() + c.nr = nil + c.mu.Unlock() + + if nodeID := state.NodeID(); nodeID != "" { + nodeContainers, err := c.listContainerForNode(nodeID) + if err != nil { + return err + } + for _, id := range nodeContainers { + if err := c.config.Backend.ContainerRm(id, &apitypes.ContainerRmConfig{ForceRemove: true}); err != nil { + logrus.Errorf("error removing %v: %v", id, err) + } + } + } + + // todo: cleanup optional? + if err := clearPersistentState(c.root); err != nil { + return err + } + c.config.Backend.DaemonLeavesCluster() + return nil +} + +// Info returns information about the current cluster state. 
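+//
+// A sketch of typical use (illustrative):
+//
+//	info := c.Info()
+//	fmt.Println(info.LocalNodeState, info.Nodes, info.Managers)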
+func (c *Cluster) Info() types.Info { + info := types.Info{ + NodeAddr: c.GetAdvertiseAddress(), + } + c.mu.RLock() + defer c.mu.RUnlock() + + state := c.currentNodeState() + info.LocalNodeState = state.status + if state.err != nil { + info.Error = state.err.Error() + } + + ctx, cancel := c.getRequestContext() + defer cancel() + + if state.IsActiveManager() { + info.ControlAvailable = true + swarm, err := c.Inspect() + if err != nil { + info.Error = err.Error() + } + + info.Cluster = &swarm.ClusterInfo + + if r, err := state.controlClient.ListNodes(ctx, &swarmapi.ListNodesRequest{}); err != nil { + info.Error = err.Error() + } else { + info.Nodes = len(r.Nodes) + for _, n := range r.Nodes { + if n.ManagerStatus != nil { + info.Managers = info.Managers + 1 + } + } + } + } + + if state.swarmNode != nil { + for _, r := range state.swarmNode.Remotes() { + info.RemoteManagers = append(info.RemoteManagers, types.Peer{NodeID: r.NodeID, Addr: r.Addr}) + } + info.NodeID = state.swarmNode.NodeID() + } + + return info +} + +func validateAndSanitizeInitRequest(req *types.InitRequest) error { + var err error + req.ListenAddr, err = validateAddr(req.ListenAddr) + if err != nil { + return fmt.Errorf("invalid ListenAddr %q: %v", req.ListenAddr, err) + } + + if req.Spec.Annotations.Name == "" { + req.Spec.Annotations.Name = "default" + } else if req.Spec.Annotations.Name != "default" { + return errors.New(`swarm spec must be named "default"`) + } + + return nil +} + +func validateAndSanitizeJoinRequest(req *types.JoinRequest) error { + var err error + req.ListenAddr, err = validateAddr(req.ListenAddr) + if err != nil { + return fmt.Errorf("invalid ListenAddr %q: %v", req.ListenAddr, err) + } + if len(req.RemoteAddrs) == 0 { + return errors.New("at least 1 RemoteAddr is required to join") + } + for i := range req.RemoteAddrs { + req.RemoteAddrs[i], err = validateAddr(req.RemoteAddrs[i]) + if err != nil { + return fmt.Errorf("invalid remoteAddr %q: %v", req.RemoteAddrs[i], err) + } + } + return nil +} + +func validateAddr(addr string) (string, error) { + if addr == "" { + return addr, errors.New("invalid empty address") + } + newaddr, err := opts.ParseTCPAddr(addr, defaultAddr) + if err != nil { + return addr, nil + } + return strings.TrimPrefix(newaddr, "tcp://"), nil +} + +func initClusterSpec(node *swarmnode.Node, spec types.Spec) error { + ctx, _ := context.WithTimeout(context.Background(), 5*time.Second) + for conn := range node.ListenControlSocket(ctx) { + if ctx.Err() != nil { + return ctx.Err() + } + if conn != nil { + client := swarmapi.NewControlClient(conn) + var cluster *swarmapi.Cluster + for i := 0; ; i++ { + lcr, err := client.ListClusters(ctx, &swarmapi.ListClustersRequest{}) + if err != nil { + return fmt.Errorf("error on listing clusters: %v", err) + } + if len(lcr.Clusters) == 0 { + if i < 10 { + time.Sleep(200 * time.Millisecond) + continue + } + return errors.New("empty list of clusters was returned") + } + cluster = lcr.Clusters[0] + break + } + // In init, we take the initial default values from swarmkit, and merge + // any non nil or 0 value from spec to GRPC spec. This will leave the + // default value alone. 
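+			//
+			// For example (illustrative, relying on swarmkit defaults): if
+			// spec leaves CAConfig.NodeCertExpiry at 0, the cluster keeps
+			// swarmkit's default certificate expiry instead of zero.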
+ // Note that this is different from Update(), as in Update() we expect + // user to specify the complete spec of the cluster (as they already know + // the existing one and knows which field to update) + clusterSpec, err := convert.MergeSwarmSpecToGRPC(spec, cluster.Spec) + if err != nil { + return fmt.Errorf("error updating cluster settings: %v", err) + } + _, err = client.UpdateCluster(ctx, &swarmapi.UpdateClusterRequest{ + ClusterID: cluster.ID, + ClusterVersion: &cluster.Meta.Version, + Spec: &clusterSpec, + }) + if err != nil { + return fmt.Errorf("error updating cluster settings: %v", err) + } + return nil + } + } + return ctx.Err() +} + +func (c *Cluster) listContainerForNode(nodeID string) ([]string, error) { + var ids []string + filters := filters.NewArgs() + filters.Add("label", fmt.Sprintf("com.docker.swarm.node.id=%s", nodeID)) + containers, err := c.config.Backend.Containers(&apitypes.ContainerListOptions{ + Filters: filters, + }) + if err != nil { + return []string{}, err + } + for _, c := range containers { + ids = append(ids, c.ID) + } + return ids, nil +} diff --git a/components/engine/daemon/cluster/tasks.go b/components/engine/daemon/cluster/tasks.go new file mode 100644 index 0000000000..47cd5563b9 --- /dev/null +++ b/components/engine/daemon/cluster/tasks.go @@ -0,0 +1,87 @@ +package cluster + +import ( + apitypes "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + types "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/daemon/cluster/convert" + swarmapi "github.com/docker/swarmkit/api" + "golang.org/x/net/context" +) + +// GetTasks returns a list of tasks matching the filter options. +func (c *Cluster) GetTasks(options apitypes.TaskListOptions) ([]types.Task, error) { + c.mu.RLock() + defer c.mu.RUnlock() + + state := c.currentNodeState() + if !state.IsActiveManager() { + return nil, c.errNoManager(state) + } + + byName := func(filter filters.Args) error { + if filter.Include("service") { + serviceFilters := filter.Get("service") + for _, serviceFilter := range serviceFilters { + service, err := c.GetService(serviceFilter, false) + if err != nil { + return err + } + filter.Del("service", serviceFilter) + filter.Add("service", service.ID) + } + } + if filter.Include("node") { + nodeFilters := filter.Get("node") + for _, nodeFilter := range nodeFilters { + node, err := c.GetNode(nodeFilter) + if err != nil { + return err + } + filter.Del("node", nodeFilter) + filter.Add("node", node.ID) + } + } + return nil + } + + filters, err := newListTasksFilters(options.Filters, byName) + if err != nil { + return nil, err + } + + ctx, cancel := c.getRequestContext() + defer cancel() + + r, err := state.controlClient.ListTasks( + ctx, + &swarmapi.ListTasksRequest{Filters: filters}) + if err != nil { + return nil, err + } + + tasks := make([]types.Task, 0, len(r.Tasks)) + + for _, task := range r.Tasks { + if task.Spec.GetContainer() != nil { + tasks = append(tasks, convert.TaskFromGRPC(*task)) + } + } + return tasks, nil +} + +// GetTask returns a task by an ID. 
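+//
+// Illustrative only (the task ID is a made-up value):
+//
+//	task, err := c.GetTask("8oxkq2y0zqgrvkjxd2mdqu8cq")
+//	if err == nil {
+//		fmt.Println(task.Status.State)
+//	}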
+func (c *Cluster) GetTask(input string) (types.Task, error) { + var task *swarmapi.Task + if err := c.lockedManagerAction(func(ctx context.Context, state nodeState) error { + t, err := getTask(ctx, state.controlClient, input) + if err != nil { + return err + } + task = t + return nil + }); err != nil { + return types.Task{}, err + } + return convert.TaskFromGRPC(*task), nil +} diff --git a/components/engine/daemon/cluster/utils.go b/components/engine/daemon/cluster/utils.go new file mode 100644 index 0000000000..93827961e1 --- /dev/null +++ b/components/engine/daemon/cluster/utils.go @@ -0,0 +1,63 @@ +package cluster + +import ( + "encoding/json" + "io/ioutil" + "os" + "path/filepath" + + "github.com/docker/docker/pkg/ioutils" +) + +func loadPersistentState(root string) (*nodeStartConfig, error) { + dt, err := ioutil.ReadFile(filepath.Join(root, stateFile)) + if err != nil { + return nil, err + } + // missing certificate means no actual state to restore from + if _, err := os.Stat(filepath.Join(root, "certificates/swarm-node.crt")); err != nil { + if os.IsNotExist(err) { + clearPersistentState(root) + } + return nil, err + } + var st nodeStartConfig + if err := json.Unmarshal(dt, &st); err != nil { + return nil, err + } + return &st, nil +} + +func savePersistentState(root string, config nodeStartConfig) error { + dt, err := json.Marshal(config) + if err != nil { + return err + } + return ioutils.AtomicWriteFile(filepath.Join(root, stateFile), dt, 0600) +} + +func clearPersistentState(root string) error { + // todo: backup this data instead of removing? + // rather than delete the entire swarm directory, delete the contents in order to preserve the inode + // (for example, allowing it to be bind-mounted) + files, err := ioutil.ReadDir(root) + if err != nil { + return err + } + + for _, f := range files { + if err := os.RemoveAll(filepath.Join(root, f.Name())); err != nil { + return err + } + } + + return nil +} + +func removingManagerCausesLossOfQuorum(reachable, unreachable int) bool { + return reachable-2 <= unreachable +} + +func isLastManager(reachable, unreachable int) bool { + return reachable == 1 && unreachable == 0 +} diff --git a/components/engine/daemon/commit.go b/components/engine/daemon/commit.go new file mode 100644 index 0000000000..e64c7d3333 --- /dev/null +++ b/components/engine/daemon/commit.go @@ -0,0 +1,282 @@ +package daemon + +import ( + "encoding/json" + "io" + "runtime" + "strings" + "time" + + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types/backend" + containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/docker/builder/dockerfile" + "github.com/docker/docker/container" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/ioutils" + "github.com/pkg/errors" +) + +// merge merges two Config, the image container configuration (defaults values), +// and the user container configuration, either passed by the API or generated +// by the cli. +// It will mutate the specified user configuration (userConf) with the image +// configuration where the user configuration is incomplete. 
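+//
+// For example (illustrative): if userConf.User is empty it inherits
+// imageConf.User, and image Env entries whose keys are not present in
+// userConf.Env are appended, so an image-provided PATH survives a
+// user-supplied Env that does not set PATH.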
+func merge(userConf, imageConf *containertypes.Config) error { + if userConf.User == "" { + userConf.User = imageConf.User + } + if len(userConf.ExposedPorts) == 0 { + userConf.ExposedPorts = imageConf.ExposedPorts + } else if imageConf.ExposedPorts != nil { + for port := range imageConf.ExposedPorts { + if _, exists := userConf.ExposedPorts[port]; !exists { + userConf.ExposedPorts[port] = struct{}{} + } + } + } + + if len(userConf.Env) == 0 { + userConf.Env = imageConf.Env + } else { + for _, imageEnv := range imageConf.Env { + found := false + imageEnvKey := strings.Split(imageEnv, "=")[0] + for _, userEnv := range userConf.Env { + userEnvKey := strings.Split(userEnv, "=")[0] + if runtime.GOOS == "windows" { + // Case insensitive environment variables on Windows + imageEnvKey = strings.ToUpper(imageEnvKey) + userEnvKey = strings.ToUpper(userEnvKey) + } + if imageEnvKey == userEnvKey { + found = true + break + } + } + if !found { + userConf.Env = append(userConf.Env, imageEnv) + } + } + } + + if userConf.Labels == nil { + userConf.Labels = map[string]string{} + } + for l, v := range imageConf.Labels { + if _, ok := userConf.Labels[l]; !ok { + userConf.Labels[l] = v + } + } + + if len(userConf.Entrypoint) == 0 { + if len(userConf.Cmd) == 0 { + userConf.Cmd = imageConf.Cmd + userConf.ArgsEscaped = imageConf.ArgsEscaped + } + + if userConf.Entrypoint == nil { + userConf.Entrypoint = imageConf.Entrypoint + } + } + if imageConf.Healthcheck != nil { + if userConf.Healthcheck == nil { + userConf.Healthcheck = imageConf.Healthcheck + } else { + if len(userConf.Healthcheck.Test) == 0 { + userConf.Healthcheck.Test = imageConf.Healthcheck.Test + } + if userConf.Healthcheck.Interval == 0 { + userConf.Healthcheck.Interval = imageConf.Healthcheck.Interval + } + if userConf.Healthcheck.Timeout == 0 { + userConf.Healthcheck.Timeout = imageConf.Healthcheck.Timeout + } + if userConf.Healthcheck.StartPeriod == 0 { + userConf.Healthcheck.StartPeriod = imageConf.Healthcheck.StartPeriod + } + if userConf.Healthcheck.Retries == 0 { + userConf.Healthcheck.Retries = imageConf.Healthcheck.Retries + } + } + } + + if userConf.WorkingDir == "" { + userConf.WorkingDir = imageConf.WorkingDir + } + if len(userConf.Volumes) == 0 { + userConf.Volumes = imageConf.Volumes + } else { + for k, v := range imageConf.Volumes { + userConf.Volumes[k] = v + } + } + + if userConf.StopSignal == "" { + userConf.StopSignal = imageConf.StopSignal + } + return nil +} + +// Commit creates a new filesystem image from the current state of a container. +// The image can optionally be tagged into a repository. +func (daemon *Daemon) Commit(name string, c *backend.ContainerCommitConfig) (string, error) { + start := time.Now() + container, err := daemon.GetContainer(name) + if err != nil { + return "", err + } + + containerConfig := c.ContainerConfig + if containerConfig == nil { + containerConfig = container.Config + } + + // It is not possible to commit a running container on Windows and on Solaris. 
+ if (runtime.GOOS == "windows" || runtime.GOOS == "solaris") && container.IsRunning() { + return "", errors.Errorf("%+v does not support commit of a running container", runtime.GOOS) + } + + if c.Pause && !container.IsPaused() { + daemon.containerPause(container) + defer daemon.containerUnpause(container) + } + + newConfig, err := dockerfile.BuildFromConfig(c.Config, c.Changes) + if err != nil { + return "", err + } + + if c.MergeConfigs { + if err := merge(newConfig, container.Config); err != nil { + return "", err + } + } + + rwTar, err := daemon.exportContainerRw(container) + if err != nil { + return "", err + } + defer func() { + if rwTar != nil { + rwTar.Close() + } + }() + + var history []image.History + rootFS := image.NewRootFS() + osVersion := "" + var osFeatures []string + + if container.ImageID != "" { + img, err := daemon.imageStore.Get(container.ImageID) + if err != nil { + return "", err + } + history = img.History + rootFS = img.RootFS + osVersion = img.OSVersion + osFeatures = img.OSFeatures + } + + l, err := daemon.layerStore.Register(rwTar, rootFS.ChainID()) + if err != nil { + return "", err + } + defer layer.ReleaseAndLog(daemon.layerStore, l) + + h := image.History{ + Author: c.Author, + Created: time.Now().UTC(), + CreatedBy: strings.Join(containerConfig.Cmd, " "), + Comment: c.Comment, + EmptyLayer: true, + } + + if diffID := l.DiffID(); layer.DigestSHA256EmptyTar != diffID { + h.EmptyLayer = false + rootFS.Append(diffID) + } + + history = append(history, h) + + config, err := json.Marshal(&image.Image{ + V1Image: image.V1Image{ + DockerVersion: dockerversion.Version, + Config: newConfig, + Architecture: runtime.GOARCH, + OS: runtime.GOOS, + Container: container.ID, + ContainerConfig: *containerConfig, + Author: c.Author, + Created: h.Created, + }, + RootFS: rootFS, + History: history, + OSFeatures: osFeatures, + OSVersion: osVersion, + }) + + if err != nil { + return "", err + } + + id, err := daemon.imageStore.Create(config) + if err != nil { + return "", err + } + + if container.ImageID != "" { + if err := daemon.imageStore.SetParent(id, container.ImageID); err != nil { + return "", err + } + } + + imageRef := "" + if c.Repo != "" { + newTag, err := reference.ParseNormalizedNamed(c.Repo) // todo: should move this to API layer + if err != nil { + return "", err + } + if !reference.IsNameOnly(newTag) { + return "", errors.Errorf("unexpected repository name: %s", c.Repo) + } + if c.Tag != "" { + if newTag, err = reference.WithTag(newTag, c.Tag); err != nil { + return "", err + } + } + if err := daemon.TagImageWithReference(id, newTag); err != nil { + return "", err + } + imageRef = reference.FamiliarString(newTag) + } + + attributes := map[string]string{ + "comment": c.Comment, + "imageID": id.String(), + "imageRef": imageRef, + } + daemon.LogContainerEventWithAttributes(container, "commit", attributes) + containerActions.WithValues("commit").UpdateSince(start) + return id.String(), nil +} + +func (daemon *Daemon) exportContainerRw(container *container.Container) (io.ReadCloser, error) { + if err := daemon.Mount(container); err != nil { + return nil, err + } + + archive, err := container.RWLayer.TarStream() + if err != nil { + daemon.Unmount(container) // logging is already handled in the `Unmount` function + return nil, err + } + return ioutils.NewReadCloserWrapper(archive, func() error { + archive.Close() + return container.RWLayer.Unmount() + }), + nil +} diff --git a/components/engine/daemon/config/config.go b/components/engine/daemon/config/config.go new file 
mode 100644 index 0000000000..beb147224f --- /dev/null +++ b/components/engine/daemon/config/config.go @@ -0,0 +1,523 @@ +package config + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "reflect" + "runtime" + "strings" + "sync" + + "github.com/Sirupsen/logrus" + daemondiscovery "github.com/docker/docker/daemon/discovery" + "github.com/docker/docker/opts" + "github.com/docker/docker/pkg/authorization" + "github.com/docker/docker/pkg/discovery" + "github.com/docker/docker/registry" + "github.com/imdario/mergo" + "github.com/spf13/pflag" +) + +const ( + // DefaultMaxConcurrentDownloads is the default value for + // maximum number of downloads that + // may take place at a time for each pull. + DefaultMaxConcurrentDownloads = 3 + // DefaultMaxConcurrentUploads is the default value for + // maximum number of uploads that + // may take place at a time for each push. + DefaultMaxConcurrentUploads = 5 + // StockRuntimeName is the reserved name/alias used to represent the + // OCI runtime being shipped with the docker daemon package. + StockRuntimeName = "runc" + // DefaultShmSize is the default value for container's shm size + DefaultShmSize = int64(67108864) + // DefaultNetworkMtu is the default value for network MTU + DefaultNetworkMtu = 1500 + // DisableNetworkBridge is the default value of the option to disable network bridge + DisableNetworkBridge = "none" + // DefaultInitBinary is the name of the default init binary + DefaultInitBinary = "docker-init" +) + +// flatOptions contains configuration keys +// that MUST NOT be parsed as deep structures. +// Use this to differentiate these options +// with others like the ones in CommonTLSOptions. +var flatOptions = map[string]bool{ + "cluster-store-opts": true, + "log-opts": true, + "runtimes": true, + "default-ulimits": true, +} + +// LogConfig represents the default log configuration. +// It includes json tags to deserialize configuration from a file +// using the same names that the flags in the command line use. +type LogConfig struct { + Type string `json:"log-driver,omitempty"` + Config map[string]string `json:"log-opts,omitempty"` +} + +// commonBridgeConfig stores all the platform-common bridge driver specific +// configuration. +type commonBridgeConfig struct { + Iface string `json:"bridge,omitempty"` + FixedCIDR string `json:"fixed-cidr,omitempty"` +} + +// CommonTLSOptions defines TLS configuration for the daemon server. +// It includes json tags to deserialize configuration from a file +// using the same names that the flags in the command line use. +type CommonTLSOptions struct { + CAFile string `json:"tlscacert,omitempty"` + CertFile string `json:"tlscert,omitempty"` + KeyFile string `json:"tlskey,omitempty"` +} + +// CommonConfig defines the configuration of a docker daemon which is +// common across platforms. +// It includes json tags to deserialize configuration from a file +// using the same names that the flags in the command line use. 
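+//
+// An illustrative daemon.json fragment that maps onto the fields below
+// through their json tags (all values are made-up examples):
+//
+//	{
+//		"debug": true,
+//		"data-root": "/var/lib/docker",
+//		"max-concurrent-downloads": 6,
+//		"labels": ["env=example"]
+//	}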
+type CommonConfig struct { + AuthzMiddleware *authorization.Middleware `json:"-"` + AuthorizationPlugins []string `json:"authorization-plugins,omitempty"` // AuthorizationPlugins holds list of authorization plugins + AutoRestart bool `json:"-"` + Context map[string][]string `json:"-"` + DisableBridge bool `json:"-"` + DNS []string `json:"dns,omitempty"` + DNSOptions []string `json:"dns-opts,omitempty"` + DNSSearch []string `json:"dns-search,omitempty"` + ExecOptions []string `json:"exec-opts,omitempty"` + GraphDriver string `json:"storage-driver,omitempty"` + GraphOptions []string `json:"storage-opts,omitempty"` + Labels []string `json:"labels,omitempty"` + Mtu int `json:"mtu,omitempty"` + Pidfile string `json:"pidfile,omitempty"` + RawLogs bool `json:"raw-logs,omitempty"` + RootDeprecated string `json:"graph,omitempty"` + Root string `json:"data-root,omitempty"` + SocketGroup string `json:"group,omitempty"` + CorsHeaders string `json:"api-cors-header,omitempty"` + EnableCors bool `json:"api-enable-cors,omitempty"` + + // TrustKeyPath is used to generate the daemon ID and for signing schema 1 manifests + // when pushing to a registry which does not support schema 2. This field is marked as + // deprecated because schema 1 manifests are deprecated in favor of schema 2 and the + // daemon ID will use a dedicated identifier not shared with exported signatures. + TrustKeyPath string `json:"deprecated-key-path,omitempty"` + + // LiveRestoreEnabled determines whether we should keep containers + // alive upon daemon shutdown/start + LiveRestoreEnabled bool `json:"live-restore,omitempty"` + + // ClusterStore is the storage backend used for the cluster information. It is used by both + // multihost networking (to store networks and endpoints information) and by the node discovery + // mechanism. + ClusterStore string `json:"cluster-store,omitempty"` + + // ClusterOpts is used to pass options to the discovery package for tuning libkv settings, such + // as TLS configuration settings. + ClusterOpts map[string]string `json:"cluster-store-opts,omitempty"` + + // ClusterAdvertise is the network endpoint that the Engine advertises for the purpose of node + // discovery. This should be a 'host:port' combination on which that daemon instance is + // reachable by other hosts. + ClusterAdvertise string `json:"cluster-advertise,omitempty"` + + // MaxConcurrentDownloads is the maximum number of downloads that + // may take place at a time for each pull. + MaxConcurrentDownloads *int `json:"max-concurrent-downloads,omitempty"` + + // MaxConcurrentUploads is the maximum number of uploads that + // may take place at a time for each push. + MaxConcurrentUploads *int `json:"max-concurrent-uploads,omitempty"` + + // ShutdownTimeout is the timeout value (in seconds) the daemon will wait for the container + // to stop when daemon is being shutdown + ShutdownTimeout int `json:"shutdown-timeout,omitempty"` + + Debug bool `json:"debug,omitempty"` + Hosts []string `json:"hosts,omitempty"` + LogLevel string `json:"log-level,omitempty"` + TLS bool `json:"tls,omitempty"` + TLSVerify bool `json:"tlsverify,omitempty"` + + // Embedded structs that allow config + // deserialization without the full struct. + CommonTLSOptions + + // SwarmDefaultAdvertiseAddr is the default host/IP or network interface + // to use if a wildcard address is specified in the ListenAddr value + // given to the /swarm/init endpoint and no advertise address is + // specified. 
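+	// Illustrative values: an interface name such as "eth0", or an address
+	// such as "10.0.0.4".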
+ SwarmDefaultAdvertiseAddr string `json:"swarm-default-advertise-addr"` + MetricsAddress string `json:"metrics-addr"` + + LogConfig + BridgeConfig // bridgeConfig holds bridge network specific configuration. + registry.ServiceOptions + + sync.Mutex + // FIXME(vdemeester) This part is not that clear and is mainly dependent on cli flags + // It should probably be handled outside this package. + ValuesSet map[string]interface{} + + Experimental bool `json:"experimental"` // Experimental indicates whether experimental features should be exposed or not +} + +// IsValueSet returns true if a configuration value +// was explicitly set in the configuration file. +func (conf *Config) IsValueSet(name string) bool { + if conf.ValuesSet == nil { + return false + } + _, ok := conf.ValuesSet[name] + return ok +} + +// New returns a new fully initialized Config struct +func New() *Config { + config := Config{} + config.LogConfig.Config = make(map[string]string) + config.ClusterOpts = make(map[string]string) + + if runtime.GOOS != "linux" { + config.V2Only = true + } + return &config +} + +// ParseClusterAdvertiseSettings parses the specified advertise settings +func ParseClusterAdvertiseSettings(clusterStore, clusterAdvertise string) (string, error) { + if runtime.GOOS == "solaris" && (clusterAdvertise != "" || clusterStore != "") { + return "", errors.New("Cluster Advertise Settings not supported on Solaris") + } + if clusterAdvertise == "" { + return "", daemondiscovery.ErrDiscoveryDisabled + } + if clusterStore == "" { + return "", errors.New("invalid cluster configuration. --cluster-advertise must be accompanied by --cluster-store configuration") + } + + advertise, err := discovery.ParseAdvertise(clusterAdvertise) + if err != nil { + return "", fmt.Errorf("discovery advertise parsing failed (%v)", err) + } + return advertise, nil +} + +// GetConflictFreeLabels validates Labels for conflict +// In swarm the duplicates for labels are removed +// so we only take same values here, no conflict values +// If the key-value is the same we will only take the last label +func GetConflictFreeLabels(labels []string) ([]string, error) { + labelMap := map[string]string{} + for _, label := range labels { + stringSlice := strings.SplitN(label, "=", 2) + if len(stringSlice) > 1 { + // If there is a conflict we will return an error + if v, ok := labelMap[stringSlice[0]]; ok && v != stringSlice[1] { + return nil, fmt.Errorf("conflict labels for %s=%s and %s=%s", stringSlice[0], stringSlice[1], stringSlice[0], v) + } + labelMap[stringSlice[0]] = stringSlice[1] + } + } + + newLabels := []string{} + for k, v := range labelMap { + newLabels = append(newLabels, fmt.Sprintf("%s=%s", k, v)) + } + return newLabels, nil +} + +// Reload reads the configuration in the host and reloads the daemon and server. +func Reload(configFile string, flags *pflag.FlagSet, reload func(*Config)) error { + logrus.Infof("Got signal to reload configuration, reloading from: %s", configFile) + newConfig, err := getConflictFreeConfiguration(configFile, flags) + if err != nil { + return err + } + + if err := Validate(newConfig); err != nil { + return fmt.Errorf("file configuration validation failed (%v)", err) + } + + // Labels of the docker engine used to allow multiple values associated with the same key. + // This is deprecated in 1.13, and, be removed after 3 release cycles. + // The following will check the conflict of labels, and report a warning for deprecation. 
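+	//
+	// For example (illustrative): ["foo=bar", "foo=baz"] is a conflict and
+	// makes GetConflictFreeLabels return an error, while ["foo=bar",
+	// "foo=bar"] is merely collapsed into a single label.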
+ // + // TODO: After 3 release cycles (17.12) an error will be returned, and labels will be + // sanitized to consolidate duplicate key-value pairs (config.Labels = newLabels): + // + // newLabels, err := GetConflictFreeLabels(newConfig.Labels) + // if err != nil { + // return err + // } + // newConfig.Labels = newLabels + // + if _, err := GetConflictFreeLabels(newConfig.Labels); err != nil { + logrus.Warnf("Engine labels with duplicate keys and conflicting values have been deprecated: %s", err) + } + + reload(newConfig) + return nil +} + +// boolValue is an interface that boolean value flags implement +// to tell the command line how to make -name equivalent to -name=true. +type boolValue interface { + IsBoolFlag() bool +} + +// MergeDaemonConfigurations reads a configuration file, +// loads the file configuration in an isolated structure, +// and merges the configuration provided from flags on top +// if there are no conflicts. +func MergeDaemonConfigurations(flagsConfig *Config, flags *pflag.FlagSet, configFile string) (*Config, error) { + fileConfig, err := getConflictFreeConfiguration(configFile, flags) + if err != nil { + return nil, err + } + + if err := Validate(fileConfig); err != nil { + return nil, fmt.Errorf("configuration validation from file failed (%v)", err) + } + + // merge flags configuration on top of the file configuration + if err := mergo.Merge(fileConfig, flagsConfig); err != nil { + return nil, err + } + + // We need to validate again once both fileConfig and flagsConfig + // have been merged + if err := Validate(fileConfig); err != nil { + return nil, fmt.Errorf("merged configuration validation from file and command line flags failed (%v)", err) + } + + return fileConfig, nil +} + +// getConflictFreeConfiguration loads the configuration from a JSON file. +// It compares that configuration with the one provided by the flags, +// and returns an error if there are conflicts. +func getConflictFreeConfiguration(configFile string, flags *pflag.FlagSet) (*Config, error) { + b, err := ioutil.ReadFile(configFile) + if err != nil { + return nil, err + } + + var config Config + var reader io.Reader + if flags != nil { + var jsonConfig map[string]interface{} + reader = bytes.NewReader(b) + if err := json.NewDecoder(reader).Decode(&jsonConfig); err != nil { + return nil, err + } + + configSet := configValuesSet(jsonConfig) + + if err := findConfigurationConflicts(configSet, flags); err != nil { + return nil, err + } + + // Override flag values to make sure the values set in the config file with nullable values, like `false`, + // are not overridden by default truthy values from the flags that were not explicitly set. + // See https://github.com/docker/docker/issues/20289 for an example. + // + // TODO: Rewrite configuration logic to avoid same issue with other nullable values, like numbers. + namedOptions := make(map[string]interface{}) + for key, value := range configSet { + f := flags.Lookup(key) + if f == nil { // ignore named flags that don't match + namedOptions[key] = value + continue + } + + if _, ok := f.Value.(boolValue); ok { + f.Value.Set(fmt.Sprintf("%v", value)) + } + } + if len(namedOptions) > 0 { + // set also default for mergeVal flags that are boolValue at the same time. 
+ flags.VisitAll(func(f *pflag.Flag) { + if opt, named := f.Value.(opts.NamedOption); named { + v, set := namedOptions[opt.Name()] + _, boolean := f.Value.(boolValue) + if set && boolean { + f.Value.Set(fmt.Sprintf("%v", v)) + } + } + }) + } + + config.ValuesSet = configSet + } + + reader = bytes.NewReader(b) + if err := json.NewDecoder(reader).Decode(&config); err != nil { + return nil, err + } + + if config.RootDeprecated != "" { + logrus.Warn(`The "graph" config file option is deprecated. Please use "data-root" instead.`) + + if config.Root != "" { + return nil, fmt.Errorf(`cannot specify both "graph" and "data-root" config file options`) + } + + config.Root = config.RootDeprecated + } + + return &config, nil +} + +// configValuesSet returns the configuration values explicitly set in the file. +func configValuesSet(config map[string]interface{}) map[string]interface{} { + flatten := make(map[string]interface{}) + for k, v := range config { + if m, isMap := v.(map[string]interface{}); isMap && !flatOptions[k] { + for km, vm := range m { + flatten[km] = vm + } + continue + } + + flatten[k] = v + } + return flatten +} + +// findConfigurationConflicts iterates over the provided flags searching for +// duplicated configurations and unknown keys. It returns an error with all the conflicts if +// it finds any. +func findConfigurationConflicts(config map[string]interface{}, flags *pflag.FlagSet) error { + // 1. Search keys from the file that we don't recognize as flags. + unknownKeys := make(map[string]interface{}) + for key, value := range config { + if flag := flags.Lookup(key); flag == nil { + unknownKeys[key] = value + } + } + + // 2. Discard values that implement NamedOption. + // Their configuration name differs from their flag name, like `labels` and `label`. + if len(unknownKeys) > 0 { + unknownNamedConflicts := func(f *pflag.Flag) { + if namedOption, ok := f.Value.(opts.NamedOption); ok { + if _, valid := unknownKeys[namedOption.Name()]; valid { + delete(unknownKeys, namedOption.Name()) + } + } + } + flags.VisitAll(unknownNamedConflicts) + } + + if len(unknownKeys) > 0 { + var unknown []string + for key := range unknownKeys { + unknown = append(unknown, key) + } + return fmt.Errorf("the following directives don't match any configuration option: %s", strings.Join(unknown, ", ")) + } + + var conflicts []string + printConflict := func(name string, flagValue, fileValue interface{}) string { + return fmt.Sprintf("%s: (from flag: %v, from file: %v)", name, flagValue, fileValue) + } + + // 3. Search keys that are present as a flag and as a file option. + duplicatedConflicts := func(f *pflag.Flag) { + // search option name in the json configuration payload if the value is a named option + if namedOption, ok := f.Value.(opts.NamedOption); ok { + if optsValue, ok := config[namedOption.Name()]; ok { + conflicts = append(conflicts, printConflict(namedOption.Name(), f.Value.String(), optsValue)) + } + } else { + // search flag name in the json configuration payload + for _, name := range []string{f.Name, f.Shorthand} { + if value, ok := config[name]; ok { + conflicts = append(conflicts, printConflict(name, f.Value.String(), value)) + break + } + } + } + } + + flags.Visit(duplicatedConflicts) + + if len(conflicts) > 0 { + return fmt.Errorf("the following directives are specified both as a flag and in the configuration file: %s", strings.Join(conflicts, ", ")) + } + return nil +} + +// Validate validates some specific configs. 
+// such as config.DNS, config.Labels, config.DNSSearch, +// as well as config.MaxConcurrentDownloads, config.MaxConcurrentUploads. +func Validate(config *Config) error { + // validate DNS + for _, dns := range config.DNS { + if _, err := opts.ValidateIPAddress(dns); err != nil { + return err + } + } + + // validate DNSSearch + for _, dnsSearch := range config.DNSSearch { + if _, err := opts.ValidateDNSSearch(dnsSearch); err != nil { + return err + } + } + + // validate Labels + for _, label := range config.Labels { + if _, err := opts.ValidateLabel(label); err != nil { + return err + } + } + // validate MaxConcurrentDownloads + if config.MaxConcurrentDownloads != nil && *config.MaxConcurrentDownloads < 0 { + return fmt.Errorf("invalid max concurrent downloads: %d", *config.MaxConcurrentDownloads) + } + // validate MaxConcurrentUploads + if config.MaxConcurrentUploads != nil && *config.MaxConcurrentUploads < 0 { + return fmt.Errorf("invalid max concurrent uploads: %d", *config.MaxConcurrentUploads) + } + + // validate that "default" runtime is not reset + if runtimes := config.GetAllRuntimes(); len(runtimes) > 0 { + if _, ok := runtimes[StockRuntimeName]; ok { + return fmt.Errorf("runtime name '%s' is reserved", StockRuntimeName) + } + } + + if defaultRuntime := config.GetDefaultRuntimeName(); defaultRuntime != "" && defaultRuntime != StockRuntimeName { + runtimes := config.GetAllRuntimes() + if _, ok := runtimes[defaultRuntime]; !ok { + return fmt.Errorf("specified default runtime '%s' does not exist", defaultRuntime) + } + } + + return nil +} + +// ModifiedDiscoverySettings returns whether the discovery configuration has been modified or not. +func ModifiedDiscoverySettings(config *Config, backendType, advertise string, clusterOpts map[string]string) bool { + if config.ClusterStore != backendType || config.ClusterAdvertise != advertise { + return true + } + + if (config.ClusterOpts == nil && clusterOpts == nil) || + (config.ClusterOpts == nil && len(clusterOpts) == 0) || + (len(config.ClusterOpts) == 0 && clusterOpts == nil) { + return false + } + + return !reflect.DeepEqual(config.ClusterOpts, clusterOpts) +} diff --git a/components/engine/daemon/config/config_common_unix.go b/components/engine/daemon/config/config_common_unix.go new file mode 100644 index 0000000000..2e818cb344 --- /dev/null +++ b/components/engine/daemon/config/config_common_unix.go @@ -0,0 +1,73 @@ +// +build solaris linux freebsd + +package config + +import ( + "net" + + "github.com/docker/docker/api/types" +) + +// CommonUnixConfig defines configuration of a docker daemon that is +// common across Unix platforms. 
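+//
+// An illustrative daemon.json fragment for these fields (the runtime name
+// and path are assumptions):
+//
+//	{
+//		"exec-root": "/var/run/docker",
+//		"runtimes": {
+//			"custom": {"path": "/usr/local/bin/my-runc"}
+//		},
+//		"default-runtime": "custom"
+//	}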
+type CommonUnixConfig struct {
+	ExecRoot          string                   `json:"exec-root,omitempty"`
+	ContainerdAddr    string                   `json:"containerd,omitempty"`
+	Runtimes          map[string]types.Runtime `json:"runtimes,omitempty"`
+	DefaultRuntime    string                   `json:"default-runtime,omitempty"`
+	DefaultInitBinary string                   `json:"default-init,omitempty"`
+}
+
+type commonUnixBridgeConfig struct {
+	DefaultIP                   net.IP `json:"ip,omitempty"`
+	IP                          string `json:"bip,omitempty"`
+	DefaultGatewayIPv4          net.IP `json:"default-gateway,omitempty"`
+	DefaultGatewayIPv6          net.IP `json:"default-gateway-v6,omitempty"`
+	InterContainerCommunication bool   `json:"icc,omitempty"`
+}
+
+// GetRuntime returns the runtime path and arguments for a given
+// runtime name
+func (conf *Config) GetRuntime(name string) *types.Runtime {
+	conf.Lock()
+	defer conf.Unlock()
+	if rt, ok := conf.Runtimes[name]; ok {
+		return &rt
+	}
+	return nil
+}
+
+// GetDefaultRuntimeName returns the current default runtime
+func (conf *Config) GetDefaultRuntimeName() string {
+	conf.Lock()
+	rt := conf.DefaultRuntime
+	conf.Unlock()
+
+	return rt
+}
+
+// GetAllRuntimes returns the runtimes map (the caller receives the map itself, not a copy)
+func (conf *Config) GetAllRuntimes() map[string]types.Runtime {
+	conf.Lock()
+	rts := conf.Runtimes
+	conf.Unlock()
+	return rts
+}
+
+// GetExecRoot returns the user-configured Exec-root
+func (conf *Config) GetExecRoot() string {
+	return conf.ExecRoot
+}
+
+// GetInitPath returns the configured docker-init path
+func (conf *Config) GetInitPath() string {
+	conf.Lock()
+	defer conf.Unlock()
+	if conf.InitPath != "" {
+		return conf.InitPath
+	}
+	if conf.DefaultInitBinary != "" {
+		return conf.DefaultInitBinary
+	}
+	return DefaultInitBinary
+}
diff --git a/components/engine/daemon/config/config_common_unix_test.go b/components/engine/daemon/config/config_common_unix_test.go
new file mode 100644
index 0000000000..8647a22063
--- /dev/null
+++ b/components/engine/daemon/config/config_common_unix_test.go
@@ -0,0 +1,84 @@
+// +build !windows
+
+package config
+
+import (
+	"testing"
+
+	"github.com/docker/docker/api/types"
+)
+
+func TestCommonUnixValidateConfigurationErrors(t *testing.T) {
+	testCases := []struct {
+		config *Config
+	}{
+		// Can't override the stock runtime
+		{
+			config: &Config{
+				CommonUnixConfig: CommonUnixConfig{
+					Runtimes: map[string]types.Runtime{
+						StockRuntimeName: {},
+					},
+				},
+			},
+		},
+		// Default runtime should be present in runtimes
+		{
+			config: &Config{
+				CommonUnixConfig: CommonUnixConfig{
+					Runtimes: map[string]types.Runtime{
+						"foo": {},
+					},
+					DefaultRuntime: "bar",
+				},
+			},
+		},
+	}
+	for _, tc := range testCases {
+		err := Validate(tc.config)
+		if err == nil {
+			t.Fatalf("expected error, got nil for config %v", tc.config)
+		}
+	}
+}
+
+func TestCommonUnixGetInitPath(t *testing.T) {
+	testCases := []struct {
+		config           *Config
+		expectedInitPath string
+	}{
+		{
+			config: &Config{
+				InitPath: "some-init-path",
+			},
+			expectedInitPath: "some-init-path",
+		},
+		{
+			config: &Config{
+				CommonUnixConfig: CommonUnixConfig{
+					DefaultInitBinary: "foo-init-bin",
+				},
+			},
+			expectedInitPath: "foo-init-bin",
+		},
+		{
+			config: &Config{
+				InitPath: "init-path-A",
+				CommonUnixConfig: CommonUnixConfig{
+					DefaultInitBinary: "init-path-B",
+				},
+			},
+			expectedInitPath: "init-path-A",
+		},
+		{
+			config:           &Config{},
+			expectedInitPath: "docker-init",
+		},
+	}
+	for _, tc := range testCases {
+		initPath := tc.config.GetInitPath()
+		if initPath != tc.expectedInitPath {
+			t.Fatalf("expected initPath to be %v, got %v", tc.expectedInitPath, initPath)
+		}
+	}
+}
diff
--git a/components/engine/daemon/config/config_solaris.go b/components/engine/daemon/config/config_solaris.go new file mode 100644 index 0000000000..f4f0802701 --- /dev/null +++ b/components/engine/daemon/config/config_solaris.go @@ -0,0 +1,29 @@ +package config + +import ( + "github.com/spf13/pflag" +) + +// Config defines the configuration of a docker daemon. +// These are the configuration settings that you pass +// to the docker daemon when you launch it with say: `docker -d -e lxc` +type Config struct { + CommonConfig + + // These fields are common to all unix platforms. + CommonUnixConfig +} + +// BridgeConfig stores all the bridge driver specific +// configuration. +type BridgeConfig struct { + commonBridgeConfig + + // Fields below here are platform specific. + commonUnixBridgeConfig +} + +// IsSwarmCompatible defines if swarm mode can be enabled in this config +func (conf *Config) IsSwarmCompatible() error { + return nil +} diff --git a/components/engine/daemon/config/config_test.go b/components/engine/daemon/config/config_test.go new file mode 100644 index 0000000000..cc5f01063c --- /dev/null +++ b/components/engine/daemon/config/config_test.go @@ -0,0 +1,391 @@ +package config + +import ( + "io/ioutil" + "os" + "runtime" + "strings" + "testing" + + "github.com/docker/docker/daemon/discovery" + "github.com/docker/docker/opts" + "github.com/docker/docker/pkg/testutil" + "github.com/spf13/pflag" + "github.com/stretchr/testify/assert" +) + +func TestDaemonConfigurationNotFound(t *testing.T) { + _, err := MergeDaemonConfigurations(&Config{}, nil, "/tmp/foo-bar-baz-docker") + if err == nil || !os.IsNotExist(err) { + t.Fatalf("expected does not exist error, got %v", err) + } +} + +func TestDaemonBrokenConfiguration(t *testing.T) { + f, err := ioutil.TempFile("", "docker-config-") + if err != nil { + t.Fatal(err) + } + + configFile := f.Name() + f.Write([]byte(`{"Debug": tru`)) + f.Close() + + _, err = MergeDaemonConfigurations(&Config{}, nil, configFile) + if err == nil { + t.Fatalf("expected error, got %v", err) + } +} + +func TestParseClusterAdvertiseSettings(t *testing.T) { + if runtime.GOOS == "solaris" { + t.Skip("ClusterSettings not supported on Solaris\n") + } + _, err := ParseClusterAdvertiseSettings("something", "") + if err != discovery.ErrDiscoveryDisabled { + t.Fatalf("expected discovery disabled error, got %v\n", err) + } + + _, err = ParseClusterAdvertiseSettings("", "something") + if err == nil { + t.Fatalf("expected discovery store error, got %v\n", err) + } + + _, err = ParseClusterAdvertiseSettings("etcd", "127.0.0.1:8080") + if err != nil { + t.Fatal(err) + } +} + +func TestFindConfigurationConflicts(t *testing.T) { + config := map[string]interface{}{"authorization-plugins": "foobar"} + flags := pflag.NewFlagSet("test", pflag.ContinueOnError) + + flags.String("authorization-plugins", "", "") + assert.NoError(t, flags.Set("authorization-plugins", "asdf")) + + testutil.ErrorContains(t, + findConfigurationConflicts(config, flags), + "authorization-plugins: (from flag: asdf, from file: foobar)") +} + +func TestFindConfigurationConflictsWithNamedOptions(t *testing.T) { + config := map[string]interface{}{"hosts": []string{"qwer"}} + flags := pflag.NewFlagSet("test", pflag.ContinueOnError) + + var hosts []string + flags.VarP(opts.NewNamedListOptsRef("hosts", &hosts, opts.ValidateHost), "host", "H", "Daemon socket(s) to connect to") + assert.NoError(t, flags.Set("host", "tcp://127.0.0.1:4444")) + assert.NoError(t, flags.Set("host", "unix:///var/run/docker.sock")) + + 
testutil.ErrorContains(t, findConfigurationConflicts(config, flags), "hosts")
+}
+
+func TestDaemonConfigurationMergeConflicts(t *testing.T) {
+	f, err := ioutil.TempFile("", "docker-config-")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	configFile := f.Name()
+	f.Write([]byte(`{"debug": true}`))
+	f.Close()
+
+	flags := pflag.NewFlagSet("test", pflag.ContinueOnError)
+	flags.Bool("debug", false, "")
+	flags.Set("debug", "false")
+
+	_, err = MergeDaemonConfigurations(&Config{}, flags, configFile)
+	if err == nil {
+		t.Fatal("expected error, got nil")
+	}
+	if !strings.Contains(err.Error(), "debug") {
+		t.Fatalf("expected debug conflict, got %v", err)
+	}
+}
+
+func TestDaemonConfigurationMergeConcurrent(t *testing.T) {
+	f, err := ioutil.TempFile("", "docker-config-")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	configFile := f.Name()
+	f.Write([]byte(`{"max-concurrent-downloads": 1}`))
+	f.Close()
+
+	_, err = MergeDaemonConfigurations(&Config{}, nil, configFile)
+	if err != nil {
+		t.Fatalf("expected no error, got %v", err)
+	}
+}
+
+func TestDaemonConfigurationMergeConcurrentError(t *testing.T) {
+	f, err := ioutil.TempFile("", "docker-config-")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	configFile := f.Name()
+	f.Write([]byte(`{"max-concurrent-downloads": -1}`))
+	f.Close()
+
+	_, err = MergeDaemonConfigurations(&Config{}, nil, configFile)
+	if err == nil {
+		t.Fatal("expected error, got nil")
+	}
+}
+
+func TestDaemonConfigurationMergeConflictsWithInnerStructs(t *testing.T) {
+	f, err := ioutil.TempFile("", "docker-config-")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	configFile := f.Name()
+	f.Write([]byte(`{"tlscacert": "/etc/certificates/ca.pem"}`))
+	f.Close()
+
+	flags := pflag.NewFlagSet("test", pflag.ContinueOnError)
+	flags.String("tlscacert", "", "")
+	flags.Set("tlscacert", "~/.docker/ca.pem")
+
+	_, err = MergeDaemonConfigurations(&Config{}, flags, configFile)
+	if err == nil {
+		t.Fatal("expected error, got nil")
+	}
+	if !strings.Contains(err.Error(), "tlscacert") {
+		t.Fatalf("expected tlscacert conflict, got %v", err)
+	}
+}
+
+func TestFindConfigurationConflictsWithUnknownKeys(t *testing.T) {
+	config := map[string]interface{}{"tls-verify": "true"}
+	flags := pflag.NewFlagSet("test", pflag.ContinueOnError)
+
+	flags.Bool("tlsverify", false, "")
+	err := findConfigurationConflicts(config, flags)
+	if err == nil {
+		t.Fatal("expected error, got nil")
+	}
+	if !strings.Contains(err.Error(), "the following directives don't match any configuration option: tls-verify") {
+		t.Fatalf("expected tls-verify conflict, got %v", err)
+	}
+}
+
+func TestFindConfigurationConflictsWithMergedValues(t *testing.T) {
+	var hosts []string
+	config := map[string]interface{}{"hosts": "tcp://127.0.0.1:2345"}
+	flags := pflag.NewFlagSet("base", pflag.ContinueOnError)
+	flags.VarP(opts.NewNamedListOptsRef("hosts", &hosts, nil), "host", "H", "")
+
+	err := findConfigurationConflicts(config, flags)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	flags.Set("host", "unix:///var/run/docker.sock")
+	err = findConfigurationConflicts(config, flags)
+	if err == nil {
+		t.Fatal("expected error, got nil")
+	}
+	if !strings.Contains(err.Error(), "hosts: (from flag: [unix:///var/run/docker.sock], from file: tcp://127.0.0.1:2345)") {
+		t.Fatalf("expected hosts conflict, got %v", err)
+	}
+}
+
+func TestValidateConfigurationErrors(t *testing.T) {
+	minusNumber := -10
+	testCases := []struct {
+		config *Config
+	}{
+		{
+			config: &Config{
+				CommonConfig: CommonConfig{
+					Labels: []string{"one"},
+				},
+			},
+		},
+		{
+			config:
&Config{ + CommonConfig: CommonConfig{ + Labels: []string{"foo=bar", "one"}, + }, + }, + }, + { + config: &Config{ + CommonConfig: CommonConfig{ + DNS: []string{"1.1.1.1o"}, + }, + }, + }, + { + config: &Config{ + CommonConfig: CommonConfig{ + DNS: []string{"2.2.2.2", "1.1.1.1o"}, + }, + }, + }, + { + config: &Config{ + CommonConfig: CommonConfig{ + DNSSearch: []string{"123456"}, + }, + }, + }, + { + config: &Config{ + CommonConfig: CommonConfig{ + DNSSearch: []string{"a.b.c", "123456"}, + }, + }, + }, + { + config: &Config{ + CommonConfig: CommonConfig{ + MaxConcurrentDownloads: &minusNumber, + // This is weird... + ValuesSet: map[string]interface{}{ + "max-concurrent-downloads": -1, + }, + }, + }, + }, + { + config: &Config{ + CommonConfig: CommonConfig{ + MaxConcurrentUploads: &minusNumber, + // This is weird... + ValuesSet: map[string]interface{}{ + "max-concurrent-uploads": -1, + }, + }, + }, + }, + } + for _, tc := range testCases { + err := Validate(tc.config) + if err == nil { + t.Fatalf("expected error, got nil for config %v", tc.config) + } + } +} + +func TestValidateConfiguration(t *testing.T) { + minusNumber := 4 + testCases := []struct { + config *Config + }{ + { + config: &Config{ + CommonConfig: CommonConfig{ + Labels: []string{"one=two"}, + }, + }, + }, + { + config: &Config{ + CommonConfig: CommonConfig{ + DNS: []string{"1.1.1.1"}, + }, + }, + }, + { + config: &Config{ + CommonConfig: CommonConfig{ + DNSSearch: []string{"a.b.c"}, + }, + }, + }, + { + config: &Config{ + CommonConfig: CommonConfig{ + MaxConcurrentDownloads: &minusNumber, + // This is weird... + ValuesSet: map[string]interface{}{ + "max-concurrent-downloads": -1, + }, + }, + }, + }, + { + config: &Config{ + CommonConfig: CommonConfig{ + MaxConcurrentUploads: &minusNumber, + // This is weird... 
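+				// ("weird" because ValuesSet is normally derived from the
+				// config file by configValuesSet during the merge; Validate
+				// only inspects the MaxConcurrentUploads pointer above, so
+				// this hand-written map is not what triggers the expected
+				// failure.)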
+ ValuesSet: map[string]interface{}{ + "max-concurrent-uploads": -1, + }, + }, + }, + }, + } + for _, tc := range testCases { + err := Validate(tc.config) + if err != nil { + t.Fatalf("expected no error, got error %v", err) + } + } +} + +func TestModifiedDiscoverySettings(t *testing.T) { + cases := []struct { + current *Config + modified *Config + expected bool + }{ + { + current: discoveryConfig("foo", "bar", map[string]string{}), + modified: discoveryConfig("foo", "bar", map[string]string{}), + expected: false, + }, + { + current: discoveryConfig("foo", "bar", map[string]string{"foo": "bar"}), + modified: discoveryConfig("foo", "bar", map[string]string{"foo": "bar"}), + expected: false, + }, + { + current: discoveryConfig("foo", "bar", map[string]string{}), + modified: discoveryConfig("foo", "bar", nil), + expected: false, + }, + { + current: discoveryConfig("foo", "bar", nil), + modified: discoveryConfig("foo", "bar", map[string]string{}), + expected: false, + }, + { + current: discoveryConfig("foo", "bar", nil), + modified: discoveryConfig("baz", "bar", nil), + expected: true, + }, + { + current: discoveryConfig("foo", "bar", nil), + modified: discoveryConfig("foo", "baz", nil), + expected: true, + }, + { + current: discoveryConfig("foo", "bar", nil), + modified: discoveryConfig("foo", "bar", map[string]string{"foo": "bar"}), + expected: true, + }, + } + + for _, c := range cases { + got := ModifiedDiscoverySettings(c.current, c.modified.ClusterStore, c.modified.ClusterAdvertise, c.modified.ClusterOpts) + if c.expected != got { + t.Fatalf("expected %v, got %v: current config %v, new config %v", c.expected, got, c.current, c.modified) + } + } +} + +func discoveryConfig(backendAddr, advertiseAddr string, opts map[string]string) *Config { + return &Config{ + CommonConfig: CommonConfig{ + ClusterStore: backendAddr, + ClusterAdvertise: advertiseAddr, + ClusterOpts: opts, + }, + } +} diff --git a/components/engine/daemon/config/config_unix.go b/components/engine/daemon/config/config_unix.go new file mode 100644 index 0000000000..8f1da59198 --- /dev/null +++ b/components/engine/daemon/config/config_unix.go @@ -0,0 +1,63 @@ +// +build linux freebsd + +package config + +import ( + "fmt" + + "github.com/docker/docker/opts" + units "github.com/docker/go-units" +) + +// Config defines the configuration of a docker daemon. +// It includes json tags to deserialize configuration from a file +// using the same names that the flags in the command line uses. +type Config struct { + CommonConfig + + // These fields are common to all unix platforms. + CommonUnixConfig + + // Fields below here are platform specific. + CgroupParent string `json:"cgroup-parent,omitempty"` + EnableSelinuxSupport bool `json:"selinux-enabled,omitempty"` + RemappedRoot string `json:"userns-remap,omitempty"` + Ulimits map[string]*units.Ulimit `json:"default-ulimits,omitempty"` + CPURealtimePeriod int64 `json:"cpu-rt-period,omitempty"` + CPURealtimeRuntime int64 `json:"cpu-rt-runtime,omitempty"` + OOMScoreAdjust int `json:"oom-score-adjust,omitempty"` + Init bool `json:"init,omitempty"` + InitPath string `json:"init-path,omitempty"` + SeccompProfile string `json:"seccomp-profile,omitempty"` + ShmSize opts.MemBytes `json:"default-shm-size,omitempty"` + NoNewPrivileges bool `json:"no-new-privileges,omitempty"` +} + +// BridgeConfig stores all the bridge driver specific +// configuration. +type BridgeConfig struct { + commonBridgeConfig + + // These fields are common to all unix platforms. 
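+	// Since these structs are embedded, encoding/json promotes their tagged
+	// fields to the top level and the daemon configuration stays one flat
+	// JSON object. A minimal sketch of the mechanism (hypothetical types,
+	// not from this package):
+	//
+	//	type common struct {
+	//		IP string `json:"bip,omitempty"`
+	//	}
+	//	type bridge struct {
+	//		common
+	//		EnableIPv6 bool `json:"ipv6,omitempty"`
+	//	}
+	//	// json.Unmarshal([]byte(`{"bip": "172.20.0.1/16", "ipv6": true}`), &b)
+	//	// fills both common.IP and bridge.EnableIPv6.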
+ commonUnixBridgeConfig + + // Fields below here are platform specific. + EnableIPv6 bool `json:"ipv6,omitempty"` + EnableIPTables bool `json:"iptables,omitempty"` + EnableIPForward bool `json:"ip-forward,omitempty"` + EnableIPMasq bool `json:"ip-masq,omitempty"` + EnableUserlandProxy bool `json:"userland-proxy,omitempty"` + UserlandProxyPath string `json:"userland-proxy-path,omitempty"` + FixedCIDRv6 string `json:"fixed-cidr-v6,omitempty"` +} + +// IsSwarmCompatible defines if swarm mode can be enabled in this config +func (conf *Config) IsSwarmCompatible() error { + if conf.ClusterStore != "" || conf.ClusterAdvertise != "" { + return fmt.Errorf("--cluster-store and --cluster-advertise daemon configurations are incompatible with swarm mode") + } + if conf.LiveRestoreEnabled { + return fmt.Errorf("--live-restore daemon configuration is incompatible with swarm mode") + } + return nil +} diff --git a/components/engine/daemon/config/config_unix_test.go b/components/engine/daemon/config/config_unix_test.go new file mode 100644 index 0000000000..3e7d1be94a --- /dev/null +++ b/components/engine/daemon/config/config_unix_test.go @@ -0,0 +1,111 @@ +// +build !windows + +package config + +import ( + "io/ioutil" + "runtime" + + "testing" +) + +func TestDaemonConfigurationMerge(t *testing.T) { + f, err := ioutil.TempFile("", "docker-config-") + if err != nil { + t.Fatal(err) + } + + configFile := f.Name() + + f.Write([]byte(` + { + "debug": true, + "default-ulimits": { + "nofile": { + "Name": "nofile", + "Hard": 2048, + "Soft": 1024 + } + }, + "log-opts": { + "tag": "test_tag" + } + }`)) + + f.Close() + + c := &Config{ + CommonConfig: CommonConfig{ + AutoRestart: true, + LogConfig: LogConfig{ + Type: "syslog", + Config: map[string]string{"tag": "test"}, + }, + }, + } + + cc, err := MergeDaemonConfigurations(c, nil, configFile) + if err != nil { + t.Fatal(err) + } + if !cc.Debug { + t.Fatalf("expected %v, got %v\n", true, cc.Debug) + } + if !cc.AutoRestart { + t.Fatalf("expected %v, got %v\n", true, cc.AutoRestart) + } + if cc.LogConfig.Type != "syslog" { + t.Fatalf("expected syslog config, got %q\n", cc.LogConfig) + } + + if configValue, OK := cc.LogConfig.Config["tag"]; !OK { + t.Fatal("expected syslog config attributes, got nil\n") + } else { + if configValue != "test_tag" { + t.Fatalf("expected syslog config attributes 'tag=test_tag', got 'tag=%s'\n", configValue) + } + } + + if cc.Ulimits == nil { + t.Fatal("expected default ulimit config, got nil\n") + } else { + if _, OK := cc.Ulimits["nofile"]; OK { + if cc.Ulimits["nofile"].Name != "nofile" || + cc.Ulimits["nofile"].Hard != 2048 || + cc.Ulimits["nofile"].Soft != 1024 { + t.Fatalf("expected default ulimit name, hard and soft are nofile, 2048, 1024, got %s, %d, %d\n", cc.Ulimits["nofile"].Name, cc.Ulimits["nofile"].Hard, cc.Ulimits["nofile"].Soft) + } + } else { + t.Fatal("expected default ulimit name nofile, got nil\n") + } + } +} + +func TestDaemonConfigurationMergeShmSize(t *testing.T) { + if runtime.GOOS == "solaris" { + t.Skip("ShmSize not supported on Solaris\n") + } + f, err := ioutil.TempFile("", "docker-config-") + if err != nil { + t.Fatal(err) + } + + configFile := f.Name() + + f.Write([]byte(` + { + "default-shm-size": "1g" + }`)) + + f.Close() + + c := &Config{} + cc, err := MergeDaemonConfigurations(c, nil, configFile) + if err != nil { + t.Fatal(err) + } + expectedValue := 1 * 1024 * 1024 * 1024 + if cc.ShmSize.Value() != int64(expectedValue) { + t.Fatalf("expected default shm size %d, got %d", expectedValue, cc.ShmSize.Value()) 
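+		// opts.MemBytes accepts the usual byte-size suffixes, so the same
+		// expectation holds for any equivalent spelling of one gigabyte;
+		// a sketch of the parsing this test relies on:
+		//
+		//	var shm opts.MemBytes
+		//	_ = shm.Set("1024m")          // equivalent to "1g"
+		//	_ = shm.Value() == 1073741824 // true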
+ } +} diff --git a/components/engine/daemon/config/config_windows.go b/components/engine/daemon/config/config_windows.go new file mode 100644 index 0000000000..849acc1ac6 --- /dev/null +++ b/components/engine/daemon/config/config_windows.go @@ -0,0 +1,52 @@ +package config + +import ( + "github.com/docker/docker/api/types" +) + +// BridgeConfig stores all the bridge driver specific +// configuration. +type BridgeConfig struct { + commonBridgeConfig +} + +// Config defines the configuration of a docker daemon. +// These are the configuration settings that you pass +// to the docker daemon when you launch it with say: `dockerd -e windows` +type Config struct { + CommonConfig + + // Fields below here are platform specific. (There are none presently + // for the Windows daemon.) +} + +// GetRuntime returns the runtime path and arguments for a given +// runtime name +func (conf *Config) GetRuntime(name string) *types.Runtime { + return nil +} + +// GetInitPath returns the configure docker-init path +func (conf *Config) GetInitPath() string { + return "" +} + +// GetDefaultRuntimeName returns the current default runtime +func (conf *Config) GetDefaultRuntimeName() string { + return StockRuntimeName +} + +// GetAllRuntimes returns a copy of the runtimes map +func (conf *Config) GetAllRuntimes() map[string]types.Runtime { + return map[string]types.Runtime{} +} + +// GetExecRoot returns the user configured Exec-root +func (conf *Config) GetExecRoot() string { + return "" +} + +// IsSwarmCompatible defines if swarm mode can be enabled in this config +func (conf *Config) IsSwarmCompatible() error { + return nil +} diff --git a/components/engine/daemon/config/config_windows_test.go b/components/engine/daemon/config/config_windows_test.go new file mode 100644 index 0000000000..1c435c630a --- /dev/null +++ b/components/engine/daemon/config/config_windows_test.go @@ -0,0 +1,59 @@ +// +build windows + +package config + +import ( + "io/ioutil" + "testing" +) + +func TestDaemonConfigurationMerge(t *testing.T) { + f, err := ioutil.TempFile("", "docker-config-") + if err != nil { + t.Fatal(err) + } + + configFile := f.Name() + + f.Write([]byte(` + { + "debug": true, + "log-opts": { + "tag": "test_tag" + } + }`)) + + f.Close() + + c := &Config{ + CommonConfig: CommonConfig{ + AutoRestart: true, + LogConfig: LogConfig{ + Type: "syslog", + Config: map[string]string{"tag": "test"}, + }, + }, + } + + cc, err := MergeDaemonConfigurations(c, nil, configFile) + if err != nil { + t.Fatal(err) + } + if !cc.Debug { + t.Fatalf("expected %v, got %v\n", true, cc.Debug) + } + if !cc.AutoRestart { + t.Fatalf("expected %v, got %v\n", true, cc.AutoRestart) + } + if cc.LogConfig.Type != "syslog" { + t.Fatalf("expected syslog config, got %q\n", cc.LogConfig) + } + + if configValue, OK := cc.LogConfig.Config["tag"]; !OK { + t.Fatal("expected syslog config attributes, got nil\n") + } else { + if configValue != "test_tag" { + t.Fatalf("expected syslog config attributes 'tag=test_tag', got 'tag=%s'\n", configValue) + } + } +} diff --git a/components/engine/daemon/configs.go b/components/engine/daemon/configs.go new file mode 100644 index 0000000000..31da56b2d3 --- /dev/null +++ b/components/engine/daemon/configs.go @@ -0,0 +1,23 @@ +package daemon + +import ( + "github.com/Sirupsen/logrus" + swarmtypes "github.com/docker/docker/api/types/swarm" +) + +// SetContainerConfigReferences sets the container config references needed +func (daemon *Daemon) SetContainerConfigReferences(name string, refs []*swarmtypes.ConfigReference) error { 
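+	// References typically originate from a swarm task spec; a sketch of a
+	// call (hypothetical names and IDs):
+	//
+	//	refs := []*swarmtypes.ConfigReference{
+	//		{ConfigID: "x1y2z3", ConfigName: "site.conf"},
+	//	}
+	//	err := daemon.SetContainerConfigReferences("web.1.abc123", refs)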
+ if !configsSupported() && len(refs) > 0 { + logrus.Warn("configs are not supported on this platform") + return nil + } + + c, err := daemon.GetContainer(name) + if err != nil { + return err + } + + c.ConfigReferences = refs + + return nil +} diff --git a/components/engine/daemon/configs_linux.go b/components/engine/daemon/configs_linux.go new file mode 100644 index 0000000000..af20ad78bd --- /dev/null +++ b/components/engine/daemon/configs_linux.go @@ -0,0 +1,7 @@ +// +build linux + +package daemon + +func configsSupported() bool { + return true +} diff --git a/components/engine/daemon/configs_unsupported.go b/components/engine/daemon/configs_unsupported.go new file mode 100644 index 0000000000..1a7cbc9dc7 --- /dev/null +++ b/components/engine/daemon/configs_unsupported.go @@ -0,0 +1,7 @@ +// +build !linux,!windows + +package daemon + +func configsSupported() bool { + return false +} diff --git a/components/engine/daemon/configs_windows.go b/components/engine/daemon/configs_windows.go new file mode 100644 index 0000000000..7cb2e9c438 --- /dev/null +++ b/components/engine/daemon/configs_windows.go @@ -0,0 +1,7 @@ +// +build windows + +package daemon + +func configsSupported() bool { + return true +} diff --git a/components/engine/daemon/container.go b/components/engine/daemon/container.go new file mode 100644 index 0000000000..6a660ba4f3 --- /dev/null +++ b/components/engine/daemon/container.go @@ -0,0 +1,311 @@ +package daemon + +import ( + "fmt" + "os" + "path/filepath" + "time" + + "github.com/docker/docker/api/errors" + containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/strslice" + "github.com/docker/docker/container" + "github.com/docker/docker/daemon/network" + "github.com/docker/docker/image" + "github.com/docker/docker/opts" + "github.com/docker/docker/pkg/signal" + "github.com/docker/docker/pkg/system" + "github.com/docker/docker/pkg/truncindex" + "github.com/docker/docker/runconfig" + "github.com/docker/go-connections/nat" +) + +// GetContainer looks for a container using the provided information, which could be +// one of the following inputs from the caller: +// - A full container ID, which will exact match a container in daemon's list +// - A container name, which will only exact match via the GetByName() function +// - A partial container ID prefix (e.g. 
short ID) of any length that is
+// unique enough to only return a single container object
+// If none of these searches succeed, an error is returned
+func (daemon *Daemon) GetContainer(prefixOrName string) (*container.Container, error) {
+	if len(prefixOrName) == 0 {
+		return nil, errors.NewBadRequestError(fmt.Errorf("No container name or ID supplied"))
+	}
+
+	if containerByID := daemon.containers.Get(prefixOrName); containerByID != nil {
+		// prefix is an exact match to a full container ID
+		return containerByID, nil
+	}
+
+	// GetByName will match only an exact name provided; we ignore errors
+	if containerByName, _ := daemon.GetByName(prefixOrName); containerByName != nil {
+		// prefix is an exact match to a full container Name
+		return containerByName, nil
+	}
+
+	containerID, indexError := daemon.idIndex.Get(prefixOrName)
+	if indexError != nil {
+		// When truncindex defines an error type, use that instead
+		if indexError == truncindex.ErrNotExist {
+			err := fmt.Errorf("No such container: %s", prefixOrName)
+			return nil, errors.NewRequestNotFoundError(err)
+		}
+		return nil, indexError
+	}
+	return daemon.containers.Get(containerID), nil
+}
+
+// checkContainer makes sure the specified container satisfies the specified conditions
+func (daemon *Daemon) checkContainer(container *container.Container, conditions ...func(*container.Container) error) error {
+	for _, condition := range conditions {
+		if err := condition(container); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// Exists returns true if a container of the specified ID or name exists,
+// false otherwise.
+func (daemon *Daemon) Exists(id string) bool {
+	c, _ := daemon.GetContainer(id)
+	return c != nil
+}
+
+// IsPaused returns a bool indicating if the specified container is paused.
+func (daemon *Daemon) IsPaused(id string) bool {
+	c, _ := daemon.GetContainer(id)
+	return c.State.IsPaused()
+}
+
+func (daemon *Daemon) containerRoot(id string) string {
+	return filepath.Join(daemon.repository, id)
+}
+
+// load reads the contents of a container from disk.
+// This is typically done at startup.
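+//
+// A sketch of the on-disk layout it reads from (the repository path shown
+// is the conventional default and an assumption here):
+//
+//	// daemon.repository        -> /var/lib/docker/containers
+//	// daemon.containerRoot(id) -> /var/lib/docker/containers/<id>
+//	c, err := daemon.load("4e3b6a5124c9...") // full container ID required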
+func (daemon *Daemon) load(id string) (*container.Container, error) { + container := daemon.newBaseContainer(id) + + if err := container.FromDisk(); err != nil { + return nil, err + } + + if container.ID != id { + return container, fmt.Errorf("Container %s is stored at %s", container.ID, id) + } + + return container, nil +} + +// Register makes a container object usable by the daemon as +func (daemon *Daemon) Register(c *container.Container) { + // Attach to stdout and stderr + if c.Config.OpenStdin { + c.StreamConfig.NewInputPipes() + } else { + c.StreamConfig.NewNopInputPipe() + } + + daemon.containers.Add(c.ID, c) + daemon.idIndex.Add(c.ID) +} + +func (daemon *Daemon) newContainer(name string, config *containertypes.Config, hostConfig *containertypes.HostConfig, imgID image.ID, managed bool) (*container.Container, error) { + var ( + id string + err error + noExplicitName = name == "" + ) + id, name, err = daemon.generateIDAndName(name) + if err != nil { + return nil, err + } + + if hostConfig.NetworkMode.IsHost() { + if config.Hostname == "" { + config.Hostname, err = os.Hostname() + if err != nil { + return nil, err + } + } + } else { + daemon.generateHostname(id, config) + } + entrypoint, args := daemon.getEntrypointAndArgs(config.Entrypoint, config.Cmd) + + base := daemon.newBaseContainer(id) + base.Created = time.Now().UTC() + base.Managed = managed + base.Path = entrypoint + base.Args = args //FIXME: de-duplicate from config + base.Config = config + base.HostConfig = &containertypes.HostConfig{} + base.ImageID = imgID + base.NetworkSettings = &network.Settings{IsAnonymousEndpoint: noExplicitName} + base.Name = name + base.Driver = daemon.GraphDriverName() + + return base, err +} + +// GetByName returns a container given a name. +func (daemon *Daemon) GetByName(name string) (*container.Container, error) { + if len(name) == 0 { + return nil, fmt.Errorf("No container name supplied") + } + fullName := name + if name[0] != '/' { + fullName = "/" + name + } + id, err := daemon.nameIndex.Get(fullName) + if err != nil { + return nil, fmt.Errorf("Could not find entity for %s", name) + } + e := daemon.containers.Get(id) + if e == nil { + return nil, fmt.Errorf("Could not find container for entity id %s", id) + } + return e, nil +} + +// newBaseContainer creates a new container with its initial +// configuration based on the root storage from the daemon. +func (daemon *Daemon) newBaseContainer(id string) *container.Container { + return container.NewBaseContainer(id, daemon.containerRoot(id)) +} + +func (daemon *Daemon) getEntrypointAndArgs(configEntrypoint strslice.StrSlice, configCmd strslice.StrSlice) (string, []string) { + if len(configEntrypoint) != 0 { + return configEntrypoint[0], append(configEntrypoint[1:], configCmd...) 
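+		// e.g. (sketch): ENTRYPOINT ["nginx", "-g"] with CMD ["daemon off;"]
+		// yields:
+		//
+		//	path, args := daemon.getEntrypointAndArgs(
+		//		strslice.StrSlice{"nginx", "-g"},
+		//		strslice.StrSlice{"daemon off;"},
+		//	)
+		//	// path == "nginx", args == []string{"-g", "daemon off;"}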
+ } + return configCmd[0], configCmd[1:] +} + +func (daemon *Daemon) generateHostname(id string, config *containertypes.Config) { + // Generate default hostname + if config.Hostname == "" { + config.Hostname = id[:12] + } +} + +func (daemon *Daemon) setSecurityOptions(container *container.Container, hostConfig *containertypes.HostConfig) error { + container.Lock() + defer container.Unlock() + return daemon.parseSecurityOpt(container, hostConfig) +} + +func (daemon *Daemon) setHostConfig(container *container.Container, hostConfig *containertypes.HostConfig) error { + // Do not lock while creating volumes since this could be calling out to external plugins + // Don't want to block other actions, like `docker ps` because we're waiting on an external plugin + if err := daemon.registerMountPoints(container, hostConfig); err != nil { + return err + } + + container.Lock() + defer container.Unlock() + + // Register any links from the host config before starting the container + if err := daemon.registerLinks(container, hostConfig); err != nil { + return err + } + + runconfig.SetDefaultNetModeIfBlank(hostConfig) + container.HostConfig = hostConfig + return container.ToDisk() +} + +// verifyContainerSettings performs validation of the hostconfig and config +// structures. +func (daemon *Daemon) verifyContainerSettings(hostConfig *containertypes.HostConfig, config *containertypes.Config, update bool) ([]string, error) { + + // First perform verification of settings common across all platforms. + if config != nil { + if config.WorkingDir != "" { + config.WorkingDir = filepath.FromSlash(config.WorkingDir) // Ensure in platform semantics + if !system.IsAbs(config.WorkingDir) { + return nil, fmt.Errorf("the working directory '%s' is invalid, it needs to be an absolute path", config.WorkingDir) + } + } + + if len(config.StopSignal) > 0 { + _, err := signal.ParseSignal(config.StopSignal) + if err != nil { + return nil, err + } + } + + // Validate if Env contains empty variable or not (e.g., ``, `=foo`) + for _, env := range config.Env { + if _, err := opts.ValidateEnv(env); err != nil { + return nil, err + } + } + + // Validate the healthcheck params of Config + if config.Healthcheck != nil { + if config.Healthcheck.Interval != 0 && config.Healthcheck.Interval < containertypes.MinimumDuration { + return nil, fmt.Errorf("Interval in Healthcheck cannot be less than %s", containertypes.MinimumDuration) + } + + if config.Healthcheck.Timeout != 0 && config.Healthcheck.Timeout < containertypes.MinimumDuration { + return nil, fmt.Errorf("Timeout in Healthcheck cannot be less than %s", containertypes.MinimumDuration) + } + + if config.Healthcheck.Retries < 0 { + return nil, fmt.Errorf("Retries in Healthcheck cannot be negative") + } + + if config.Healthcheck.StartPeriod != 0 && config.Healthcheck.StartPeriod < containertypes.MinimumDuration { + return nil, fmt.Errorf("StartPeriod in Healthcheck cannot be less than %s", containertypes.MinimumDuration) + } + } + } + + if hostConfig == nil { + return nil, nil + } + + if hostConfig.AutoRemove && !hostConfig.RestartPolicy.IsNone() { + return nil, fmt.Errorf("can't create 'AutoRemove' container with restart policy") + } + + for _, extraHost := range hostConfig.ExtraHosts { + if _, err := opts.ValidateExtraHost(extraHost); err != nil { + return nil, err + } + } + + for port := range hostConfig.PortBindings { + _, portStr := nat.SplitProtoPort(string(port)) + if _, err := nat.ParsePort(portStr); err != nil { + return nil, fmt.Errorf("invalid port specification: %q", 
portStr) + } + for _, pb := range hostConfig.PortBindings[port] { + _, err := nat.NewPort(nat.SplitProtoPort(pb.HostPort)) + if err != nil { + return nil, fmt.Errorf("invalid port specification: %q", pb.HostPort) + } + } + } + + p := hostConfig.RestartPolicy + + switch p.Name { + case "always", "unless-stopped", "no": + if p.MaximumRetryCount != 0 { + return nil, fmt.Errorf("maximum retry count cannot be used with restart policy '%s'", p.Name) + } + case "on-failure": + if p.MaximumRetryCount < 0 { + return nil, fmt.Errorf("maximum retry count cannot be negative") + } + case "": + // do nothing + default: + return nil, fmt.Errorf("invalid restart policy '%s'", p.Name) + } + + // Now do platform-specific verification + return verifyPlatformContainerSettings(daemon, hostConfig, config, update) +} diff --git a/components/engine/daemon/container_linux.go b/components/engine/daemon/container_linux.go new file mode 100644 index 0000000000..2c8771575a --- /dev/null +++ b/components/engine/daemon/container_linux.go @@ -0,0 +1,29 @@ +//+build !windows + +package daemon + +import ( + "github.com/docker/docker/container" +) + +func (daemon *Daemon) saveApparmorConfig(container *container.Container) error { + container.AppArmorProfile = "" //we don't care about the previous value. + + if !daemon.apparmorEnabled { + return nil // if apparmor is disabled there is nothing to do here. + } + + if err := parseSecurityOpt(container, container.HostConfig); err != nil { + return err + } + + if !container.HostConfig.Privileged { + if container.AppArmorProfile == "" { + container.AppArmorProfile = defaultApparmorProfile + } + + } else { + container.AppArmorProfile = "unconfined" + } + return nil +} diff --git a/components/engine/daemon/container_operations.go b/components/engine/daemon/container_operations.go new file mode 100644 index 0000000000..82ca4701af --- /dev/null +++ b/components/engine/daemon/container_operations.go @@ -0,0 +1,1080 @@ +package daemon + +import ( + "errors" + "fmt" + "net" + "os" + "path" + "runtime" + "strings" + "time" + + "github.com/Sirupsen/logrus" + derr "github.com/docker/docker/api/errors" + containertypes "github.com/docker/docker/api/types/container" + networktypes "github.com/docker/docker/api/types/network" + "github.com/docker/docker/container" + "github.com/docker/docker/daemon/network" + "github.com/docker/docker/opts" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/runconfig" + "github.com/docker/go-connections/nat" + "github.com/docker/libnetwork" + "github.com/docker/libnetwork/netlabel" + "github.com/docker/libnetwork/options" + "github.com/docker/libnetwork/types" +) + +var ( + // ErrRootFSReadOnly is returned when a container + // rootfs is marked readonly. 
+ ErrRootFSReadOnly = errors.New("container rootfs is marked read-only") + getPortMapInfo = container.GetSandboxPortMapInfo +) + +func (daemon *Daemon) getDNSSearchSettings(container *container.Container) []string { + if len(container.HostConfig.DNSSearch) > 0 { + return container.HostConfig.DNSSearch + } + + if len(daemon.configStore.DNSSearch) > 0 { + return daemon.configStore.DNSSearch + } + + return nil +} +func (daemon *Daemon) buildSandboxOptions(container *container.Container) ([]libnetwork.SandboxOption, error) { + var ( + sboxOptions []libnetwork.SandboxOption + err error + dns []string + dnsOptions []string + bindings = make(nat.PortMap) + pbList []types.PortBinding + exposeList []types.TransportPort + ) + + defaultNetName := runconfig.DefaultDaemonNetworkMode().NetworkName() + sboxOptions = append(sboxOptions, libnetwork.OptionHostname(container.Config.Hostname), + libnetwork.OptionDomainname(container.Config.Domainname)) + + if container.HostConfig.NetworkMode.IsHost() { + sboxOptions = append(sboxOptions, libnetwork.OptionUseDefaultSandbox()) + if len(container.HostConfig.ExtraHosts) == 0 { + sboxOptions = append(sboxOptions, libnetwork.OptionOriginHostsPath("/etc/hosts")) + } + if len(container.HostConfig.DNS) == 0 && len(daemon.configStore.DNS) == 0 && + len(container.HostConfig.DNSSearch) == 0 && len(daemon.configStore.DNSSearch) == 0 && + len(container.HostConfig.DNSOptions) == 0 && len(daemon.configStore.DNSOptions) == 0 { + sboxOptions = append(sboxOptions, libnetwork.OptionOriginResolvConfPath("/etc/resolv.conf")) + } + } else { + // OptionUseExternalKey is mandatory for userns support. + // But optional for non-userns support + sboxOptions = append(sboxOptions, libnetwork.OptionUseExternalKey()) + } + + if err = setupPathsAndSandboxOptions(container, &sboxOptions); err != nil { + return nil, err + } + + if len(container.HostConfig.DNS) > 0 { + dns = container.HostConfig.DNS + } else if len(daemon.configStore.DNS) > 0 { + dns = daemon.configStore.DNS + } + + for _, d := range dns { + sboxOptions = append(sboxOptions, libnetwork.OptionDNS(d)) + } + + dnsSearch := daemon.getDNSSearchSettings(container) + + for _, ds := range dnsSearch { + sboxOptions = append(sboxOptions, libnetwork.OptionDNSSearch(ds)) + } + + if len(container.HostConfig.DNSOptions) > 0 { + dnsOptions = container.HostConfig.DNSOptions + } else if len(daemon.configStore.DNSOptions) > 0 { + dnsOptions = daemon.configStore.DNSOptions + } + + for _, ds := range dnsOptions { + sboxOptions = append(sboxOptions, libnetwork.OptionDNSOptions(ds)) + } + + if container.NetworkSettings.SecondaryIPAddresses != nil { + name := container.Config.Hostname + if container.Config.Domainname != "" { + name = name + "." 
+ container.Config.Domainname + } + + for _, a := range container.NetworkSettings.SecondaryIPAddresses { + sboxOptions = append(sboxOptions, libnetwork.OptionExtraHost(name, a.Addr)) + } + } + + for _, extraHost := range container.HostConfig.ExtraHosts { + // allow IPv6 addresses in extra hosts; only split on first ":" + if _, err := opts.ValidateExtraHost(extraHost); err != nil { + return nil, err + } + parts := strings.SplitN(extraHost, ":", 2) + sboxOptions = append(sboxOptions, libnetwork.OptionExtraHost(parts[0], parts[1])) + } + + if container.HostConfig.PortBindings != nil { + for p, b := range container.HostConfig.PortBindings { + bindings[p] = []nat.PortBinding{} + for _, bb := range b { + bindings[p] = append(bindings[p], nat.PortBinding{ + HostIP: bb.HostIP, + HostPort: bb.HostPort, + }) + } + } + } + + portSpecs := container.Config.ExposedPorts + ports := make([]nat.Port, len(portSpecs)) + var i int + for p := range portSpecs { + ports[i] = p + i++ + } + nat.SortPortMap(ports, bindings) + for _, port := range ports { + expose := types.TransportPort{} + expose.Proto = types.ParseProtocol(port.Proto()) + expose.Port = uint16(port.Int()) + exposeList = append(exposeList, expose) + + pb := types.PortBinding{Port: expose.Port, Proto: expose.Proto} + binding := bindings[port] + for i := 0; i < len(binding); i++ { + pbCopy := pb.GetCopy() + newP, err := nat.NewPort(nat.SplitProtoPort(binding[i].HostPort)) + var portStart, portEnd int + if err == nil { + portStart, portEnd, err = newP.Range() + } + if err != nil { + return nil, fmt.Errorf("Error parsing HostPort value(%s):%v", binding[i].HostPort, err) + } + pbCopy.HostPort = uint16(portStart) + pbCopy.HostPortEnd = uint16(portEnd) + pbCopy.HostIP = net.ParseIP(binding[i].HostIP) + pbList = append(pbList, pbCopy) + } + + if container.HostConfig.PublishAllPorts && len(binding) == 0 { + pbList = append(pbList, pb) + } + } + + sboxOptions = append(sboxOptions, + libnetwork.OptionPortMapping(pbList), + libnetwork.OptionExposedPorts(exposeList)) + + // Legacy Link feature is supported only for the default bridge network. 
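+	// (e.g. a container started with `docker run --link db:database` gets
+	// hosts entries for the alias "database", for db's hostname and for its
+	// real name, built from the options below.)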
+ // return if this call to build join options is not for default bridge network + // Legacy Link is only supported by docker run --link + bridgeSettings, ok := container.NetworkSettings.Networks[defaultNetName] + if !ok || bridgeSettings.EndpointSettings == nil { + return sboxOptions, nil + } + + if bridgeSettings.EndpointID == "" { + return sboxOptions, nil + } + + var ( + childEndpoints, parentEndpoints []string + cEndpointID string + ) + + children := daemon.children(container) + for linkAlias, child := range children { + if !isLinkable(child) { + return nil, fmt.Errorf("Cannot link to %s, as it does not belong to the default network", child.Name) + } + _, alias := path.Split(linkAlias) + // allow access to the linked container via the alias, real name, and container hostname + aliasList := alias + " " + child.Config.Hostname + // only add the name if alias isn't equal to the name + if alias != child.Name[1:] { + aliasList = aliasList + " " + child.Name[1:] + } + sboxOptions = append(sboxOptions, libnetwork.OptionExtraHost(aliasList, child.NetworkSettings.Networks[defaultNetName].IPAddress)) + cEndpointID = child.NetworkSettings.Networks[defaultNetName].EndpointID + if cEndpointID != "" { + childEndpoints = append(childEndpoints, cEndpointID) + } + } + + for alias, parent := range daemon.parents(container) { + if daemon.configStore.DisableBridge || !container.HostConfig.NetworkMode.IsPrivate() { + continue + } + + _, alias = path.Split(alias) + logrus.Debugf("Update /etc/hosts of %s for alias %s with ip %s", parent.ID, alias, bridgeSettings.IPAddress) + sboxOptions = append(sboxOptions, libnetwork.OptionParentUpdate( + parent.ID, + alias, + bridgeSettings.IPAddress, + )) + if cEndpointID != "" { + parentEndpoints = append(parentEndpoints, cEndpointID) + } + } + + linkOptions := options.Generic{ + netlabel.GenericData: options.Generic{ + "ParentEndpoints": parentEndpoints, + "ChildEndpoints": childEndpoints, + }, + } + + sboxOptions = append(sboxOptions, libnetwork.OptionGeneric(linkOptions)) + return sboxOptions, nil +} + +func (daemon *Daemon) updateNetworkSettings(container *container.Container, n libnetwork.Network, endpointConfig *networktypes.EndpointSettings) error { + if container.NetworkSettings == nil { + container.NetworkSettings = &network.Settings{Networks: make(map[string]*network.EndpointSettings)} + } + + if !container.HostConfig.NetworkMode.IsHost() && containertypes.NetworkMode(n.Type()).IsHost() { + return runconfig.ErrConflictHostNetwork + } + + for s := range container.NetworkSettings.Networks { + sn, err := daemon.FindNetwork(s) + if err != nil { + continue + } + + if sn.Name() == n.Name() { + // Avoid duplicate config + return nil + } + if !containertypes.NetworkMode(sn.Type()).IsPrivate() || + !containertypes.NetworkMode(n.Type()).IsPrivate() { + return runconfig.ErrConflictSharedNetwork + } + if containertypes.NetworkMode(sn.Name()).IsNone() || + containertypes.NetworkMode(n.Name()).IsNone() { + return runconfig.ErrConflictNoNetwork + } + } + + if _, ok := container.NetworkSettings.Networks[n.Name()]; !ok { + container.NetworkSettings.Networks[n.Name()] = &network.EndpointSettings{ + EndpointSettings: endpointConfig, + } + } + + return nil +} + +func (daemon *Daemon) updateEndpointNetworkSettings(container *container.Container, n libnetwork.Network, ep libnetwork.Endpoint) error { + if err := container.BuildEndpointInfo(n, ep); err != nil { + return err + } + + if container.HostConfig.NetworkMode == runconfig.DefaultDaemonNetworkMode() { + 
container.NetworkSettings.Bridge = daemon.configStore.BridgeConfig.Iface + } + + return nil +} + +// UpdateNetwork is used to update the container's network (e.g. when linked containers +// get removed/unlinked). +func (daemon *Daemon) updateNetwork(container *container.Container) error { + var ( + start = time.Now() + ctrl = daemon.netController + sid = container.NetworkSettings.SandboxID + ) + + sb, err := ctrl.SandboxByID(sid) + if err != nil { + return fmt.Errorf("error locating sandbox id %s: %v", sid, err) + } + + // Find if container is connected to the default bridge network + var n libnetwork.Network + for name := range container.NetworkSettings.Networks { + sn, err := daemon.FindNetwork(name) + if err != nil { + continue + } + if sn.Name() == runconfig.DefaultDaemonNetworkMode().NetworkName() { + n = sn + break + } + } + + if n == nil { + // Not connected to the default bridge network; Nothing to do + return nil + } + + options, err := daemon.buildSandboxOptions(container) + if err != nil { + return fmt.Errorf("Update network failed: %v", err) + } + + if err := sb.Refresh(options...); err != nil { + return fmt.Errorf("Update network failed: Failure in refresh sandbox %s: %v", sid, err) + } + + networkActions.WithValues("update").UpdateSince(start) + + return nil +} + +func (daemon *Daemon) findAndAttachNetwork(container *container.Container, idOrName string, epConfig *networktypes.EndpointSettings) (libnetwork.Network, *networktypes.NetworkingConfig, error) { + n, err := daemon.FindNetwork(idOrName) + if err != nil { + // We should always be able to find the network for a + // managed container. + if container.Managed { + return nil, nil, err + } + } + + // If we found a network and if it is not dynamically created + // we should never attempt to attach to that network here. + if n != nil { + if container.Managed || !n.Info().Dynamic() { + return n, nil, nil + } + } + + var addresses []string + if epConfig != nil && epConfig.IPAMConfig != nil { + if epConfig.IPAMConfig.IPv4Address != "" { + addresses = append(addresses, epConfig.IPAMConfig.IPv4Address) + } + + if epConfig.IPAMConfig.IPv6Address != "" { + addresses = append(addresses, epConfig.IPAMConfig.IPv6Address) + } + } + + var ( + config *networktypes.NetworkingConfig + retryCount int + ) + + for { + // In all other cases, attempt to attach to the network to + // trigger attachment in the swarm cluster manager. + if daemon.clusterProvider != nil { + var err error + config, err = daemon.clusterProvider.AttachNetwork(idOrName, container.ID, addresses) + if err != nil { + return nil, nil, err + } + } + + n, err = daemon.FindNetwork(idOrName) + if err != nil { + if daemon.clusterProvider != nil { + if err := daemon.clusterProvider.DetachNetwork(idOrName, container.ID); err != nil { + logrus.Warnf("Could not rollback attachment for container %s to network %s: %v", container.ID, idOrName, err) + } + } + + // Retry network attach again if we failed to + // find the network after successful + // attachment because the only reason that + // would happen is if some other container + // attached to the swarm scope network went down + // and removed the network while we were in + // the process of attaching. 
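+			// A sketch of the interleaving this retry tolerates:
+			//
+			//	t0: AttachNetwork(idOrName, ...) succeeds on the manager
+			//	t1: the last other local task on that network exits and the
+			//	    node-local network object is removed
+			//	t2: FindNetwork(idOrName) fails with ErrNoSuchNetwork
+			//	t3: loop again: re-attach, which re-creates the local network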
+			if config != nil {
+				if _, ok := err.(libnetwork.ErrNoSuchNetwork); ok {
+					if retryCount >= 5 {
+						return nil, nil, fmt.Errorf("could not find network %s after successful attachment", idOrName)
+					}
+					retryCount++
+					continue
+				}
+			}
+
+			return nil, nil, err
+		}
+
+		break
+	}
+
+	// This container has an attachment to a swarm-scope network.
+	// Update the container network settings accordingly.
+	container.NetworkSettings.HasSwarmEndpoint = true
+	return n, config, nil
+}
+
+// updateContainerNetworkSettings updates the network settings
+func (daemon *Daemon) updateContainerNetworkSettings(container *container.Container, endpointsConfig map[string]*networktypes.EndpointSettings) {
+	var n libnetwork.Network
+
+	mode := container.HostConfig.NetworkMode
+	if container.Config.NetworkDisabled || mode.IsContainer() {
+		return
+	}
+
+	networkName := mode.NetworkName()
+	if mode.IsDefault() {
+		networkName = daemon.netController.Config().Daemon.DefaultNetwork
+	}
+
+	if mode.IsUserDefined() {
+		var err error
+
+		n, err = daemon.FindNetwork(networkName)
+		if err == nil {
+			networkName = n.Name()
+		}
+	}
+
+	if container.NetworkSettings == nil {
+		container.NetworkSettings = &network.Settings{}
+	}
+
+	if len(endpointsConfig) > 0 {
+		if container.NetworkSettings.Networks == nil {
+			container.NetworkSettings.Networks = make(map[string]*network.EndpointSettings)
+		}
+
+		for name, epConfig := range endpointsConfig {
+			container.NetworkSettings.Networks[name] = &network.EndpointSettings{
+				EndpointSettings: epConfig,
+			}
+		}
+	}
+
+	if container.NetworkSettings.Networks == nil {
+		container.NetworkSettings.Networks = make(map[string]*network.EndpointSettings)
+		container.NetworkSettings.Networks[networkName] = &network.EndpointSettings{
+			EndpointSettings: &networktypes.EndpointSettings{},
+		}
+	}
+
+	// Convert any settings added by client in default name to
+	// engine's default network name key
+	if mode.IsDefault() {
+		if nConf, ok := container.NetworkSettings.Networks[mode.NetworkName()]; ok {
+			container.NetworkSettings.Networks[networkName] = nConf
+			delete(container.NetworkSettings.Networks, mode.NetworkName())
+		}
+	}
+
+	if !mode.IsUserDefined() {
+		return
+	}
+	// Make sure to internally store the per network endpoint config by network name
+	if _, ok := container.NetworkSettings.Networks[networkName]; ok {
+		return
+	}
+
+	if n != nil {
+		if nwConfig, ok := container.NetworkSettings.Networks[n.ID()]; ok {
+			container.NetworkSettings.Networks[networkName] = nwConfig
+			delete(container.NetworkSettings.Networks, n.ID())
+			return
+		}
+	}
+}
+
+func (daemon *Daemon) allocateNetwork(container *container.Container) error {
+	start := time.Now()
+	controller := daemon.netController
+
+	if daemon.netController == nil {
+		return nil
+	}
+
+	// Clean up any stale sandbox left over due to ungraceful daemon shutdown
+	if err := controller.SandboxDestroy(container.ID); err != nil {
+		logrus.Errorf("failed to clean up stale network sandbox for container %s", container.ID)
+	}
+
+	if container.Config.NetworkDisabled || container.HostConfig.NetworkMode.IsContainer() {
+		return nil
+	}
+
+	updateSettings := false
+
+	if len(container.NetworkSettings.Networks) == 0 {
+		daemon.updateContainerNetworkSettings(container, nil)
+		updateSettings = true
+	}
+
+	// Always connect the default network first, since only the default
+	// network mode supports legacy links, and the link-related sandbox
+	// settings can only be applied when the sandbox is initialized, which
+	// happens on the first network connection.
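+	// Ordering sketch: for a container with endpoints on both "bridge" (the
+	// default) and a user-defined "front" network, "bridge" is connected
+	// first so legacy link /etc/hosts entries are injected while the sandbox
+	// is created; "front" then joins the already-initialized sandbox.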
+ defaultNetName := runconfig.DefaultDaemonNetworkMode().NetworkName() + if nConf, ok := container.NetworkSettings.Networks[defaultNetName]; ok { + cleanOperationalData(nConf) + if err := daemon.connectToNetwork(container, defaultNetName, nConf.EndpointSettings, updateSettings); err != nil { + return err + } + + } + + // the intermediate map is necessary because "connectToNetwork" modifies "container.NetworkSettings.Networks" + networks := make(map[string]*network.EndpointSettings) + for n, epConf := range container.NetworkSettings.Networks { + if n == defaultNetName { + continue + } + + networks[n] = epConf + } + + for netName, epConf := range networks { + cleanOperationalData(epConf) + if err := daemon.connectToNetwork(container, netName, epConf.EndpointSettings, updateSettings); err != nil { + return err + } + } + + // If the container is not to be connected to any network, + // create its network sandbox now if not present + if len(networks) == 0 { + if nil == daemon.getNetworkSandbox(container) { + options, err := daemon.buildSandboxOptions(container) + if err != nil { + return err + } + sb, err := daemon.netController.NewSandbox(container.ID, options...) + if err != nil { + return err + } + container.UpdateSandboxNetworkSettings(sb) + defer func() { + if err != nil { + sb.Delete() + } + }() + } + + } + + if err := container.WriteHostConfig(); err != nil { + return err + } + networkActions.WithValues("allocate").UpdateSince(start) + return nil +} + +func (daemon *Daemon) getNetworkSandbox(container *container.Container) libnetwork.Sandbox { + var sb libnetwork.Sandbox + daemon.netController.WalkSandboxes(func(s libnetwork.Sandbox) bool { + if s.ContainerID() == container.ID { + sb = s + return true + } + return false + }) + return sb +} + +// hasUserDefinedIPAddress returns whether the passed endpoint configuration contains IP address configuration +func hasUserDefinedIPAddress(epConfig *networktypes.EndpointSettings) bool { + return epConfig != nil && epConfig.IPAMConfig != nil && (len(epConfig.IPAMConfig.IPv4Address) > 0 || len(epConfig.IPAMConfig.IPv6Address) > 0) +} + +// User specified ip address is acceptable only for networks with user specified subnets. 
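+// For example (sketch): a static address is rejected when all of the
+// network's IPAM pools were auto-selected:
+//
+//	epConfig := &networktypes.EndpointSettings{
+//		IPAMConfig: &networktypes.EndpointIPAMConfig{IPv4Address: "10.9.0.5"},
+//	}
+//	err := validateNetworkingConfig(n, epConfig)
+//	// err == runconfig.ErrUnsupportedNetworkNoSubnetAndIP when n has no
+//	// user-configured subnet (PreferredPool)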
+func validateNetworkingConfig(n libnetwork.Network, epConfig *networktypes.EndpointSettings) error { + if n == nil || epConfig == nil { + return nil + } + if !hasUserDefinedIPAddress(epConfig) { + return nil + } + _, _, nwIPv4Configs, nwIPv6Configs := n.Info().IpamConfig() + for _, s := range []struct { + ipConfigured bool + subnetConfigs []*libnetwork.IpamConf + }{ + { + ipConfigured: len(epConfig.IPAMConfig.IPv4Address) > 0, + subnetConfigs: nwIPv4Configs, + }, + { + ipConfigured: len(epConfig.IPAMConfig.IPv6Address) > 0, + subnetConfigs: nwIPv6Configs, + }, + } { + if s.ipConfigured { + foundSubnet := false + for _, cfg := range s.subnetConfigs { + if len(cfg.PreferredPool) > 0 { + foundSubnet = true + break + } + } + if !foundSubnet { + return runconfig.ErrUnsupportedNetworkNoSubnetAndIP + } + } + } + + return nil +} + +// cleanOperationalData resets the operational data from the passed endpoint settings +func cleanOperationalData(es *network.EndpointSettings) { + es.EndpointID = "" + es.Gateway = "" + es.IPAddress = "" + es.IPPrefixLen = 0 + es.IPv6Gateway = "" + es.GlobalIPv6Address = "" + es.GlobalIPv6PrefixLen = 0 + es.MacAddress = "" + if es.IPAMOperational { + es.IPAMConfig = nil + } +} + +func (daemon *Daemon) updateNetworkConfig(container *container.Container, n libnetwork.Network, endpointConfig *networktypes.EndpointSettings, updateSettings bool) error { + + if !containertypes.NetworkMode(n.Name()).IsUserDefined() { + if hasUserDefinedIPAddress(endpointConfig) && !enableIPOnPredefinedNetwork() { + return runconfig.ErrUnsupportedNetworkAndIP + } + if endpointConfig != nil && len(endpointConfig.Aliases) > 0 && !container.EnableServiceDiscoveryOnDefaultNetwork() { + return runconfig.ErrUnsupportedNetworkAndAlias + } + } else { + addShortID := true + shortID := stringid.TruncateID(container.ID) + for _, alias := range endpointConfig.Aliases { + if alias == shortID { + addShortID = false + break + } + } + if addShortID { + endpointConfig.Aliases = append(endpointConfig.Aliases, shortID) + } + } + + if err := validateNetworkingConfig(n, endpointConfig); err != nil { + return err + } + + if updateSettings { + if err := daemon.updateNetworkSettings(container, n, endpointConfig); err != nil { + return err + } + } + return nil +} + +func (daemon *Daemon) connectToNetwork(container *container.Container, idOrName string, endpointConfig *networktypes.EndpointSettings, updateSettings bool) (err error) { + start := time.Now() + if container.HostConfig.NetworkMode.IsContainer() { + return runconfig.ErrConflictSharedNetwork + } + if containertypes.NetworkMode(idOrName).IsBridge() && + daemon.configStore.DisableBridge { + container.Config.NetworkDisabled = true + return nil + } + if endpointConfig == nil { + endpointConfig = &networktypes.EndpointSettings{} + } + + n, config, err := daemon.findAndAttachNetwork(container, idOrName, endpointConfig) + if err != nil { + return err + } + if n == nil { + return nil + } + + var operIPAM bool + if config != nil { + if epConfig, ok := config.EndpointsConfig[n.Name()]; ok { + if endpointConfig.IPAMConfig == nil || + (endpointConfig.IPAMConfig.IPv4Address == "" && + endpointConfig.IPAMConfig.IPv6Address == "" && + len(endpointConfig.IPAMConfig.LinkLocalIPs) == 0) { + operIPAM = true + } + + // copy IPAMConfig and NetworkID from epConfig via AttachNetwork + endpointConfig.IPAMConfig = epConfig.IPAMConfig + endpointConfig.NetworkID = epConfig.NetworkID + } + } + + err = daemon.updateNetworkConfig(container, n, endpointConfig, updateSettings) + if err != 
nil { + return err + } + + controller := daemon.netController + sb := daemon.getNetworkSandbox(container) + createOptions, err := container.BuildCreateEndpointOptions(n, endpointConfig, sb, daemon.configStore.DNS) + if err != nil { + return err + } + + endpointName := strings.TrimPrefix(container.Name, "/") + ep, err := n.CreateEndpoint(endpointName, createOptions...) + if err != nil { + return err + } + defer func() { + if err != nil { + if e := ep.Delete(false); e != nil { + logrus.Warnf("Could not rollback container connection to network %s", idOrName) + } + } + }() + container.NetworkSettings.Networks[n.Name()] = &network.EndpointSettings{ + EndpointSettings: endpointConfig, + IPAMOperational: operIPAM, + } + if _, ok := container.NetworkSettings.Networks[n.ID()]; ok { + delete(container.NetworkSettings.Networks, n.ID()) + } + + if err := daemon.updateEndpointNetworkSettings(container, n, ep); err != nil { + return err + } + + if sb == nil { + options, err := daemon.buildSandboxOptions(container) + if err != nil { + return err + } + sb, err = controller.NewSandbox(container.ID, options...) + if err != nil { + return err + } + + container.UpdateSandboxNetworkSettings(sb) + } + + joinOptions, err := container.BuildJoinOptions(n) + if err != nil { + return err + } + + if err := ep.Join(sb, joinOptions...); err != nil { + return err + } + + if !container.Managed { + // add container name/alias to DNS + if err := daemon.ActivateContainerServiceBinding(container.Name); err != nil { + return fmt.Errorf("Activate container service binding for %s failed: %v", container.Name, err) + } + } + + if err := container.UpdateJoinInfo(n, ep); err != nil { + return fmt.Errorf("Updating join info failed: %v", err) + } + + container.NetworkSettings.Ports = getPortMapInfo(sb) + + daemon.LogNetworkEventWithAttributes(n, "connect", map[string]string{"container": container.ID}) + networkActions.WithValues("connect").UpdateSince(start) + return nil +} + +// ForceEndpointDelete deletes an endpoint from a network forcefully +func (daemon *Daemon) ForceEndpointDelete(name string, networkName string) error { + n, err := daemon.FindNetwork(networkName) + if err != nil { + return err + } + + ep, err := n.EndpointByName(name) + if err != nil { + return err + } + return ep.Delete(true) +} + +func (daemon *Daemon) disconnectFromNetwork(container *container.Container, n libnetwork.Network, force bool) error { + var ( + ep libnetwork.Endpoint + sbox libnetwork.Sandbox + ) + + s := func(current libnetwork.Endpoint) bool { + epInfo := current.Info() + if epInfo == nil { + return false + } + if sb := epInfo.Sandbox(); sb != nil { + if sb.ContainerID() == container.ID { + ep = current + sbox = sb + return true + } + } + return false + } + n.WalkEndpoints(s) + + if ep == nil && force { + epName := strings.TrimPrefix(container.Name, "/") + ep, err := n.EndpointByName(epName) + if err != nil { + return err + } + return ep.Delete(force) + } + + if ep == nil { + return fmt.Errorf("container %s is not connected to network %s", container.ID, n.Name()) + } + + if err := ep.Leave(sbox); err != nil { + return fmt.Errorf("container %s failed to leave network %s: %v", container.ID, n.Name(), err) + } + + container.NetworkSettings.Ports = getPortMapInfo(sbox) + + if err := ep.Delete(false); err != nil { + return fmt.Errorf("endpoint delete failed for container %s on network %s: %v", container.ID, n.Name(), err) + } + + delete(container.NetworkSettings.Networks, n.Name()) + + daemon.tryDetachContainerFromClusterNetwork(n, container) + + 
return nil +} + +func (daemon *Daemon) tryDetachContainerFromClusterNetwork(network libnetwork.Network, container *container.Container) { + if daemon.clusterProvider != nil && network.Info().Dynamic() && !container.Managed { + if err := daemon.clusterProvider.DetachNetwork(network.Name(), container.ID); err != nil { + logrus.Warnf("error detaching from network %s: %v", network.Name(), err) + if err := daemon.clusterProvider.DetachNetwork(network.ID(), container.ID); err != nil { + logrus.Warnf("error detaching from network %s: %v", network.ID(), err) + } + } + } + attributes := map[string]string{ + "container": container.ID, + } + daemon.LogNetworkEventWithAttributes(network, "disconnect", attributes) +} + +func (daemon *Daemon) initializeNetworking(container *container.Container) error { + var err error + + if container.HostConfig.NetworkMode.IsContainer() { + // we need to get the hosts files from the container to join + nc, err := daemon.getNetworkedContainer(container.ID, container.HostConfig.NetworkMode.ConnectedContainer()) + if err != nil { + return err + } + initializeNetworkingPaths(container, nc) + container.Config.Hostname = nc.Config.Hostname + container.Config.Domainname = nc.Config.Domainname + return nil + } + + if container.HostConfig.NetworkMode.IsHost() { + if container.Config.Hostname == "" { + container.Config.Hostname, err = os.Hostname() + if err != nil { + return err + } + } + } + + if err := daemon.allocateNetwork(container); err != nil { + return err + } + + return container.BuildHostnameFile() +} + +func (daemon *Daemon) getNetworkedContainer(containerID, connectedContainerID string) (*container.Container, error) { + nc, err := daemon.GetContainer(connectedContainerID) + if err != nil { + return nil, err + } + if containerID == nc.ID { + return nil, fmt.Errorf("cannot join own network") + } + if !nc.IsRunning() { + err := fmt.Errorf("cannot join network of a non running container: %s", connectedContainerID) + return nil, derr.NewRequestConflictError(err) + } + if nc.IsRestarting() { + return nil, errContainerIsRestarting(connectedContainerID) + } + return nc, nil +} + +func (daemon *Daemon) releaseNetwork(container *container.Container) { + start := time.Now() + if daemon.netController == nil { + return + } + if container.HostConfig.NetworkMode.IsContainer() || container.Config.NetworkDisabled { + return + } + + sid := container.NetworkSettings.SandboxID + settings := container.NetworkSettings.Networks + container.NetworkSettings.Ports = nil + + if sid == "" { + return + } + + var networks []libnetwork.Network + for n, epSettings := range settings { + if nw, err := daemon.FindNetwork(n); err == nil { + networks = append(networks, nw) + } + + if epSettings.EndpointSettings == nil { + continue + } + + cleanOperationalData(epSettings) + } + + sb, err := daemon.netController.SandboxByID(sid) + if err != nil { + logrus.Warnf("error locating sandbox id %s: %v", sid, err) + return + } + + if err := sb.Delete(); err != nil { + logrus.Errorf("Error deleting sandbox id %s for container %s: %v", sid, container.ID, err) + } + + for _, nw := range networks { + daemon.tryDetachContainerFromClusterNetwork(nw, container) + } + networkActions.WithValues("release").UpdateSince(start) +} + +func errRemovalContainer(containerID string) error { + return fmt.Errorf("Container %s is marked for removal and cannot be connected or disconnected to the network", containerID) +} + +// ConnectToNetwork connects a container to a network +func (daemon *Daemon) ConnectToNetwork(container 
*container.Container, idOrName string, endpointConfig *networktypes.EndpointSettings) error { + if endpointConfig == nil { + endpointConfig = &networktypes.EndpointSettings{} + } + if !container.Running { + if container.RemovalInProgress || container.Dead { + return errRemovalContainer(container.ID) + } + + n, err := daemon.FindNetwork(idOrName) + if err == nil && n != nil { + if err := daemon.updateNetworkConfig(container, n, endpointConfig, true); err != nil { + return err + } + } else { + container.NetworkSettings.Networks[idOrName] = &network.EndpointSettings{ + EndpointSettings: endpointConfig, + } + } + } else if !daemon.isNetworkHotPluggable() { + return fmt.Errorf(runtime.GOOS + " does not support connecting a running container to a network") + } else { + if err := daemon.connectToNetwork(container, idOrName, endpointConfig, true); err != nil { + return err + } + } + if err := container.ToDiskLocking(); err != nil { + return fmt.Errorf("Error saving container to disk: %v", err) + } + return nil +} + +// DisconnectFromNetwork disconnects a container from the given network. +func (daemon *Daemon) DisconnectFromNetwork(container *container.Container, networkName string, force bool) error { + n, err := daemon.FindNetwork(networkName) + if !container.Running || (err != nil && force) { + if container.RemovalInProgress || container.Dead { + return errRemovalContainer(container.ID) + } + // In case networkName is resolved we will use n.Name() + // this will cover the case where network id is passed. + if n != nil { + networkName = n.Name() + } + if _, ok := container.NetworkSettings.Networks[networkName]; !ok { + return fmt.Errorf("container %s is not connected to the network %s", container.ID, networkName) + } + delete(container.NetworkSettings.Networks, networkName) + } else if err == nil && !daemon.isNetworkHotPluggable() { + return fmt.Errorf(runtime.GOOS + " does not support disconnecting a running container from a network") + } else if err == nil { + if container.HostConfig.NetworkMode.IsHost() && containertypes.NetworkMode(n.Type()).IsHost() { + return runconfig.ErrConflictHostNetwork + } + + if err := daemon.disconnectFromNetwork(container, n, false); err != nil { + return err + } + } else { + return err + } + + if err := container.ToDiskLocking(); err != nil { + return fmt.Errorf("Error saving container to disk: %v", err) + } + + if n != nil { + attributes := map[string]string{ + "container": container.ID, + } + daemon.LogNetworkEventWithAttributes(n, "disconnect", attributes) + } + return nil +} + +// ActivateContainerServiceBinding puts this container into load balancer active rotation and DNS response +func (daemon *Daemon) ActivateContainerServiceBinding(containerName string) error { + container, err := daemon.GetContainer(containerName) + if err != nil { + return err + } + sb := daemon.getNetworkSandbox(container) + if sb == nil { + return fmt.Errorf("network sandbox does not exist for container %s", containerName) + } + return sb.EnableService() +} + +// DeactivateContainerServiceBinding removes this container from load balancer active rotation and DNS response +func (daemon *Daemon) DeactivateContainerServiceBinding(containerName string) error { + container, err := daemon.GetContainer(containerName) + if err != nil { + return err + } + sb := daemon.getNetworkSandbox(container) + if sb == nil { + // If the network sandbox is not found, then there is nothing to deactivate + logrus.Debugf("Could not find network sandbox for container %s on service binding deactivation request", 
containerName) + return nil + } + return sb.DisableService() +} diff --git a/components/engine/daemon/container_operations_solaris.go b/components/engine/daemon/container_operations_solaris.go new file mode 100644 index 0000000000..1653948de1 --- /dev/null +++ b/components/engine/daemon/container_operations_solaris.go @@ -0,0 +1,46 @@ +// +build solaris + +package daemon + +import ( + "github.com/docker/docker/container" + "github.com/docker/docker/runconfig" + "github.com/docker/libnetwork" +) + +func (daemon *Daemon) setupLinkedContainers(container *container.Container) ([]string, error) { + return nil, nil +} + +func (daemon *Daemon) setupIpcDirs(container *container.Container) error { + return nil +} + +func killProcessDirectly(container *container.Container) error { + return nil +} + +func detachMounted(path string) error { + return nil +} + +func isLinkable(child *container.Container) bool { + // A container is linkable only if it belongs to the default network + _, ok := child.NetworkSettings.Networks[runconfig.DefaultDaemonNetworkMode().NetworkName()] + return ok +} + +func enableIPOnPredefinedNetwork() bool { + return false +} + +func (daemon *Daemon) isNetworkHotPluggable() bool { + return false +} + +func setupPathsAndSandboxOptions(container *container.Container, sboxOptions *[]libnetwork.SandboxOption) error { + return nil +} + +func initializeNetworkingPaths(container *container.Container, nc *container.Container) { +} diff --git a/components/engine/daemon/container_operations_unix.go b/components/engine/daemon/container_operations_unix.go new file mode 100644 index 0000000000..6889c10956 --- /dev/null +++ b/components/engine/daemon/container_operations_unix.go @@ -0,0 +1,356 @@ +// +build linux freebsd + +package daemon + +import ( + "context" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strconv" + "syscall" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/container" + "github.com/docker/docker/daemon/links" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/mount" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/runconfig" + "github.com/docker/libnetwork" + "github.com/opencontainers/selinux/go-selinux/label" + "github.com/pkg/errors" +) + +func (daemon *Daemon) setupLinkedContainers(container *container.Container) ([]string, error) { + var env []string + children := daemon.children(container) + + bridgeSettings := container.NetworkSettings.Networks[runconfig.DefaultDaemonNetworkMode().NetworkName()] + if bridgeSettings == nil || bridgeSettings.EndpointSettings == nil { + return nil, nil + } + + for linkAlias, child := range children { + if !child.IsRunning() { + return nil, fmt.Errorf("Cannot link to a non running container: %s AS %s", child.Name, linkAlias) + } + + childBridgeSettings := child.NetworkSettings.Networks[runconfig.DefaultDaemonNetworkMode().NetworkName()] + if childBridgeSettings == nil || childBridgeSettings.EndpointSettings == nil { + return nil, fmt.Errorf("container %s not attached to default bridge network", child.ID) + } + + link := links.NewLink( + bridgeSettings.IPAddress, + childBridgeSettings.IPAddress, + linkAlias, + child.Config.Env, + child.Config.ExposedPorts, + ) + + env = append(env, link.ToEnv()...) 
+ } + + return env, nil +} + +func (daemon *Daemon) getIpcContainer(container *container.Container) (*container.Container, error) { + containerID := container.HostConfig.IpcMode.Container() + container, err := daemon.GetContainer(containerID) + if err != nil { + return nil, errors.Wrapf(err, "cannot join IPC of a non running container: %s", container.ID) + } + return container, daemon.checkContainer(container, containerIsRunning, containerIsNotRestarting) +} + +func (daemon *Daemon) getPidContainer(container *container.Container) (*container.Container, error) { + containerID := container.HostConfig.PidMode.Container() + container, err := daemon.GetContainer(containerID) + if err != nil { + return nil, errors.Wrapf(err, "cannot join PID of a non running container: %s", container.ID) + } + return container, daemon.checkContainer(container, containerIsRunning, containerIsNotRestarting) +} + +func containerIsRunning(c *container.Container) error { + if !c.IsRunning() { + return errors.Errorf("container %s is not running", c.ID) + } + return nil +} + +func containerIsNotRestarting(c *container.Container) error { + if c.IsRestarting() { + return errContainerIsRestarting(c.ID) + } + return nil +} + +func (daemon *Daemon) setupIpcDirs(c *container.Container) error { + var err error + + c.ShmPath, err = c.ShmResourcePath() + if err != nil { + return err + } + + if c.HostConfig.IpcMode.IsContainer() { + ic, err := daemon.getIpcContainer(c) + if err != nil { + return err + } + c.ShmPath = ic.ShmPath + } else if c.HostConfig.IpcMode.IsHost() { + if _, err := os.Stat("/dev/shm"); err != nil { + return fmt.Errorf("/dev/shm is not mounted, but must be for --ipc=host") + } + c.ShmPath = "/dev/shm" + } else { + rootUID, rootGID := daemon.GetRemappedUIDGID() + if !c.HasMountFor("/dev/shm") { + shmPath, err := c.ShmResourcePath() + if err != nil { + return err + } + + if err := idtools.MkdirAllAs(shmPath, 0700, rootUID, rootGID); err != nil { + return err + } + + shmSize := int64(daemon.configStore.ShmSize) + if c.HostConfig.ShmSize != 0 { + shmSize = c.HostConfig.ShmSize + } + shmproperty := "mode=1777,size=" + strconv.FormatInt(shmSize, 10) + if err := syscall.Mount("shm", shmPath, "tmpfs", uintptr(syscall.MS_NOEXEC|syscall.MS_NOSUID|syscall.MS_NODEV), label.FormatMountLabel(shmproperty, c.GetMountLabel())); err != nil { + return fmt.Errorf("mounting shm tmpfs: %s", err) + } + if err := os.Chown(shmPath, rootUID, rootGID); err != nil { + return err + } + } + + } + + return nil +} + +func (daemon *Daemon) setupSecretDir(c *container.Container) (setupErr error) { + if len(c.SecretReferences) == 0 { + return nil + } + + localMountPath := c.SecretMountPath() + logrus.Debugf("secrets: setting up secret dir: %s", localMountPath) + + // retrieve possible remapped range start for root UID, GID + rootUID, rootGID := daemon.GetRemappedUIDGID() + // create tmpfs + if err := idtools.MkdirAllAs(localMountPath, 0700, rootUID, rootGID); err != nil { + return errors.Wrap(err, "error creating secret local mount path") + } + + defer func() { + if setupErr != nil { + // cleanup + _ = detachMounted(localMountPath) + + if err := os.RemoveAll(localMountPath); err != nil { + logrus.Errorf("error cleaning up secret mount: %s", err) + } + } + }() + + tmpfsOwnership := fmt.Sprintf("uid=%d,gid=%d", rootUID, rootGID) + if err := mount.Mount("tmpfs", localMountPath, "tmpfs", "nodev,nosuid,noexec,"+tmpfsOwnership); err != nil { + return errors.Wrap(err, "unable to setup secret mount") + } + + if c.DependencyStore == nil { + return 
fmt.Errorf("secret store is not initialized") + } + + for _, s := range c.SecretReferences { + // TODO (ehazlett): use type switch when more are supported + if s.File == nil { + logrus.Error("secret target type is not a file target") + continue + } + + // secrets are created in the SecretMountPath on the host, at a + // single level + fPath := c.SecretFilePath(*s) + if err := idtools.MkdirAllAs(filepath.Dir(fPath), 0700, rootUID, rootGID); err != nil { + return errors.Wrap(err, "error creating secret mount path") + } + + logrus.WithFields(logrus.Fields{ + "name": s.File.Name, + "path": fPath, + }).Debug("injecting secret") + secret := c.DependencyStore.Secrets().Get(s.SecretID) + if secret == nil { + return fmt.Errorf("unable to get secret from secret store") + } + if err := ioutil.WriteFile(fPath, secret.Spec.Data, s.File.Mode); err != nil { + return errors.Wrap(err, "error injecting secret") + } + + uid, err := strconv.Atoi(s.File.UID) + if err != nil { + return err + } + gid, err := strconv.Atoi(s.File.GID) + if err != nil { + return err + } + + if err := os.Chown(fPath, rootUID+uid, rootGID+gid); err != nil { + return errors.Wrap(err, "error setting ownership for secret") + } + } + + label.Relabel(localMountPath, c.MountLabel, false) + + // remount secrets ro + if err := mount.Mount("tmpfs", localMountPath, "tmpfs", "remount,ro,"+tmpfsOwnership); err != nil { + return errors.Wrap(err, "unable to remount secret dir as readonly") + } + + return nil +} + +func (daemon *Daemon) setupConfigDir(c *container.Container) (setupErr error) { + if len(c.ConfigReferences) == 0 { + return nil + } + + localPath := c.ConfigsDirPath() + logrus.Debugf("configs: setting up config dir: %s", localPath) + + // retrieve possible remapped range start for root UID, GID + rootUID, rootGID := daemon.GetRemappedUIDGID() + // create tmpfs + if err := idtools.MkdirAllAs(localPath, 0700, rootUID, rootGID); err != nil { + return errors.Wrap(err, "error creating config dir") + } + + defer func() { + if setupErr != nil { + if err := os.RemoveAll(localPath); err != nil { + logrus.Errorf("error cleaning up config dir: %s", err) + } + } + }() + + if c.DependencyStore == nil { + return fmt.Errorf("config store is not initialized") + } + + for _, configRef := range c.ConfigReferences { + // TODO (ehazlett): use type switch when more are supported + if configRef.File == nil { + logrus.Error("config target type is not a file target") + continue + } + + fPath := c.ConfigFilePath(*configRef) + + log := logrus.WithFields(logrus.Fields{"name": configRef.File.Name, "path": fPath}) + + if err := idtools.MkdirAllAs(filepath.Dir(fPath), 0700, rootUID, rootGID); err != nil { + return errors.Wrap(err, "error creating config path") + } + + log.Debug("injecting config") + config := c.DependencyStore.Configs().Get(configRef.ConfigID) + if config == nil { + return fmt.Errorf("unable to get config from config store") + } + if err := ioutil.WriteFile(fPath, config.Spec.Data, configRef.File.Mode); err != nil { + return errors.Wrap(err, "error injecting config") + } + + uid, err := strconv.Atoi(configRef.File.UID) + if err != nil { + return err + } + gid, err := strconv.Atoi(configRef.File.GID) + if err != nil { + return err + } + + if err := os.Chown(fPath, rootUID+uid, rootGID+gid); err != nil { + return errors.Wrap(err, "error setting ownership for config") + } + } + + return nil +} + +func killProcessDirectly(cntr *container.Container) error { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + 
// Block until the container stops, or until the timeout expires. + status := <-cntr.Wait(ctx, container.WaitConditionNotRunning) + if status.Err() != nil { + // Ensure that we don't kill ourselves + if pid := cntr.GetPID(); pid != 0 { + logrus.Infof("Container %s failed to exit within 10 seconds of kill - trying direct SIGKILL", stringid.TruncateID(cntr.ID)) + if err := syscall.Kill(pid, 9); err != nil { + if err != syscall.ESRCH { + return err + } + e := errNoSuchProcess{pid, 9} + logrus.Debug(e) + return e + } + } + } + return nil +} + +func detachMounted(path string) error { + return syscall.Unmount(path, syscall.MNT_DETACH) +} + +func isLinkable(child *container.Container) bool { + // A container is linkable only if it belongs to the default network + _, ok := child.NetworkSettings.Networks[runconfig.DefaultDaemonNetworkMode().NetworkName()] + return ok +} + +func enableIPOnPredefinedNetwork() bool { + return false +} + +func (daemon *Daemon) isNetworkHotPluggable() bool { + return true +} + +func setupPathsAndSandboxOptions(container *container.Container, sboxOptions *[]libnetwork.SandboxOption) error { + var err error + + container.HostsPath, err = container.GetRootResourcePath("hosts") + if err != nil { + return err + } + *sboxOptions = append(*sboxOptions, libnetwork.OptionHostsPath(container.HostsPath)) + + container.ResolvConfPath, err = container.GetRootResourcePath("resolv.conf") + if err != nil { + return err + } + *sboxOptions = append(*sboxOptions, libnetwork.OptionResolvConfPath(container.ResolvConfPath)) + return nil +} + +func initializeNetworkingPaths(container *container.Container, nc *container.Container) { + container.HostnamePath = nc.HostnamePath + container.HostsPath = nc.HostsPath + container.ResolvConfPath = nc.ResolvConfPath +} diff --git a/components/engine/daemon/container_operations_windows.go b/components/engine/daemon/container_operations_windows.go new file mode 100644 index 0000000000..058089e7ca --- /dev/null +++ b/components/engine/daemon/container_operations_windows.go @@ -0,0 +1,165 @@ +package daemon + +import ( + "fmt" + "io/ioutil" + "os" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/container" + "github.com/docker/docker/pkg/system" + "github.com/docker/libnetwork" + "github.com/pkg/errors" +) + +func (daemon *Daemon) setupLinkedContainers(container *container.Container) ([]string, error) { + return nil, nil +} + +func (daemon *Daemon) setupConfigDir(c *container.Container) (setupErr error) { + if len(c.ConfigReferences) == 0 { + return nil + } + + localPath := c.ConfigsDirPath() + logrus.Debugf("configs: setting up config dir: %s", localPath) + + // create local config root + if err := system.MkdirAllWithACL(localPath, 0); err != nil { + return errors.Wrap(err, "error creating config dir") + } + + defer func() { + if setupErr != nil { + if err := os.RemoveAll(localPath); err != nil { + logrus.Errorf("error cleaning up config dir: %s", err) + } + } + }() + + if c.DependencyStore == nil { + return fmt.Errorf("config store is not initialized") + } + + for _, configRef := range c.ConfigReferences { + // TODO (ehazlett): use type switch when more are supported + if configRef.File == nil { + logrus.Error("config target type is not a file target") + continue + } + + fPath := c.ConfigFilePath(*configRef) + + log := logrus.WithFields(logrus.Fields{"name": configRef.File.Name, "path": fPath}) + + log.Debug("injecting config") + config := c.DependencyStore.Configs().Get(configRef.ConfigID) + if config == nil { + return fmt.Errorf("unable to get config from 
config store") + } + if err := ioutil.WriteFile(fPath, config.Spec.Data, configRef.File.Mode); err != nil { + return errors.Wrap(err, "error injecting config") + } + } + + return nil +} + +// getSize returns real size & virtual size +func (daemon *Daemon) getSize(containerID string) (int64, int64) { + // TODO Windows + return 0, 0 +} + +func (daemon *Daemon) setupIpcDirs(container *container.Container) error { + return nil +} + +// TODO Windows: Fix Post-TP5. This is a hack to allow docker cp to work +// against containers which have volumes. You will still be able to cp +// to somewhere on the container drive, but not to any mounted volumes +// inside the container. Without this fix, docker cp is broken to any +// container which has a volume, regardless of where the file is inside the +// container. +func (daemon *Daemon) mountVolumes(container *container.Container) error { + return nil +} + +func detachMounted(path string) error { + return nil +} + +func (daemon *Daemon) setupSecretDir(c *container.Container) (setupErr error) { + if len(c.SecretReferences) == 0 { + return nil + } + + localMountPath := c.SecretMountPath() + logrus.Debugf("secrets: setting up secret dir: %s", localMountPath) + + // create local secret root + if err := system.MkdirAllWithACL(localMountPath, 0); err != nil { + return errors.Wrap(err, "error creating secret local directory") + } + + defer func() { + if setupErr != nil { + if err := os.RemoveAll(localMountPath); err != nil { + logrus.Errorf("error cleaning up secret mount: %s", err) + } + } + }() + + if c.DependencyStore == nil { + return fmt.Errorf("secret store is not initialized") + } + + for _, s := range c.SecretReferences { + // TODO (ehazlett): use type switch when more are supported + if s.File == nil { + logrus.Error("secret target type is not a file target") + continue + } + + // secrets are created in the SecretMountPath on the host, at a + // single level + fPath := c.SecretFilePath(*s) + logrus.WithFields(logrus.Fields{ + "name": s.File.Name, + "path": fPath, + }).Debug("injecting secret") + secret := c.DependencyStore.Secrets().Get(s.SecretID) + if secret == nil { + return fmt.Errorf("unable to get secret from secret store") + } + if err := ioutil.WriteFile(fPath, secret.Spec.Data, s.File.Mode); err != nil { + return errors.Wrap(err, "error injecting secret") + } + } + + return nil +} + +func killProcessDirectly(container *container.Container) error { + return nil +} + +func isLinkable(child *container.Container) bool { + return false +} + +func enableIPOnPredefinedNetwork() bool { + return true +} + +func (daemon *Daemon) isNetworkHotPluggable() bool { + return false +} + +func setupPathsAndSandboxOptions(container *container.Container, sboxOptions *[]libnetwork.SandboxOption) error { + return nil +} + +func initializeNetworkingPaths(container *container.Container, nc *container.Container) { + container.NetworkSharedContainerID = nc.ID +} diff --git a/components/engine/daemon/container_windows.go b/components/engine/daemon/container_windows.go new file mode 100644 index 0000000000..6fdd1e678e --- /dev/null +++ b/components/engine/daemon/container_windows.go @@ -0,0 +1,11 @@ +//+build windows + +package daemon + +import ( + "github.com/docker/docker/container" +) + +func (daemon *Daemon) saveApparmorConfig(container *container.Container) error { + return nil +} diff --git a/components/engine/daemon/create.go b/components/engine/daemon/create.go new file mode 100644 index 0000000000..de129ca60d --- /dev/null +++ 
b/components/engine/daemon/create.go @@ -0,0 +1,307 @@ +package daemon + +import ( + "fmt" + "net" + "runtime" + "strings" + "time" + + "github.com/pkg/errors" + + "github.com/Sirupsen/logrus" + apierrors "github.com/docker/docker/api/errors" + "github.com/docker/docker/api/types" + containertypes "github.com/docker/docker/api/types/container" + networktypes "github.com/docker/docker/api/types/network" + "github.com/docker/docker/container" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/runconfig" + "github.com/opencontainers/selinux/go-selinux/label" +) + +// CreateManagedContainer creates a container that is managed by a Service +func (daemon *Daemon) CreateManagedContainer(params types.ContainerCreateConfig) (containertypes.ContainerCreateCreatedBody, error) { + return daemon.containerCreate(params, true) +} + +// ContainerCreate creates a regular container +func (daemon *Daemon) ContainerCreate(params types.ContainerCreateConfig) (containertypes.ContainerCreateCreatedBody, error) { + return daemon.containerCreate(params, false) +} + +func (daemon *Daemon) containerCreate(params types.ContainerCreateConfig, managed bool) (containertypes.ContainerCreateCreatedBody, error) { + start := time.Now() + if params.Config == nil { + return containertypes.ContainerCreateCreatedBody{}, fmt.Errorf("Config cannot be empty in order to create a container") + } + + warnings, err := daemon.verifyContainerSettings(params.HostConfig, params.Config, false) + if err != nil { + return containertypes.ContainerCreateCreatedBody{Warnings: warnings}, err + } + + err = daemon.verifyNetworkingConfig(params.NetworkingConfig) + if err != nil { + return containertypes.ContainerCreateCreatedBody{Warnings: warnings}, err + } + + if params.HostConfig == nil { + params.HostConfig = &containertypes.HostConfig{} + } + err = daemon.adaptContainerSettings(params.HostConfig, params.AdjustCPUShares) + if err != nil { + return containertypes.ContainerCreateCreatedBody{Warnings: warnings}, err + } + + container, err := daemon.create(params, managed) + if err != nil { + return containertypes.ContainerCreateCreatedBody{Warnings: warnings}, daemon.imageNotExistToErrcode(err) + } + containerActions.WithValues("create").UpdateSince(start) + + return containertypes.ContainerCreateCreatedBody{ID: container.ID, Warnings: warnings}, nil +} + +// Create creates a new container from the given configuration with a given name. 
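Note how `containerCreate` above threads `warnings` through every early return, so a client still receives the warnings accumulated so far alongside any error. A minimal, self-contained sketch of that pattern (all names here are illustrative, not the daemon's API):

```go
package main

import (
	"errors"
	"fmt"
)

// createdBody mimics the shape of ContainerCreateCreatedBody: an ID plus warnings.
type createdBody struct {
	ID       string
	Warnings []string
}

// create returns the warnings gathered so far even when a later step fails,
// mirroring containerCreate's early returns.
func create(hostConfigOK bool) (createdBody, error) {
	warnings := []string{"example warning: no memory limit support"}
	if !hostConfigOK {
		// Warnings are returned alongside the error, not dropped.
		return createdBody{Warnings: warnings}, errors.New("invalid host config")
	}
	return createdBody{ID: "0123abcd", Warnings: warnings}, nil
}

func main() {
	body, err := create(false)
	fmt.Println(body.Warnings, err) // warnings survive the error path
}
```

The same shape appears on the error paths for `verifyContainerSettings`, `verifyNetworkingConfig`, and `adaptContainerSettings` above.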
+func (daemon *Daemon) create(params types.ContainerCreateConfig, managed bool) (retC *container.Container, retErr error) { + var ( + container *container.Container + img *image.Image + imgID image.ID + err error + ) + + if params.Config.Image != "" { + img, err = daemon.GetImage(params.Config.Image) + if err != nil { + return nil, err + } + + if runtime.GOOS == "solaris" && img.OS != "solaris" { + return nil, errors.New("Platform on which parent image was created is not Solaris") + } + imgID = img.ID() + } + + if err := daemon.mergeAndVerifyConfig(params.Config, img); err != nil { + return nil, err + } + + if err := daemon.mergeAndVerifyLogConfig(&params.HostConfig.LogConfig); err != nil { + return nil, err + } + + if container, err = daemon.newContainer(params.Name, params.Config, params.HostConfig, imgID, managed); err != nil { + return nil, err + } + defer func() { + if retErr != nil { + if err := daemon.cleanupContainer(container, true, true); err != nil { + logrus.Errorf("failed to cleanup container on create error: %v", err) + } + } + }() + + if err := daemon.setSecurityOptions(container, params.HostConfig); err != nil { + return nil, err + } + + container.HostConfig.StorageOpt = params.HostConfig.StorageOpt + + // Set RWLayer for container after mount labels have been set + if err := daemon.setRWLayer(container); err != nil { + return nil, err + } + + rootUID, rootGID, err := idtools.GetRootUIDGID(daemon.uidMaps, daemon.gidMaps) + if err != nil { + return nil, err + } + if err := idtools.MkdirAs(container.Root, 0700, rootUID, rootGID); err != nil { + return nil, err + } + if err := idtools.MkdirAs(container.CheckpointDir(), 0700, rootUID, rootGID); err != nil { + return nil, err + } + + if err := daemon.setHostConfig(container, params.HostConfig); err != nil { + return nil, err + } + + if err := daemon.createContainerPlatformSpecificSettings(container, params.Config, params.HostConfig); err != nil { + return nil, err + } + + var endpointsConfigs map[string]*networktypes.EndpointSettings + if params.NetworkingConfig != nil { + endpointsConfigs = params.NetworkingConfig.EndpointsConfig + } + // Make sure NetworkMode has an acceptable value. We do this to ensure + // backwards API compatibility. 
+ runconfig.SetDefaultNetModeIfBlank(container.HostConfig) + + daemon.updateContainerNetworkSettings(container, endpointsConfigs) + + if err := container.ToDisk(); err != nil { + logrus.Errorf("Error saving new container to disk: %v", err) + return nil, err + } + daemon.Register(container) + stateCtr.set(container.ID, "stopped") + daemon.LogContainerEvent(container, "create") + return container, nil +} + +func toHostConfigSelinuxLabels(labels []string) []string { + for i, l := range labels { + labels[i] = "label=" + l + } + return labels +} + +func (daemon *Daemon) generateSecurityOpt(hostConfig *containertypes.HostConfig) ([]string, error) { + for _, opt := range hostConfig.SecurityOpt { + con := strings.Split(opt, "=") + if con[0] == "label" { + // Caller overrode SecurityOpts + return nil, nil + } + } + ipcMode := hostConfig.IpcMode + pidMode := hostConfig.PidMode + privileged := hostConfig.Privileged + if ipcMode.IsHost() || pidMode.IsHost() || privileged { + return toHostConfigSelinuxLabels(label.DisableSecOpt()), nil + } + + var ipcLabel []string + var pidLabel []string + ipcContainer := ipcMode.Container() + pidContainer := pidMode.Container() + if ipcContainer != "" { + c, err := daemon.GetContainer(ipcContainer) + if err != nil { + return nil, err + } + ipcLabel = label.DupSecOpt(c.ProcessLabel) + if pidContainer == "" { + return toHostConfigSelinuxLabels(ipcLabel), err + } + } + if pidContainer != "" { + c, err := daemon.GetContainer(pidContainer) + if err != nil { + return nil, err + } + + pidLabel = label.DupSecOpt(c.ProcessLabel) + if ipcContainer == "" { + return toHostConfigSelinuxLabels(pidLabel), err + } + } + + if pidLabel != nil && ipcLabel != nil { + for i := 0; i < len(pidLabel); i++ { + if pidLabel[i] != ipcLabel[i] { + return nil, fmt.Errorf("--ipc and --pid containers SELinux labels aren't the same") + } + } + return toHostConfigSelinuxLabels(pidLabel), nil + } + return nil, nil +} + +func (daemon *Daemon) setRWLayer(container *container.Container) error { + var layerID layer.ChainID + if container.ImageID != "" { + img, err := daemon.imageStore.Get(container.ImageID) + if err != nil { + return err + } + layerID = img.RootFS.ChainID() + } + + rwLayerOpts := &layer.CreateRWLayerOpts{ + MountLabel: container.MountLabel, + InitFunc: daemon.getLayerInit(), + StorageOpt: container.HostConfig.StorageOpt, + } + + rwLayer, err := daemon.layerStore.CreateRWLayer(container.ID, layerID, rwLayerOpts) + if err != nil { + return err + } + container.RWLayer = rwLayer + + return nil +} + +// VolumeCreate creates a volume with the specified name, driver, and opts +// This is called directly from the Engine API +func (daemon *Daemon) VolumeCreate(name, driverName string, opts, labels map[string]string) (*types.Volume, error) { + if name == "" { + name = stringid.GenerateNonCryptoID() + } + + v, err := daemon.volumes.Create(name, driverName, opts, labels) + if err != nil { + return nil, err + } + + daemon.LogVolumeEvent(v.Name(), "create", map[string]string{"driver": v.DriverName()}) + apiV := volumeToAPIType(v) + apiV.Mountpoint = v.Path() + return apiV, nil +} + +func (daemon *Daemon) mergeAndVerifyConfig(config *containertypes.Config, img *image.Image) error { + if img != nil && img.Config != nil { + if err := merge(config, img.Config); err != nil { + return err + } + } + // Reset the Entrypoint if it is [""] + if len(config.Entrypoint) == 1 && config.Entrypoint[0] == "" { + config.Entrypoint = nil + } + if len(config.Entrypoint) == 0 && len(config.Cmd) == 0 { + return 
fmt.Errorf("No command specified") + } + return nil +} + +// Checks if the client set configurations for more than one network while creating a container +// Also checks if the IPAMConfig is valid +func (daemon *Daemon) verifyNetworkingConfig(nwConfig *networktypes.NetworkingConfig) error { + if nwConfig == nil || len(nwConfig.EndpointsConfig) == 0 { + return nil + } + if len(nwConfig.EndpointsConfig) == 1 { + for _, v := range nwConfig.EndpointsConfig { + if v != nil && v.IPAMConfig != nil { + if v.IPAMConfig.IPv4Address != "" && net.ParseIP(v.IPAMConfig.IPv4Address).To4() == nil { + return apierrors.NewBadRequestError(fmt.Errorf("invalid IPv4 address: %s", v.IPAMConfig.IPv4Address)) + } + if v.IPAMConfig.IPv6Address != "" { + n := net.ParseIP(v.IPAMConfig.IPv6Address) + // if the address is an invalid network address (ParseIP == nil) or if it is + // an IPv4 address (To4() != nil), then it is an invalid IPv6 address + if n == nil || n.To4() != nil { + return apierrors.NewBadRequestError(fmt.Errorf("invalid IPv6 address: %s", v.IPAMConfig.IPv6Address)) + } + } + } + } + return nil + } + l := make([]string, 0, len(nwConfig.EndpointsConfig)) + for k := range nwConfig.EndpointsConfig { + l = append(l, k) + } + err := fmt.Errorf("Container cannot be connected to network endpoints: %s", strings.Join(l, ", ")) + return apierrors.NewBadRequestError(err) +} diff --git a/components/engine/daemon/create_unix.go b/components/engine/daemon/create_unix.go new file mode 100644 index 0000000000..7b067bdc22 --- /dev/null +++ b/components/engine/daemon/create_unix.go @@ -0,0 +1,81 @@ +// +build !windows + +package daemon + +import ( + "fmt" + "os" + "path/filepath" + + "github.com/Sirupsen/logrus" + containertypes "github.com/docker/docker/api/types/container" + mounttypes "github.com/docker/docker/api/types/mount" + "github.com/docker/docker/container" + "github.com/docker/docker/pkg/stringid" + "github.com/opencontainers/selinux/go-selinux/label" +) + +// createContainerPlatformSpecificSettings performs platform specific container create functionality +func (daemon *Daemon) createContainerPlatformSpecificSettings(container *container.Container, config *containertypes.Config, hostConfig *containertypes.HostConfig) error { + if err := daemon.Mount(container); err != nil { + return err + } + defer daemon.Unmount(container) + + rootUID, rootGID := daemon.GetRemappedUIDGID() + if err := container.SetupWorkingDirectory(rootUID, rootGID); err != nil { + return err + } + + for spec := range config.Volumes { + name := stringid.GenerateNonCryptoID() + destination := filepath.Clean(spec) + + // Skip volumes for which we already have something mounted on that + // destination because of a --volume-from. + if container.IsDestinationMounted(destination) { + continue + } + path, err := container.GetResourcePath(destination) + if err != nil { + return err + } + + stat, err := os.Stat(path) + if err == nil && !stat.IsDir() { + return fmt.Errorf("cannot mount volume over existing file, file exists %s", path) + } + + v, err := daemon.volumes.CreateWithRef(name, hostConfig.VolumeDriver, container.ID, nil, nil) + if err != nil { + return err + } + + if err := label.Relabel(v.Path(), container.MountLabel, true); err != nil { + return err + } + + container.AddMountPointWithVolume(destination, v, true) + } + return daemon.populateVolumes(container) +} + +// populateVolumes copies data from the container's rootfs into the volume for non-binds. +// this is only called when the container is created. 
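The volume pass in `createContainerPlatformSpecificSettings` above can be read as: clean each declared destination, skip anything already mounted (for instance via `--volumes-from`), then create a freshly named anonymous volume for the rest. A runnable sketch under simplified assumptions (plain maps stand in for the container's mount state and the volume store):

```go
package main

import (
	"fmt"
	"path/filepath"
)

// newName stands in for stringid.GenerateNonCryptoID().
func newName(i int) string { return fmt.Sprintf("anon-%04d", i) }

func main() {
	// Destinations declared by the image/config VOLUME entries.
	configVolumes := map[string]struct{}{"/data": {}, "/cache/": {}}
	// Destinations already satisfied, e.g. by --volumes-from.
	mounted := map[string]bool{"/data": true}

	i := 0
	for spec := range configVolumes {
		dest := filepath.Clean(spec) // "/cache/" becomes "/cache"
		if mounted[dest] {
			continue // something is already mounted there; skip it
		}
		i++
		fmt.Printf("would create anonymous volume %s at %s\n", newName(i), dest)
	}
}
```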
+func (daemon *Daemon) populateVolumes(c *container.Container) error { + for _, mnt := range c.MountPoints { + if mnt.Volume == nil { + continue + } + + if mnt.Type != mounttypes.TypeVolume || !mnt.CopyData { + continue + } + + logrus.Debugf("copying image data from %s:%s, to %s", c.ID, mnt.Destination, mnt.Name) + if err := c.CopyImagePathContent(mnt.Volume, mnt.Destination); err != nil { + return err + } + } + return nil +} diff --git a/components/engine/daemon/create_windows.go b/components/engine/daemon/create_windows.go new file mode 100644 index 0000000000..bbf0dbe7b9 --- /dev/null +++ b/components/engine/daemon/create_windows.go @@ -0,0 +1,80 @@ +package daemon + +import ( + "fmt" + + containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/docker/container" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/volume" +) + +// createContainerPlatformSpecificSettings performs platform specific container create functionality +func (daemon *Daemon) createContainerPlatformSpecificSettings(container *container.Container, config *containertypes.Config, hostConfig *containertypes.HostConfig) error { + // Make sure the host config has the default daemon isolation if not specified by caller. + if containertypes.Isolation.IsDefault(containertypes.Isolation(hostConfig.Isolation)) { + hostConfig.Isolation = daemon.defaultIsolation + } + + for spec := range config.Volumes { + + mp, err := volume.ParseMountRaw(spec, hostConfig.VolumeDriver) + if err != nil { + return fmt.Errorf("Unrecognised volume spec: %v", err) + } + + // If the mountpoint doesn't have a name, generate one. + if len(mp.Name) == 0 { + mp.Name = stringid.GenerateNonCryptoID() + } + + // Skip volumes for which we already have something mounted on that + // destination because of a --volume-from. + if container.IsDestinationMounted(mp.Destination) { + continue + } + + volumeDriver := hostConfig.VolumeDriver + + // Create the volume in the volume driver. If it doesn't exist, + // a new one will be created. + v, err := daemon.volumes.CreateWithRef(mp.Name, volumeDriver, container.ID, nil, nil) + if err != nil { + return err + } + + // FIXME Windows: This code block is present in the Linux version and + // allows the contents to be copied to the container FS prior to it + // being started. However, the function utilizes the FollowSymLinkInScope + // path which does not cope with Windows volume-style file paths. There + // is a separate effort to resolve this (@swernli), so this processing + // is deferred for now. A case where this would be useful is when + // a dockerfile includes a VOLUME statement, but something is created + // in that directory during the dockerfile processing. What this means + // on Windows for TP5 is that in that scenario, the contents will not + // be copied, but that's (somewhat) OK as HCS will bomb out soon after + // as it doesn't support mapped directories which have contents in the + // destination path anyway. + // + // Example for repro later: + // FROM windowsservercore + // RUN mkdir c:\myvol + // RUN copy c:\windows\system32\ntdll.dll c:\myvol + // VOLUME "c:\myvol" + // + // Then + // docker build -t vol . + // docker run -it --rm vol cmd <-- This is where HCS will error out. 
+ // + // // never attempt to copy existing content in a container FS to a shared volume + // if v.DriverName() == volume.DefaultDriverName { + // if err := container.CopyImagePathContent(v, mp.Destination); err != nil { + // return err + // } + // } + + // Add it to container.MountPoints + container.AddMountPointWithVolume(mp.Destination, v, mp.RW) + } + return nil +} diff --git a/components/engine/daemon/daemon.go b/components/engine/daemon/daemon.go new file mode 100644 index 0000000000..3d45c71bf8 --- /dev/null +++ b/components/engine/daemon/daemon.go @@ -0,0 +1,1177 @@ +// Package daemon exposes the functions that occur on the host server +// that the Docker daemon is running. +// +// In implementing the various functions of the daemon, there is often +// a method-specific struct for configuring the runtime behavior. +package daemon + +import ( + "context" + "fmt" + "io/ioutil" + "net" + "os" + "path" + "path/filepath" + "runtime" + "strings" + "sync" + "time" + + "github.com/Sirupsen/logrus" + containerd "github.com/containerd/containerd/api/grpc/types" + "github.com/docker/docker/api" + "github.com/docker/docker/api/types" + containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/docker/container" + "github.com/docker/docker/daemon/config" + "github.com/docker/docker/daemon/discovery" + "github.com/docker/docker/daemon/events" + "github.com/docker/docker/daemon/exec" + "github.com/docker/docker/daemon/logger" + // register graph drivers + _ "github.com/docker/docker/daemon/graphdriver/register" + "github.com/docker/docker/daemon/initlayer" + "github.com/docker/docker/daemon/stats" + dmetadata "github.com/docker/docker/distribution/metadata" + "github.com/docker/docker/distribution/xfer" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + "github.com/docker/docker/libcontainerd" + "github.com/docker/docker/migrate/v1" + "github.com/docker/docker/pkg/fileutils" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/plugingetter" + "github.com/docker/docker/pkg/registrar" + "github.com/docker/docker/pkg/signal" + "github.com/docker/docker/pkg/sysinfo" + "github.com/docker/docker/pkg/system" + "github.com/docker/docker/pkg/truncindex" + "github.com/docker/docker/plugin" + refstore "github.com/docker/docker/reference" + "github.com/docker/docker/registry" + "github.com/docker/docker/runconfig" + volumedrivers "github.com/docker/docker/volume/drivers" + "github.com/docker/docker/volume/local" + "github.com/docker/docker/volume/store" + "github.com/docker/libnetwork" + "github.com/docker/libnetwork/cluster" + nwconfig "github.com/docker/libnetwork/config" + "github.com/docker/libtrust" + "github.com/pkg/errors" +) + +var ( + // DefaultRuntimeBinary is the default runtime to be used by + // containerd if none is specified + DefaultRuntimeBinary = "docker-runc" + + errSystemNotSupported = errors.New("The Docker daemon is not supported on this platform.") +) + +// Daemon holds information about the Docker daemon. 
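A pattern worth noticing throughout this struct's methods: fields such as `configStore` may be nil during early startup or shutdown, so accessors guard the pointer before reading it, as `HasExperimental` below does. A self-contained sketch with stand-in types (not the real `Daemon`):

```go
package main

import "fmt"

// config and daemon are illustrative stand-ins for the real types.
type config struct{ Experimental bool }

type daemon struct{ configStore *config }

// hasExperimental is safe to call even before configuration is loaded,
// mirroring the nil check in Daemon.HasExperimental.
func (d *daemon) hasExperimental() bool {
	return d.configStore != nil && d.configStore.Experimental
}

func main() {
	var d daemon
	fmt.Println(d.hasExperimental()) // false, no nil dereference
	d.configStore = &config{Experimental: true}
	fmt.Println(d.hasExperimental()) // true
}
```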
+type Daemon struct { + ID string + repository string + containers container.Store + execCommands *exec.Store + referenceStore refstore.Store + downloadManager *xfer.LayerDownloadManager + uploadManager *xfer.LayerUploadManager + distributionMetadataStore dmetadata.Store + trustKey libtrust.PrivateKey + idIndex *truncindex.TruncIndex + configStore *config.Config + statsCollector *stats.Collector + defaultLogConfig containertypes.LogConfig + RegistryService registry.Service + EventsService *events.Events + netController libnetwork.NetworkController + volumes *store.VolumeStore + discoveryWatcher discovery.Reloader + root string + seccompEnabled bool + apparmorEnabled bool + shutdown bool + uidMaps []idtools.IDMap + gidMaps []idtools.IDMap + layerStore layer.Store + imageStore image.Store + PluginStore *plugin.Store // todo: remove + pluginManager *plugin.Manager + nameIndex *registrar.Registrar + linkIndex *linkIndex + containerd libcontainerd.Client + containerdRemote libcontainerd.Remote + defaultIsolation containertypes.Isolation // Default isolation mode on Windows + clusterProvider cluster.Provider + cluster Cluster + metricsPluginListener net.Listener + + machineMemory uint64 + + seccompProfile []byte + seccompProfilePath string + + diskUsageRunning int32 + pruneRunning int32 +} + +// HasExperimental returns whether the experimental features of the daemon are enabled or not +func (daemon *Daemon) HasExperimental() bool { + if daemon.configStore != nil && daemon.configStore.Experimental { + return true + } + return false +} + +func (daemon *Daemon) restore() error { + var ( + currentDriver = daemon.GraphDriverName() + containers = make(map[string]*container.Container) + ) + + logrus.Info("Loading containers: start.") + + dir, err := ioutil.ReadDir(daemon.repository) + if err != nil { + return err + } + + for _, v := range dir { + id := v.Name() + container, err := daemon.load(id) + if err != nil { + logrus.Errorf("Failed to load container %v: %v", id, err) + continue + } + + // Ignore the container if it does not support the current driver being used by the graph + if (container.Driver == "" && currentDriver == "aufs") || container.Driver == currentDriver { + rwlayer, err := daemon.layerStore.GetRWLayer(container.ID) + if err != nil { + logrus.Errorf("Failed to load container mount %v: %v", id, err) + continue + } + container.RWLayer = rwlayer + logrus.Debugf("Loaded container %v", container.ID) + + containers[container.ID] = container + } else { + logrus.Debugf("Cannot load container %s because it was created with another graph driver.", container.ID) + } + } + + removeContainers := make(map[string]*container.Container) + restartContainers := make(map[*container.Container]chan struct{}) + activeSandboxes := make(map[string]interface{}) + for id, c := range containers { + if err := daemon.registerName(c); err != nil { + logrus.Errorf("Failed to register container %s: %s", c.ID, err) + delete(containers, id) + continue + } + daemon.Register(c) + + // verify that all volumes are valid and have been migrated from the pre-1.7 layout + if err := daemon.verifyVolumesInfo(c); err != nil { + // don't skip the container due to error + logrus.Errorf("Failed to verify volumes for container '%s': %v", c.ID, err) + } + + // The LogConfig.Type is empty if the container was created before docker 1.12 with default log driver. + // We should rewrite it to use the daemon defaults. 
+ // Fixes https://github.com/docker/docker/issues/22536 + if c.HostConfig.LogConfig.Type == "" { + if err := daemon.mergeAndVerifyLogConfig(&c.HostConfig.LogConfig); err != nil { + logrus.Errorf("Failed to verify log config for container %s: %q", c.ID, err) + continue + } + } + } + + var wg sync.WaitGroup + var mapLock sync.Mutex + for _, c := range containers { + wg.Add(1) + go func(c *container.Container) { + defer wg.Done() + daemon.backportMountSpec(c) + if err := c.ToDiskLocking(); err != nil { + logrus.WithError(err).WithField("container", c.ID).Error("error saving backported mountspec to disk") + } + + daemon.setStateCounter(c) + if c.IsRunning() || c.IsPaused() { + c.RestartManager().Cancel() // manually start containers because some need to wait for swarm networking + if err := daemon.containerd.Restore(c.ID, c.InitializeStdio); err != nil { + logrus.Errorf("Failed to restore %s with containerd: %s", c.ID, err) + return + } + + // we call Mount and then Unmount to get BaseFs of the container + if err := daemon.Mount(c); err != nil { + // The mount is unlikely to fail. However, in case mount fails + // the container should be allowed to restore here. Some functionalities + // (like docker exec -u user) might be missing but container is able to be + // stopped/restarted/removed. + // See #29365 for related information. + // The error is only logged here. + logrus.Warnf("Failed to mount container on getting BaseFs path %v: %v", c.ID, err) + } else { + if err := daemon.Unmount(c); err != nil { + logrus.Warnf("Failed to umount container on getting BaseFs path %v: %v", c.ID, err) + } + } + + c.ResetRestartManager(false) + if !c.HostConfig.NetworkMode.IsContainer() && c.IsRunning() { + options, err := daemon.buildSandboxOptions(c) + if err != nil { + logrus.Warnf("Failed to build sandbox options to restore container %s: %v", c.ID, err) + } + mapLock.Lock() + activeSandboxes[c.NetworkSettings.SandboxID] = options + mapLock.Unlock() + } + + } + // fixme: only if not running + // get list of containers we need to restart + if !c.IsRunning() && !c.IsPaused() { + // Do not autostart containers which + // have endpoints in a swarm scope + // network yet since the cluster is + // not initialized yet. We will start + // it after the cluster is + // initialized. + if daemon.configStore.AutoRestart && c.ShouldRestart() && !c.NetworkSettings.HasSwarmEndpoint { + mapLock.Lock() + restartContainers[c] = make(chan struct{}) + mapLock.Unlock() + } else if c.HostConfig != nil && c.HostConfig.AutoRemove { + mapLock.Lock() + removeContainers[c.ID] = c + mapLock.Unlock() + } + } + + if c.RemovalInProgress { + // We probably crashed in the middle of a removal, reset + // the flag. + // + // We DO NOT remove the container here as we do not + // know if the user had requested for either the + // associated volumes, network links or both to also + // be removed. So we put the container in the "dead" + // state and leave further processing up to them. 
+ logrus.Debugf("Resetting RemovalInProgress flag from %v", c.ID) + c.ResetRemovalInProgress() + c.SetDead() + c.ToDisk() + } + }(c) + } + wg.Wait() + daemon.netController, err = daemon.initNetworkController(daemon.configStore, activeSandboxes) + if err != nil { + return fmt.Errorf("Error initializing network controller: %v", err) + } + + // Now that all the containers are registered, register the links + for _, c := range containers { + if err := daemon.registerLinks(c, c.HostConfig); err != nil { + logrus.Errorf("failed to register link for container %s: %v", c.ID, err) + } + } + + group := sync.WaitGroup{} + for c, notifier := range restartContainers { + group.Add(1) + + go func(c *container.Container, chNotify chan struct{}) { + defer group.Done() + + logrus.Debugf("Starting container %s", c.ID) + + // ignore errors here as this is a best effort to wait for children to be + // running before we try to start the container + children := daemon.children(c) + timeout := time.After(5 * time.Second) + for _, child := range children { + if notifier, exists := restartContainers[child]; exists { + select { + case <-notifier: + case <-timeout: + } + } + } + + // Make sure networks are available before starting + daemon.waitForNetworks(c) + if err := daemon.containerStart(c, "", "", true); err != nil { + logrus.Errorf("Failed to start container %s: %s", c.ID, err) + } + close(chNotify) + }(c, notifier) + + } + group.Wait() + + removeGroup := sync.WaitGroup{} + for id := range removeContainers { + removeGroup.Add(1) + go func(cid string) { + if err := daemon.ContainerRm(cid, &types.ContainerRmConfig{ForceRemove: true, RemoveVolume: true}); err != nil { + logrus.Errorf("Failed to remove container %s: %s", cid, err) + } + removeGroup.Done() + }(id) + } + removeGroup.Wait() + + // any containers that were started above would already have had this done, + // however we need to now prepare the mountpoints for the rest of the containers as well. + // This shouldn't cause any issue running on the containers that already had this run. + // This must be run after any containers with a restart policy so that containerized plugins + // can have a chance to be running before we try to initialize them. + for _, c := range containers { + // if the container has a restart policy, do not + // prepare the mountpoints since it has been done on restarting. + // This is to speed up the daemon start when a restart container + // has a volume and the volume driver is not available. + if _, ok := restartContainers[c]; ok { + continue + } else if _, ok := removeContainers[c.ID]; ok { + // container is automatically removed, skip it. + continue + } + + group.Add(1) + go func(c *container.Container) { + defer group.Done() + if err := daemon.prepareMountPoints(c); err != nil { + logrus.Error(err) + } + }(c) + } + + group.Wait() + + logrus.Info("Loading containers: done.") + + return nil +} + +// RestartSwarmContainers restarts any autostart container which has a +// swarm endpoint. +func (daemon *Daemon) RestartSwarmContainers() { + group := sync.WaitGroup{} + for _, c := range daemon.List() { + if !c.IsRunning() && !c.IsPaused() { + // Autostart all the containers which have a + // swarm endpoint now that the cluster is + // initialized. 
+ if daemon.configStore.AutoRestart && c.ShouldRestart() && c.NetworkSettings.HasSwarmEndpoint { + group.Add(1) + go func(c *container.Container) { + defer group.Done() + if err := daemon.containerStart(c, "", "", true); err != nil { + logrus.Error(err) + } + }(c) + } + } + + } + group.Wait() +} + +// waitForNetworks is used during daemon initialization when starting up containers. +// It ensures that all of a container's networks are available before the daemon tries to start the container. +// In practice it just makes sure the discovery service is available for containers which use a network that requires discovery. +func (daemon *Daemon) waitForNetworks(c *container.Container) { + if daemon.discoveryWatcher == nil { + return + } + // Make sure that, if the container has a network that requires discovery, the discovery service is available before starting + for netName := range c.NetworkSettings.Networks { + // If we get `ErrNoSuchNetwork` here, we can assume that it is due to discovery not being ready + // Most likely this is because the K/V store used for discovery is in a container and needs to be started + if _, err := daemon.netController.NetworkByName(netName); err != nil { + if _, ok := err.(libnetwork.ErrNoSuchNetwork); !ok { + continue + } + // use a longish timeout here due to some slowdowns in libnetwork if the k/v store is on anything other than --net=host + // FIXME: why is this slow??? + logrus.Debugf("Container %s waiting for network to be ready", c.Name) + select { + case <-daemon.discoveryWatcher.ReadyCh(): + case <-time.After(60 * time.Second): + } + return + } + } +} + +func (daemon *Daemon) children(c *container.Container) map[string]*container.Container { + return daemon.linkIndex.children(c) +} + +// parents returns the parent containers of the +// container with the given name. +func (daemon *Daemon) parents(c *container.Container) map[string]*container.Container { + return daemon.linkIndex.parents(c) +} + +func (daemon *Daemon) registerLink(parent, child *container.Container, alias string) error { + fullName := path.Join(parent.Name, alias) + if err := daemon.nameIndex.Reserve(fullName, child.ID); err != nil { + if err == registrar.ErrNameReserved { + logrus.Warnf("error registering link for %s, to %s, as alias %s, ignoring: %v", parent.ID, child.ID, alias, err) + return nil + } + return err + } + daemon.linkIndex.link(parent, child, fullName) + return nil +} + +// DaemonJoinsCluster informs the daemon that it has joined the cluster and provides +// the handler to query the cluster component +func (daemon *Daemon) DaemonJoinsCluster(clusterProvider cluster.Provider) { + daemon.setClusterProvider(clusterProvider) +} + +// DaemonLeavesCluster informs the daemon that it has left the cluster +func (daemon *Daemon) DaemonLeavesCluster() { + // Daemon is in charge of removing the attachable networks with + // connected containers when the node leaves the swarm + daemon.clearAttachableNetworks() + // We no longer need the cluster provider, stop it now so that + // the network agent will stop listening to cluster events. + daemon.setClusterProvider(nil) + // Wait for the networking cluster agent to stop + daemon.netController.AgentStopWait() + // Daemon is in charge of removing the ingress network when the + // node leaves the swarm. Wait for job to be done or timeout. + // This is also called on graceful daemon shutdown. We need to + // wait, because the ingress release has to happen before the + // network controller is stopped. 
+ if done, err := daemon.ReleaseIngress(); err == nil { + select { + case <-done: + case <-time.After(5 * time.Second): + logrus.Warnf("timeout while waiting for ingress network removal") + } + } else { + logrus.Warnf("failed to initiate ingress network removal: %v", err) + } +} + +// setClusterProvider sets a component for querying the current cluster state. +func (daemon *Daemon) setClusterProvider(clusterProvider cluster.Provider) { + daemon.clusterProvider = clusterProvider + daemon.netController.SetClusterProvider(clusterProvider) +} + +// IsSwarmCompatible verifies if the current daemon +// configuration is compatible with the swarm mode +func (daemon *Daemon) IsSwarmCompatible() error { + if daemon.configStore == nil { + return nil + } + return daemon.configStore.IsSwarmCompatible() +} + +// NewDaemon sets up everything for the daemon to be able to service +// requests from the webserver. +func NewDaemon(config *config.Config, registryService registry.Service, containerdRemote libcontainerd.Remote, pluginStore *plugin.Store) (daemon *Daemon, err error) { + setDefaultMtu(config) + + // Ensure that we have a correct root key limit for launching containers. + if err := ModifyRootKeyLimit(); err != nil { + logrus.Warnf("unable to modify root key limit, number of containers could be limited by this quota: %v", err) + } + + // Ensure we have compatible and valid configuration options + if err := verifyDaemonSettings(config); err != nil { + return nil, err + } + + // Do we have a disabled network? + config.DisableBridge = isBridgeNetworkDisabled(config) + + // Verify the platform is supported as a daemon + if !platformSupported { + return nil, errSystemNotSupported + } + + // Validate platform-specific requirements + if err := checkSystem(); err != nil { + return nil, err + } + + uidMaps, gidMaps, err := setupRemappedRoot(config) + if err != nil { + return nil, err + } + rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps) + if err != nil { + return nil, err + } + + if err := setupDaemonProcess(config); err != nil { + return nil, err + } + + // set up the tmpDir to use a canonical path + tmp, err := prepareTempDir(config.Root, rootUID, rootGID) + if err != nil { + return nil, fmt.Errorf("Unable to get the TempDir under %s: %s", config.Root, err) + } + realTmp, err := fileutils.ReadSymlinkedDirectory(tmp) + if err != nil { + return nil, fmt.Errorf("Unable to get the full path to the TempDir (%s): %s", tmp, err) + } + os.Setenv("TMPDIR", realTmp) + + d := &Daemon{configStore: config} + // Ensure the daemon is properly shutdown if there is a failure during + // initialization + defer func() { + if err != nil { + if err := d.Shutdown(); err != nil { + logrus.Error(err) + } + } + }() + + // set up SIGUSR1 handler on Unix-like systems, or a Win32 global event + // on Windows to dump Go routine stacks + stackDumpDir := config.Root + if execRoot := config.GetExecRoot(); execRoot != "" { + stackDumpDir = execRoot + } + d.setupDumpStackTrap(stackDumpDir) + + if err := d.setupSeccompProfile(); err != nil { + return nil, err + } + + // Set the default isolation mode (only applicable on Windows) + if err := d.setDefaultIsolation(); err != nil { + return nil, fmt.Errorf("error setting default isolation mode: %v", err) + } + + logrus.Debugf("Using default logging driver %s", config.LogConfig.Type) + + if err := configureMaxThreads(config); err != nil { + logrus.Warnf("Failed to configure golang's threads limit: %v", err) + } + + if err := ensureDefaultAppArmorProfile(); err != nil { + 
logrus.Errorf(err.Error()) + } + + daemonRepo := filepath.Join(config.Root, "containers") + if err := idtools.MkdirAllAs(daemonRepo, 0700, rootUID, rootGID); err != nil && !os.IsExist(err) { + return nil, err + } + + if runtime.GOOS == "windows" { + if err := system.MkdirAll(filepath.Join(config.Root, "credentialspecs"), 0); err != nil && !os.IsExist(err) { + return nil, err + } + } + + driverName := os.Getenv("DOCKER_DRIVER") + if driverName == "" { + driverName = config.GraphDriver + } + + d.RegistryService = registryService + d.PluginStore = pluginStore + logger.RegisterPluginGetter(d.PluginStore) + + metricsSockPath, err := d.listenMetricsSock() + if err != nil { + return nil, err + } + registerMetricsPluginCallback(d.PluginStore, metricsSockPath) + + // Plugin system initialization should happen before restore. Do not change order. + d.pluginManager, err = plugin.NewManager(plugin.ManagerConfig{ + Root: filepath.Join(config.Root, "plugins"), + ExecRoot: getPluginExecRoot(config.Root), + Store: d.PluginStore, + Executor: containerdRemote, + RegistryService: registryService, + LiveRestoreEnabled: config.LiveRestoreEnabled, + LogPluginEvent: d.LogPluginEvent, // todo: make private + AuthzMiddleware: config.AuthzMiddleware, + }) + if err != nil { + return nil, errors.Wrap(err, "couldn't create plugin manager") + } + + d.layerStore, err = layer.NewStoreFromOptions(layer.StoreOptions{ + StorePath: config.Root, + MetadataStorePathTemplate: filepath.Join(config.Root, "image", "%s", "layerdb"), + GraphDriver: driverName, + GraphDriverOptions: config.GraphOptions, + UIDMaps: uidMaps, + GIDMaps: gidMaps, + PluginGetter: d.PluginStore, + ExperimentalEnabled: config.Experimental, + }) + if err != nil { + return nil, err + } + + graphDriver := d.layerStore.DriverName() + imageRoot := filepath.Join(config.Root, "image", graphDriver) + + // Configure and validate the kernels security support + if err := configureKernelSecuritySupport(config, graphDriver); err != nil { + return nil, err + } + + logrus.Debugf("Max Concurrent Downloads: %d", *config.MaxConcurrentDownloads) + d.downloadManager = xfer.NewLayerDownloadManager(d.layerStore, *config.MaxConcurrentDownloads) + logrus.Debugf("Max Concurrent Uploads: %d", *config.MaxConcurrentUploads) + d.uploadManager = xfer.NewLayerUploadManager(*config.MaxConcurrentUploads) + + ifs, err := image.NewFSStoreBackend(filepath.Join(imageRoot, "imagedb")) + if err != nil { + return nil, err + } + + d.imageStore, err = image.NewImageStore(ifs, d.layerStore) + if err != nil { + return nil, err + } + + // Configure the volumes driver + volStore, err := d.configureVolumes(rootUID, rootGID) + if err != nil { + return nil, err + } + + trustKey, err := api.LoadOrCreateTrustKey(config.TrustKeyPath) + if err != nil { + return nil, err + } + + trustDir := filepath.Join(config.Root, "trust") + + if err := system.MkdirAll(trustDir, 0700); err != nil { + return nil, err + } + + distributionMetadataStore, err := dmetadata.NewFSMetadataStore(filepath.Join(imageRoot, "distribution")) + if err != nil { + return nil, err + } + + eventsService := events.New() + + referenceStore, err := refstore.NewReferenceStore(filepath.Join(imageRoot, "repositories.json")) + if err != nil { + return nil, fmt.Errorf("Couldn't create Tag store repositories: %s", err) + } + + migrationStart := time.Now() + if err := v1.Migrate(config.Root, graphDriver, d.layerStore, d.imageStore, referenceStore, distributionMetadataStore); err != nil { + logrus.Errorf("Graph migration failed: %q. 
Your old graph data was found to be too inconsistent for upgrading to content-addressable storage. Some of the old data was probably not upgraded. We recommend starting over with a clean storage directory if possible.", err) + } + logrus.Infof("Graph migration to content-addressability took %.2f seconds", time.Since(migrationStart).Seconds()) + + // Discovery is only enabled when the daemon is launched with an address to advertise. When + // initialized, the daemon is registered and we can store the discovery backend as it's read-only + if err := d.initDiscovery(config); err != nil { + return nil, err + } + + sysInfo := sysinfo.New(false) + // Check if Devices cgroup is mounted, it is hard requirement for container security, + // on Linux. + if runtime.GOOS == "linux" && !sysInfo.CgroupDevicesEnabled { + return nil, errors.New("Devices cgroup isn't mounted") + } + + d.ID = trustKey.PublicKey().KeyID() + d.repository = daemonRepo + d.containers = container.NewMemoryStore() + d.execCommands = exec.NewStore() + d.referenceStore = referenceStore + d.distributionMetadataStore = distributionMetadataStore + d.trustKey = trustKey + d.idIndex = truncindex.NewTruncIndex([]string{}) + d.statsCollector = d.newStatsCollector(1 * time.Second) + d.defaultLogConfig = containertypes.LogConfig{ + Type: config.LogConfig.Type, + Config: config.LogConfig.Config, + } + d.EventsService = eventsService + d.volumes = volStore + d.root = config.Root + d.uidMaps = uidMaps + d.gidMaps = gidMaps + d.seccompEnabled = sysInfo.Seccomp + d.apparmorEnabled = sysInfo.AppArmor + + d.nameIndex = registrar.NewRegistrar() + d.linkIndex = newLinkIndex() + d.containerdRemote = containerdRemote + + go d.execCommandGC() + + d.containerd, err = containerdRemote.Client(d) + if err != nil { + return nil, err + } + + if err := d.restore(); err != nil { + return nil, err + } + + // FIXME: this method never returns an error + info, _ := d.SystemInfo() + + engineInfo.WithValues( + dockerversion.Version, + dockerversion.GitCommit, + info.Architecture, + info.Driver, + info.KernelVersion, + info.OperatingSystem, + info.OSType, + info.ID, + ).Set(1) + engineCpus.Set(float64(info.NCPU)) + engineMemory.Set(float64(info.MemTotal)) + + return d, nil +} + +func (daemon *Daemon) shutdownContainer(c *container.Container) error { + stopTimeout := c.StopTimeout() + // TODO(windows): Handle docker restart with paused containers + if c.IsPaused() { + // To terminate a process in freezer cgroup, we should send + // SIGTERM to this process then unfreeze it, and the process will + // force to terminate immediately. + logrus.Debugf("Found container %s is paused, sending SIGTERM before unpausing it", c.ID) + sig, ok := signal.SignalMap["TERM"] + if !ok { + return errors.New("System does not support SIGTERM") + } + if err := daemon.kill(c, int(sig)); err != nil { + return fmt.Errorf("sending SIGTERM to container %s with error: %v", c.ID, err) + } + if err := daemon.containerUnpause(c); err != nil { + return fmt.Errorf("Failed to unpause container %s with error: %v", c.ID, err) + } + + ctx, cancel := context.WithTimeout(context.Background(), time.Duration(stopTimeout)*time.Second) + defer cancel() + + // Wait with timeout for container to exit. 
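+		// c.Wait returns a channel delivering the container's exit status.
+		// A non-nil status.Err() here most likely means the context timed
+		// out before the container stopped, so we escalate to SIGKILL below.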
+ if status := <-c.Wait(ctx, container.WaitConditionNotRunning); status.Err() != nil { + logrus.Debugf("container %s failed to exit in %d second of SIGTERM, sending SIGKILL to force", c.ID, stopTimeout) + sig, ok := signal.SignalMap["KILL"] + if !ok { + return errors.New("System does not support SIGKILL") + } + if err := daemon.kill(c, int(sig)); err != nil { + logrus.Errorf("Failed to SIGKILL container %s", c.ID) + } + // Wait for exit again without a timeout. + // Explicitly ignore the result. + _ = <-c.Wait(context.Background(), container.WaitConditionNotRunning) + return status.Err() + } + } + // If container failed to exit in stopTimeout seconds of SIGTERM, then using the force + if err := daemon.containerStop(c, stopTimeout); err != nil { + return fmt.Errorf("Failed to stop container %s with error: %v", c.ID, err) + } + + // Wait without timeout for the container to exit. + // Ignore the result. + _ = <-c.Wait(context.Background(), container.WaitConditionNotRunning) + return nil +} + +// ShutdownTimeout returns the shutdown timeout based on the max stopTimeout of the containers, +// and is limited by daemon's ShutdownTimeout. +func (daemon *Daemon) ShutdownTimeout() int { + // By default we use daemon's ShutdownTimeout. + shutdownTimeout := daemon.configStore.ShutdownTimeout + + graceTimeout := 5 + if daemon.containers != nil { + for _, c := range daemon.containers.List() { + if shutdownTimeout >= 0 { + stopTimeout := c.StopTimeout() + if stopTimeout < 0 { + shutdownTimeout = -1 + } else { + if stopTimeout+graceTimeout > shutdownTimeout { + shutdownTimeout = stopTimeout + graceTimeout + } + } + } + } + } + return shutdownTimeout +} + +// Shutdown stops the daemon. +func (daemon *Daemon) Shutdown() error { + daemon.shutdown = true + // Keep mounts and networking running on daemon shutdown if + // we are to keep containers running and restore them. + + if daemon.configStore.LiveRestoreEnabled && daemon.containers != nil { + // check if there are any running containers, if none we should do some cleanup + if ls, err := daemon.Containers(&types.ContainerListOptions{}); len(ls) != 0 || err != nil { + // metrics plugins still need some cleanup + daemon.cleanupMetricsPlugins() + return nil + } + } + + if daemon.containers != nil { + logrus.Debugf("start clean shutdown of all containers with a %d seconds timeout...", daemon.configStore.ShutdownTimeout) + daemon.containers.ApplyAll(func(c *container.Container) { + if !c.IsRunning() { + return + } + logrus.Debugf("stopping %s", c.ID) + if err := daemon.shutdownContainer(c); err != nil { + logrus.Errorf("Stop container error: %v", err) + return + } + if mountid, err := daemon.layerStore.GetMountID(c.ID); err == nil { + daemon.cleanupMountsByID(mountid) + } + logrus.Debugf("container stopped %s", c.ID) + }) + } + + if daemon.volumes != nil { + if err := daemon.volumes.Shutdown(); err != nil { + logrus.Errorf("Error shutting down volume store: %v", err) + } + } + + if daemon.layerStore != nil { + if err := daemon.layerStore.Cleanup(); err != nil { + logrus.Errorf("Error during layer Store.Cleanup(): %v", err) + } + } + + // If we are part of a cluster, clean up cluster's stuff + if daemon.clusterProvider != nil { + logrus.Debugf("start clean shutdown of cluster resources...") + daemon.DaemonLeavesCluster() + } + + daemon.cleanupMetricsPlugins() + + // Shutdown plugins after containers and layerstore. Don't change the order. 
+	daemon.pluginShutdown()
+
+	// trigger libnetwork Stop only if it's initialized
+	if daemon.netController != nil {
+		daemon.netController.Stop()
+	}
+
+	if err := daemon.cleanupMounts(); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// Mount sets container.BaseFS
+// (is it not set coming in? why is it unset?)
+func (daemon *Daemon) Mount(container *container.Container) error {
+	dir, err := container.RWLayer.Mount(container.GetMountLabel())
+	if err != nil {
+		return err
+	}
+	logrus.Debugf("container mounted via layerStore: %v", dir)
+
+	if container.BaseFS != dir {
+		// The mount path reported by the graph driver should always be trusted on Windows, since the
+		// volume path for a given mounted layer may change over time. This should only be an error
+		// on non-Windows operating systems.
+		if container.BaseFS != "" && runtime.GOOS != "windows" {
+			daemon.Unmount(container)
+			return fmt.Errorf("Error: driver %s is returning inconsistent paths for container %s ('%s' then '%s')",
+				daemon.GraphDriverName(), container.ID, container.BaseFS, dir)
+		}
+	}
+	container.BaseFS = dir // TODO: combine these fields
+	return nil
+}
+
+// Unmount unsets the container base filesystem
+func (daemon *Daemon) Unmount(container *container.Container) error {
+	if err := container.RWLayer.Unmount(); err != nil {
+		logrus.Errorf("Error unmounting container %s: %s", container.ID, err)
+		return err
+	}
+
+	return nil
+}
+
+// Subnets returns the IPv4 and IPv6 subnets of networks that are managed by Docker.
+func (daemon *Daemon) Subnets() ([]net.IPNet, []net.IPNet) {
+	var v4Subnets []net.IPNet
+	var v6Subnets []net.IPNet
+
+	managedNetworks := daemon.netController.Networks()
+
+	for _, managedNetwork := range managedNetworks {
+		v4infos, v6infos := managedNetwork.Info().IpamInfo()
+		for _, info := range v4infos {
+			if info.IPAMData.Pool != nil {
+				v4Subnets = append(v4Subnets, *info.IPAMData.Pool)
+			}
+		}
+		for _, info := range v6infos {
+			if info.IPAMData.Pool != nil {
+				v6Subnets = append(v6Subnets, *info.IPAMData.Pool)
+			}
+		}
+	}
+
+	return v4Subnets, v6Subnets
+}
+
+// GraphDriverName returns the name of the graph driver used by the layer.Store
+func (daemon *Daemon) GraphDriverName() string {
+	return daemon.layerStore.DriverName()
+}
+
+// GetUIDGIDMaps returns the current daemon's user namespace settings
+// for the full uid and gid maps which will be applied to containers
+// started in this instance.
+func (daemon *Daemon) GetUIDGIDMaps() ([]idtools.IDMap, []idtools.IDMap) {
+	return daemon.uidMaps, daemon.gidMaps
+}
+
+// GetRemappedUIDGID returns the current daemon's uid and gid values
+// if user namespaces are in use for this daemon instance. If not
+// this function will return "real" root values of 0, 0.
+func (daemon *Daemon) GetRemappedUIDGID() (int, int) {
+	uid, gid, _ := idtools.GetRootUIDGID(daemon.uidMaps, daemon.gidMaps)
+	return uid, gid
+}
+
+// prepareTempDir prepares and returns the default directory to use
+// for temporary files.
+// If it doesn't exist, it is created. If it exists, its content is removed.
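+// The location can be overridden via the DOCKER_TMPDIR environment variable,
+// in which case the directory is used as-is and any existing content is kept.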
+func prepareTempDir(rootDir string, rootUID, rootGID int) (string, error) { + var tmpDir string + if tmpDir = os.Getenv("DOCKER_TMPDIR"); tmpDir == "" { + tmpDir = filepath.Join(rootDir, "tmp") + newName := tmpDir + "-old" + if err := os.Rename(tmpDir, newName); err == nil { + go func() { + if err := os.RemoveAll(newName); err != nil { + logrus.Warnf("failed to delete old tmp directory: %s", newName) + } + }() + } else { + logrus.Warnf("failed to rename %s for background deletion: %s. Deleting synchronously", tmpDir, err) + if err := os.RemoveAll(tmpDir); err != nil { + logrus.Warnf("failed to delete old tmp directory: %s", tmpDir) + } + } + } + // We don't remove the content of tmpdir if it's not the default, + // it may hold things that do not belong to us. + return tmpDir, idtools.MkdirAllAs(tmpDir, 0700, rootUID, rootGID) +} + +func (daemon *Daemon) setupInitLayer(initPath string) error { + rootUID, rootGID := daemon.GetRemappedUIDGID() + return initlayer.Setup(initPath, rootUID, rootGID) +} + +func setDefaultMtu(conf *config.Config) { + // do nothing if the config does not have the default 0 value. + if conf.Mtu != 0 { + return + } + conf.Mtu = config.DefaultNetworkMtu +} + +func (daemon *Daemon) configureVolumes(rootUID, rootGID int) (*store.VolumeStore, error) { + volumesDriver, err := local.New(daemon.configStore.Root, rootUID, rootGID) + if err != nil { + return nil, err + } + + volumedrivers.RegisterPluginGetter(daemon.PluginStore) + + if !volumedrivers.Register(volumesDriver, volumesDriver.Name()) { + return nil, errors.New("local volume driver could not be registered") + } + return store.New(daemon.configStore.Root) +} + +// IsShuttingDown tells whether the daemon is shutting down or not +func (daemon *Daemon) IsShuttingDown() bool { + return daemon.shutdown +} + +// initDiscovery initializes the discovery watcher for this daemon. 
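+// It is a no-op when no cluster advertise settings are configured
+// (i.e. when discovery is disabled).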
+func (daemon *Daemon) initDiscovery(conf *config.Config) error { + advertise, err := config.ParseClusterAdvertiseSettings(conf.ClusterStore, conf.ClusterAdvertise) + if err != nil { + if err == discovery.ErrDiscoveryDisabled { + return nil + } + return err + } + + conf.ClusterAdvertise = advertise + discoveryWatcher, err := discovery.Init(conf.ClusterStore, conf.ClusterAdvertise, conf.ClusterOpts) + if err != nil { + return fmt.Errorf("discovery initialization failed (%v)", err) + } + + daemon.discoveryWatcher = discoveryWatcher + return nil +} + +func isBridgeNetworkDisabled(conf *config.Config) bool { + return conf.BridgeConfig.Iface == config.DisableNetworkBridge +} + +func (daemon *Daemon) networkOptions(dconfig *config.Config, pg plugingetter.PluginGetter, activeSandboxes map[string]interface{}) ([]nwconfig.Option, error) { + options := []nwconfig.Option{} + if dconfig == nil { + return options, nil + } + + options = append(options, nwconfig.OptionExperimental(dconfig.Experimental)) + options = append(options, nwconfig.OptionDataDir(dconfig.Root)) + options = append(options, nwconfig.OptionExecRoot(dconfig.GetExecRoot())) + + dd := runconfig.DefaultDaemonNetworkMode() + dn := runconfig.DefaultDaemonNetworkMode().NetworkName() + options = append(options, nwconfig.OptionDefaultDriver(string(dd))) + options = append(options, nwconfig.OptionDefaultNetwork(dn)) + + if strings.TrimSpace(dconfig.ClusterStore) != "" { + kv := strings.Split(dconfig.ClusterStore, "://") + if len(kv) != 2 { + return nil, errors.New("kv store daemon config must be of the form KV-PROVIDER://KV-URL") + } + options = append(options, nwconfig.OptionKVProvider(kv[0])) + options = append(options, nwconfig.OptionKVProviderURL(kv[1])) + } + if len(dconfig.ClusterOpts) > 0 { + options = append(options, nwconfig.OptionKVOpts(dconfig.ClusterOpts)) + } + + if daemon.discoveryWatcher != nil { + options = append(options, nwconfig.OptionDiscoveryWatcher(daemon.discoveryWatcher)) + } + + if dconfig.ClusterAdvertise != "" { + options = append(options, nwconfig.OptionDiscoveryAddress(dconfig.ClusterAdvertise)) + } + + options = append(options, nwconfig.OptionLabels(dconfig.Labels)) + options = append(options, driverOptions(dconfig)...) + + if daemon.configStore != nil && daemon.configStore.LiveRestoreEnabled && len(activeSandboxes) != 0 { + options = append(options, nwconfig.OptionActiveSandboxes(activeSandboxes)) + } + + if pg != nil { + options = append(options, nwconfig.OptionPluginGetter(pg)) + } + + return options, nil +} + +func copyBlkioEntry(entries []*containerd.BlkioStatsEntry) []types.BlkioStatEntry { + out := make([]types.BlkioStatEntry, len(entries)) + for i, re := range entries { + out[i] = types.BlkioStatEntry{ + Major: re.Major, + Minor: re.Minor, + Op: re.Op, + Value: re.Value, + } + } + return out +} + +// GetCluster returns the cluster +func (daemon *Daemon) GetCluster() Cluster { + return daemon.cluster +} + +// SetCluster sets the cluster +func (daemon *Daemon) SetCluster(cluster Cluster) { + daemon.cluster = cluster +} + +func (daemon *Daemon) pluginShutdown() { + manager := daemon.pluginManager + // Check for a valid manager object. In error conditions, daemon init can fail + // and shutdown called, before plugin manager is initialized. 
+ if manager != nil { + manager.Shutdown() + } +} + +// PluginManager returns current pluginManager associated with the daemon +func (daemon *Daemon) PluginManager() *plugin.Manager { // set up before daemon to avoid this method + return daemon.pluginManager +} + +// PluginGetter returns current pluginStore associated with the daemon +func (daemon *Daemon) PluginGetter() *plugin.Store { + return daemon.PluginStore +} + +// CreateDaemonRoot creates the root for the daemon +func CreateDaemonRoot(config *config.Config) error { + // get the canonical path to the Docker root directory + var realRoot string + if _, err := os.Stat(config.Root); err != nil && os.IsNotExist(err) { + realRoot = config.Root + } else { + realRoot, err = fileutils.ReadSymlinkedDirectory(config.Root) + if err != nil { + return fmt.Errorf("Unable to get the full path to root (%s): %s", config.Root, err) + } + } + + uidMaps, gidMaps, err := setupRemappedRoot(config) + if err != nil { + return err + } + rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps) + if err != nil { + return err + } + + if err := setupDaemonRoot(config, realRoot, rootUID, rootGID); err != nil { + return err + } + + return nil +} diff --git a/components/engine/daemon/daemon_experimental.go b/components/engine/daemon/daemon_experimental.go new file mode 100644 index 0000000000..fb0251d4af --- /dev/null +++ b/components/engine/daemon/daemon_experimental.go @@ -0,0 +1,7 @@ +package daemon + +import "github.com/docker/docker/api/types/container" + +func (daemon *Daemon) verifyExperimentalContainerSettings(hostConfig *container.HostConfig, config *container.Config) ([]string, error) { + return nil, nil +} diff --git a/components/engine/daemon/daemon_linux.go b/components/engine/daemon/daemon_linux.go new file mode 100644 index 0000000000..5faf533fde --- /dev/null +++ b/components/engine/daemon/daemon_linux.go @@ -0,0 +1,88 @@ +package daemon + +import ( + "bufio" + "fmt" + "io" + "os" + "regexp" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/mount" +) + +// On Linux, plugins use a static path for storing execution state, +// instead of deriving path from daemon's exec-root. This is because +// plugin socket files are created here and they cannot exceed max +// path length of 108 bytes. 
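+// (108 bytes is the sun_path limit for AF_UNIX sockets on Linux.)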
+func getPluginExecRoot(root string) string { + return "/run/docker/plugins" +} + +func (daemon *Daemon) cleanupMountsByID(id string) error { + logrus.Debugf("Cleaning up old mountid %s: start.", id) + f, err := os.Open("/proc/self/mountinfo") + if err != nil { + return err + } + defer f.Close() + + return daemon.cleanupMountsFromReaderByID(f, id, mount.Unmount) +} + +func (daemon *Daemon) cleanupMountsFromReaderByID(reader io.Reader, id string, unmount func(target string) error) error { + if daemon.root == "" { + return nil + } + var errors []string + + regexps := getCleanPatterns(id) + sc := bufio.NewScanner(reader) + for sc.Scan() { + if fields := strings.Fields(sc.Text()); len(fields) >= 4 { + if mnt := fields[4]; strings.HasPrefix(mnt, daemon.root) { + for _, p := range regexps { + if p.MatchString(mnt) { + if err := unmount(mnt); err != nil { + logrus.Error(err) + errors = append(errors, err.Error()) + } + } + } + } + } + } + + if err := sc.Err(); err != nil { + return err + } + + if len(errors) > 0 { + return fmt.Errorf("Error cleaning up mounts:\n%v", strings.Join(errors, "\n")) + } + + logrus.Debugf("Cleaning up old mountid %v: done.", id) + return nil +} + +// cleanupMounts umounts shm/mqueue mounts for old containers +func (daemon *Daemon) cleanupMounts() error { + return daemon.cleanupMountsByID("") +} + +func getCleanPatterns(id string) (regexps []*regexp.Regexp) { + var patterns []string + if id == "" { + id = "[0-9a-f]{64}" + patterns = append(patterns, "containers/"+id+"/shm") + } + patterns = append(patterns, "aufs/mnt/"+id+"$", "overlay/"+id+"/merged$", "zfs/graph/"+id+"$") + for _, p := range patterns { + r, err := regexp.Compile(p) + if err == nil { + regexps = append(regexps, r) + } + } + return +} diff --git a/components/engine/daemon/daemon_linux_test.go b/components/engine/daemon/daemon_linux_test.go new file mode 100644 index 0000000000..c7d5117195 --- /dev/null +++ b/components/engine/daemon/daemon_linux_test.go @@ -0,0 +1,104 @@ +// +build linux + +package daemon + +import ( + "strings" + "testing" +) + +const mountsFixture = `142 78 0:38 / / rw,relatime - aufs none rw,si=573b861da0b3a05b,dio +143 142 0:60 / /proc rw,nosuid,nodev,noexec,relatime - proc proc rw +144 142 0:67 / /dev rw,nosuid - tmpfs tmpfs rw,mode=755 +145 144 0:78 / /dev/pts rw,nosuid,noexec,relatime - devpts devpts rw,gid=5,mode=620,ptmxmode=666 +146 144 0:49 / /dev/mqueue rw,nosuid,nodev,noexec,relatime - mqueue mqueue rw +147 142 0:84 / /sys rw,nosuid,nodev,noexec,relatime - sysfs sysfs rw +148 147 0:86 / /sys/fs/cgroup rw,nosuid,nodev,noexec,relatime - tmpfs tmpfs rw,mode=755 +149 148 0:22 /docker/5425782a95e643181d8a485a2bab3c0bb21f51d7dfc03511f0e6fbf3f3aa356a /sys/fs/cgroup/cpuset rw,nosuid,nodev,noexec,relatime - cgroup cgroup rw,cpuset +150 148 0:25 /docker/5425782a95e643181d8a485a2bab3c0bb21f51d7dfc03511f0e6fbf3f3aa356a /sys/fs/cgroup/cpu rw,nosuid,nodev,noexec,relatime - cgroup cgroup rw,cpu +151 148 0:27 /docker/5425782a95e643181d8a485a2bab3c0bb21f51d7dfc03511f0e6fbf3f3aa356a /sys/fs/cgroup/cpuacct rw,nosuid,nodev,noexec,relatime - cgroup cgroup rw,cpuacct +152 148 0:28 /docker/5425782a95e643181d8a485a2bab3c0bb21f51d7dfc03511f0e6fbf3f3aa356a /sys/fs/cgroup/memory rw,nosuid,nodev,noexec,relatime - cgroup cgroup rw,memory +153 148 0:29 /docker/5425782a95e643181d8a485a2bab3c0bb21f51d7dfc03511f0e6fbf3f3aa356a /sys/fs/cgroup/devices rw,nosuid,nodev,noexec,relatime - cgroup cgroup rw,devices +154 148 0:30 /docker/5425782a95e643181d8a485a2bab3c0bb21f51d7dfc03511f0e6fbf3f3aa356a 
/sys/fs/cgroup/freezer rw,nosuid,nodev,noexec,relatime - cgroup cgroup rw,freezer +155 148 0:31 /docker/5425782a95e643181d8a485a2bab3c0bb21f51d7dfc03511f0e6fbf3f3aa356a /sys/fs/cgroup/blkio rw,nosuid,nodev,noexec,relatime - cgroup cgroup rw,blkio +156 148 0:32 /docker/5425782a95e643181d8a485a2bab3c0bb21f51d7dfc03511f0e6fbf3f3aa356a /sys/fs/cgroup/perf_event rw,nosuid,nodev,noexec,relatime - cgroup cgroup rw,perf_event +157 148 0:33 /docker/5425782a95e643181d8a485a2bab3c0bb21f51d7dfc03511f0e6fbf3f3aa356a /sys/fs/cgroup/hugetlb rw,nosuid,nodev,noexec,relatime - cgroup cgroup rw,hugetlb +158 148 0:35 /docker/5425782a95e643181d8a485a2bab3c0bb21f51d7dfc03511f0e6fbf3f3aa356a /sys/fs/cgroup/systemd rw,nosuid,nodev,noexec,relatime - cgroup systemd rw,name=systemd +159 142 8:4 /home/mlaventure/gopath /home/mlaventure/gopath rw,relatime - ext4 /dev/disk/by-uuid/d99e196c-1fc4-4b4f-bab9-9962b2b34e99 rw,errors=remount-ro,data=ordered +160 142 8:4 /var/lib/docker/volumes/9a428b651ee4c538130143cad8d87f603a4bf31b928afe7ff3ecd65480692b35/_data /var/lib/docker rw,relatime - ext4 /dev/disk/by-uuid/d99e196c-1fc4-4b4f-bab9-9962b2b34e99 rw,errors=remount-ro,data=ordered +164 142 8:4 /home/mlaventure/gopath/src/github.com/docker/docker /go/src/github.com/docker/docker rw,relatime - ext4 /dev/disk/by-uuid/d99e196c-1fc4-4b4f-bab9-9962b2b34e99 rw,errors=remount-ro,data=ordered +165 142 8:4 /var/lib/docker/containers/5425782a95e643181d8a485a2bab3c0bb21f51d7dfc03511f0e6fbf3f3aa356a/resolv.conf /etc/resolv.conf rw,relatime - ext4 /dev/disk/by-uuid/d99e196c-1fc4-4b4f-bab9-9962b2b34e99 rw,errors=remount-ro,data=ordered +166 142 8:4 /var/lib/docker/containers/5425782a95e643181d8a485a2bab3c0bb21f51d7dfc03511f0e6fbf3f3aa356a/hostname /etc/hostname rw,relatime - ext4 /dev/disk/by-uuid/d99e196c-1fc4-4b4f-bab9-9962b2b34e99 rw,errors=remount-ro,data=ordered +167 142 8:4 /var/lib/docker/containers/5425782a95e643181d8a485a2bab3c0bb21f51d7dfc03511f0e6fbf3f3aa356a/hosts /etc/hosts rw,relatime - ext4 /dev/disk/by-uuid/d99e196c-1fc4-4b4f-bab9-9962b2b34e99 rw,errors=remount-ro,data=ordered +168 144 0:39 / /dev/shm rw,nosuid,nodev,noexec,relatime - tmpfs shm rw,size=65536k +169 144 0:12 /14 /dev/console rw,nosuid,noexec,relatime - devpts devpts rw,gid=5,mode=620,ptmxmode=000 +83 147 0:10 / /sys/kernel/security rw,relatime - securityfs none rw +89 142 0:87 / /tmp rw,relatime - tmpfs none rw +97 142 0:60 / /run/docker/netns/default rw,nosuid,nodev,noexec,relatime - proc proc rw +100 160 8:4 /var/lib/docker/volumes/9a428b651ee4c538130143cad8d87f603a4bf31b928afe7ff3ecd65480692b35/_data/aufs /var/lib/docker/aufs rw,relatime - ext4 /dev/disk/by-uuid/d99e196c-1fc4-4b4f-bab9-9962b2b34e99 rw,errors=remount-ro,data=ordered +115 100 0:102 / /var/lib/docker/aufs/mnt/0ecda1c63e5b58b3d89ff380bf646c95cc980252cf0b52466d43619aec7c8432 rw,relatime - aufs none rw,si=573b861dbc01905b,dio +116 160 0:107 / /var/lib/docker/containers/d045dc441d2e2e1d5b3e328d47e5943811a40819fb47497c5f5a5df2d6d13c37/shm rw,nosuid,nodev,noexec,relatime - tmpfs shm rw,size=65536k +118 142 0:102 / /run/docker/libcontainerd/d045dc441d2e2e1d5b3e328d47e5943811a40819fb47497c5f5a5df2d6d13c37/rootfs rw,relatime - aufs none rw,si=573b861dbc01905b,dio +242 142 0:60 / /run/docker/netns/c3664df2a0f7 rw,nosuid,nodev,noexec,relatime - proc proc rw +120 100 0:122 / /var/lib/docker/aufs/mnt/03ca4b49e71f1e49a41108829f4d5c70ac95934526e2af8984a1f65f1de0715d rw,relatime - aufs none rw,si=573b861eb147805b,dio +171 142 0:122 / 
/run/docker/libcontainerd/e406ff6f3e18516d50e03dbca4de54767a69a403a6f7ec1edc2762812824521e/rootfs rw,relatime - aufs none rw,si=573b861eb147805b,dio +310 142 0:60 / /run/docker/netns/71a18572176b rw,nosuid,nodev,noexec,relatime - proc proc rw +` + +func TestCleanupMounts(t *testing.T) { + d := &Daemon{ + root: "/var/lib/docker/", + } + + expected := "/var/lib/docker/containers/d045dc441d2e2e1d5b3e328d47e5943811a40819fb47497c5f5a5df2d6d13c37/shm" + var unmounted int + unmount := func(target string) error { + if target == expected { + unmounted++ + } + return nil + } + + d.cleanupMountsFromReaderByID(strings.NewReader(mountsFixture), "", unmount) + + if unmounted != 1 { + t.Fatal("Expected to unmount the shm (and the shm only)") + } +} + +func TestCleanupMountsByID(t *testing.T) { + d := &Daemon{ + root: "/var/lib/docker/", + } + + expected := "/var/lib/docker/aufs/mnt/03ca4b49e71f1e49a41108829f4d5c70ac95934526e2af8984a1f65f1de0715d" + var unmounted int + unmount := func(target string) error { + if target == expected { + unmounted++ + } + return nil + } + + d.cleanupMountsFromReaderByID(strings.NewReader(mountsFixture), "03ca4b49e71f1e49a41108829f4d5c70ac95934526e2af8984a1f65f1de0715d", unmount) + + if unmounted != 1 { + t.Fatal("Expected to unmount the auf root (and that only)") + } +} + +func TestNotCleanupMounts(t *testing.T) { + d := &Daemon{ + repository: "", + } + var unmounted bool + unmount := func(target string) error { + unmounted = true + return nil + } + mountInfo := `234 232 0:59 / /dev/shm rw,nosuid,nodev,noexec,relatime - tmpfs shm rw,size=65536k` + d.cleanupMountsFromReaderByID(strings.NewReader(mountInfo), "", unmount) + if unmounted { + t.Fatal("Expected not to clean up /dev/shm") + } +} diff --git a/components/engine/daemon/daemon_solaris.go b/components/engine/daemon/daemon_solaris.go new file mode 100644 index 0000000000..aacb416a88 --- /dev/null +++ b/components/engine/daemon/daemon_solaris.go @@ -0,0 +1,527 @@ +// +build solaris,cgo + +package daemon + +import ( + "fmt" + "net" + "strconv" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types" + containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/docker/container" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/parsers/kernel" + "github.com/docker/docker/pkg/sysinfo" + refstore "github.com/docker/docker/reference" + "github.com/docker/libnetwork" + nwconfig "github.com/docker/libnetwork/config" + "github.com/docker/libnetwork/drivers/solaris/bridge" + "github.com/docker/libnetwork/netlabel" + "github.com/docker/libnetwork/netutils" + lntypes "github.com/docker/libnetwork/types" + "github.com/opencontainers/runtime-spec/specs-go" + "github.com/opencontainers/selinux/go-selinux/label" + "github.com/pkg/errors" +) + +//#include +import "C" + +const ( + defaultVirtualSwitch = "Virtual Switch" + platformSupported = true + solarisMinCPUShares = 1 + solarisMaxCPUShares = 65535 +) + +func getMemoryResources(config containertypes.Resources) specs.CappedMemory { + memory := specs.CappedMemory{} + + if config.Memory > 0 { + memory.Physical = strconv.FormatInt(config.Memory, 10) + } + + if config.MemorySwap != 0 { + memory.Swap = strconv.FormatInt(config.MemorySwap, 10) + } + + return memory +} + +func getCPUResources(config containertypes.Resources) specs.CappedCPU { + cpu := specs.CappedCPU{} + + if config.CpusetCpus != "" { + cpu.Ncpus = config.CpusetCpus + } + + return cpu +} + +func 
(daemon *Daemon) cleanupMountsByID(id string) error {
+	return nil
+}
+
+func (daemon *Daemon) parseSecurityOpt(container *container.Container, hostConfig *containertypes.HostConfig) error {
+	return parseSecurityOpt(container, hostConfig)
+}
+
+func parseSecurityOpt(container *container.Container, config *containertypes.HostConfig) error {
+	// Since config.SecurityOpt is specifically defined as a "List of string values to
+	// customize labels for MLS systems, such as SELinux", and until we figure out
+	// how to map those to Trusted Extensions, security options are disabled on
+	// Solaris for now.
+	var (
+		labelOpts []string
+		err       error
+	)
+
+	if len(config.SecurityOpt) > 0 {
+		return errors.New("Security options are not supported on Solaris")
+	}
+
+	container.ProcessLabel, container.MountLabel, err = label.InitLabels(labelOpts)
+	return err
+}
+
+func setupRemappedRoot(config *Config) ([]idtools.IDMap, []idtools.IDMap, error) {
+	return nil, nil, nil
+}
+
+func setupDaemonRoot(config *Config, rootDir string, rootUID, rootGID int) error {
+	return nil
+}
+
+func (daemon *Daemon) getLayerInit() func(string) error {
+	return nil
+}
+
+func checkKernel() error {
+	// solaris can rely upon checkSystem() below, we don't skew kernel versions
+	return nil
+}
+
+func (daemon *Daemon) getCgroupDriver() string {
+	return ""
+}
+
+func (daemon *Daemon) adaptContainerSettings(hostConfig *containertypes.HostConfig, adjustCPUShares bool) error {
+	if hostConfig.CPUShares < 0 {
+		logrus.Warnf("Changing requested CPUShares of %d to minimum allowed of %d", hostConfig.CPUShares, solarisMinCPUShares)
+		hostConfig.CPUShares = solarisMinCPUShares
+	} else if hostConfig.CPUShares > solarisMaxCPUShares {
+		logrus.Warnf("Changing requested CPUShares of %d to maximum allowed of %d", hostConfig.CPUShares, solarisMaxCPUShares)
+		hostConfig.CPUShares = solarisMaxCPUShares
+	}
+
+	if hostConfig.Memory > 0 && hostConfig.MemorySwap == 0 {
+		// By default, MemorySwap is set to twice the size of Memory.
+		hostConfig.MemorySwap = hostConfig.Memory * 2
+	}
+
+	// Apply the default shm size only when none was requested.
+	if hostConfig.ShmSize == 0 {
+		hostConfig.ShmSize = container.DefaultSHMSize
+	}
+	if hostConfig.OomKillDisable == nil {
+		defaultOomKillDisable := false
+		hostConfig.OomKillDisable = &defaultOomKillDisable
+	}
+
+	return nil
+}
+
+// UsingSystemd returns true if cli option includes native.cgroupdriver=systemd
+func UsingSystemd(config *Config) bool {
+	return false
+}
+
+// verifyPlatformContainerSettings performs platform-specific validation of the
+// hostconfig and config structures.
+func verifyPlatformContainerSettings(daemon *Daemon, hostConfig *containertypes.HostConfig, config *containertypes.Config, update bool) ([]string, error) {
+	warnings := []string{}
+	sysInfo := sysinfo.New(true)
+	// NOTE: We do not enforce a minimum value for swap limits for zones on Solaris and
+	// therefore we will not do that for Docker containers either.
+	if hostConfig.Memory > 0 && !sysInfo.MemoryLimit {
+		warnings = append(warnings, "Your kernel does not support memory limit capabilities. Limitation discarded.")
+		logrus.Warnf("Your kernel does not support memory limit capabilities. Limitation discarded.")
+		hostConfig.Memory = 0
+		hostConfig.MemorySwap = -1
+	}
+	if hostConfig.Memory > 0 && hostConfig.MemorySwap != -1 && !sysInfo.SwapLimit {
+		warnings = append(warnings, "Your kernel does not support swap limit capabilities, memory limited without swap.")
+		logrus.Warnf("Your kernel does not support swap limit capabilities, memory limited without swap.")
+		hostConfig.MemorySwap = -1
+	}
+	if hostConfig.Memory > 0 && hostConfig.MemorySwap > 0 && hostConfig.MemorySwap < hostConfig.Memory {
+		return warnings, fmt.Errorf("Minimum memoryswap limit should be larger than memory limit, see usage.")
+	}
+	// Solaris NOTE: We allow and encourage setting the swap without setting the memory limit.
+
+	if hostConfig.MemorySwappiness != nil && *hostConfig.MemorySwappiness != -1 && !sysInfo.MemorySwappiness {
+		warnings = append(warnings, "Your kernel does not support memory swappiness capabilities, memory swappiness discarded.")
+		logrus.Warnf("Your kernel does not support memory swappiness capabilities, memory swappiness discarded.")
+		hostConfig.MemorySwappiness = nil
+	}
+	if hostConfig.MemoryReservation > 0 && !sysInfo.MemoryReservation {
+		warnings = append(warnings, "Your kernel does not support memory soft limit capabilities. Limitation discarded.")
+		logrus.Warnf("Your kernel does not support memory soft limit capabilities. Limitation discarded.")
+		hostConfig.MemoryReservation = 0
+	}
+	if hostConfig.Memory > 0 && hostConfig.MemoryReservation > 0 && hostConfig.Memory < hostConfig.MemoryReservation {
+		return warnings, fmt.Errorf("Minimum memory limit should be larger than memory reservation limit, see usage.")
+	}
+	if hostConfig.KernelMemory > 0 && !sysInfo.KernelMemory {
+		warnings = append(warnings, "Your kernel does not support kernel memory limit capabilities. Limitation discarded.")
+		logrus.Warnf("Your kernel does not support kernel memory limit capabilities. Limitation discarded.")
+		hostConfig.KernelMemory = 0
+	}
+	if hostConfig.CPUShares != 0 && !sysInfo.CPUShares {
+		warnings = append(warnings, "Your kernel does not support CPU shares. Shares discarded.")
+		logrus.Warnf("Your kernel does not support CPU shares. Shares discarded.")
+		hostConfig.CPUShares = 0
+	}
+	if hostConfig.CPUShares < 0 {
+		warnings = append(warnings, "Invalid CPUShares value. Must be positive. Discarding.")
+		logrus.Warnf("Invalid CPUShares value. Must be positive. Discarding.")
+		hostConfig.CPUShares = 0
+	}
+	if hostConfig.CPUShares > 0 && !sysinfo.IsCPUSharesAvailable() {
+		warnings = append(warnings, "Global zone default scheduling class not FSS. Discarding shares.")
+		logrus.Warnf("Global zone default scheduling class not FSS. Discarding shares.")
+		hostConfig.CPUShares = 0
+	}
+
+	// Solaris NOTE: Linux does not do negative checking for CPUShares and Quota here. But it makes sense to.
+	if hostConfig.CPUPeriod > 0 && !sysInfo.CPUCfsPeriod {
+		warnings = append(warnings, "Your kernel does not support CPU cfs period. Period discarded.")
+		logrus.Warnf("Your kernel does not support CPU cfs period. Period discarded.")
+		if hostConfig.CPUQuota > 0 {
+			warnings = append(warnings, "Quota will be applied on default period, not period specified.")
+			logrus.Warnf("Quota will be applied on default period, not period specified.")
+		}
+		hostConfig.CPUPeriod = 0
+	}
+	if hostConfig.CPUQuota != 0 && !sysInfo.CPUCfsQuota {
+		warnings = append(warnings, "Your kernel does not support CPU cfs quota. Quota discarded.")
+		logrus.Warnf("Your kernel does not support CPU cfs quota. Quota discarded.")
+		hostConfig.CPUQuota = 0
+	}
+	if hostConfig.CPUQuota < 0 {
+		warnings = append(warnings, "Invalid CPUQuota value. Must be positive. Discarding.")
+		logrus.Warnf("Invalid CPUQuota value. Must be positive. Discarding.")
+		hostConfig.CPUQuota = 0
+	}
+	if (hostConfig.CpusetCpus != "" || hostConfig.CpusetMems != "") && !sysInfo.Cpuset {
+		warnings = append(warnings, "Your kernel does not support cpuset. Cpuset discarded.")
+		logrus.Warnf("Your kernel does not support cpuset. Cpuset discarded.")
+		hostConfig.CpusetCpus = ""
+		hostConfig.CpusetMems = ""
+	}
+	cpusAvailable, err := sysInfo.IsCpusetCpusAvailable(hostConfig.CpusetCpus)
+	if err != nil {
+		return warnings, fmt.Errorf("Invalid value %s for cpuset cpus.", hostConfig.CpusetCpus)
+	}
+	if !cpusAvailable {
+		return warnings, fmt.Errorf("Requested CPUs are not available - requested %s, available: %s.", hostConfig.CpusetCpus, sysInfo.Cpus)
+	}
+	memsAvailable, err := sysInfo.IsCpusetMemsAvailable(hostConfig.CpusetMems)
+	if err != nil {
+		return warnings, fmt.Errorf("Invalid value %s for cpuset mems.", hostConfig.CpusetMems)
+	}
+	if !memsAvailable {
+		return warnings, fmt.Errorf("Requested memory nodes are not available - requested %s, available: %s.", hostConfig.CpusetMems, sysInfo.Mems)
+	}
+	if hostConfig.BlkioWeight > 0 && !sysInfo.BlkioWeight {
+		warnings = append(warnings, "Your kernel does not support Block I/O weight. Weight discarded.")
+		logrus.Warnf("Your kernel does not support Block I/O weight. Weight discarded.")
+		hostConfig.BlkioWeight = 0
+	}
+	if hostConfig.OomKillDisable != nil && !sysInfo.OomKillDisable {
+		*hostConfig.OomKillDisable = false
+		// Don't warn; this is the default setting but only applicable to Linux
+	}
+
+	if sysInfo.IPv4ForwardingDisabled {
+		warnings = append(warnings, "IPv4 forwarding is disabled. Networking will not work.")
+		logrus.Warnf("IPv4 forwarding is disabled. Networking will not work")
+	}
+
+	// Solaris NOTE: We do not allow setting Linux specific options, so check and warn for all of them.
+
+	if hostConfig.CapAdd != nil || hostConfig.CapDrop != nil {
+		warnings = append(warnings, "Adding or dropping kernel capabilities unsupported on Solaris. Discarding capabilities lists.")
+		logrus.Warnf("Adding or dropping kernel capabilities unsupported on Solaris. Discarding capabilities lists.")
+		hostConfig.CapAdd = nil
+		hostConfig.CapDrop = nil
+	}
+
+	if hostConfig.GroupAdd != nil {
+		warnings = append(warnings, "Additional groups unsupported on Solaris. Discarding groups lists.")
+		logrus.Warnf("Additional groups unsupported on Solaris. Discarding groups lists.")
+		hostConfig.GroupAdd = nil
+	}
+
+	if hostConfig.IpcMode != "" {
+		warnings = append(warnings, "IPC namespace assignment unsupported on Solaris. Discarding IPC setting.")
+		logrus.Warnf("IPC namespace assignment unsupported on Solaris. Discarding IPC setting.")
+		hostConfig.IpcMode = ""
+	}
+
+	if hostConfig.PidMode != "" {
+		warnings = append(warnings, "PID namespace setting unsupported on Solaris. Running container in host PID namespace.")
+		logrus.Warnf("PID namespace setting unsupported on Solaris. Running container in host PID namespace.")
+		hostConfig.PidMode = ""
+	}
+
+	if hostConfig.Privileged {
+		warnings = append(warnings, "Privileged mode unsupported on Solaris. Discarding privileged mode setting.")
+		logrus.Warnf("Privileged mode unsupported on Solaris. Discarding privileged mode setting.")
+		hostConfig.Privileged = false
+	}
+
+	if hostConfig.UTSMode != "" {
+		warnings = append(warnings, "UTS namespace assignment unsupported on Solaris. Discarding UTS setting.")
+		logrus.Warnf("UTS namespace assignment unsupported on Solaris. Discarding UTS setting.")
+		hostConfig.UTSMode = ""
+	}
+
+	if hostConfig.CgroupParent != "" {
+		warnings = append(warnings, "Specifying Cgroup parent unsupported on Solaris. Discarding cgroup parent setting.")
+		logrus.Warnf("Specifying Cgroup parent unsupported on Solaris. Discarding cgroup parent setting.")
+		hostConfig.CgroupParent = ""
+	}
+
+	if hostConfig.Ulimits != nil {
+		warnings = append(warnings, "Specifying ulimits unsupported on Solaris. Discarding ulimits setting.")
+		logrus.Warnf("Specifying ulimits unsupported on Solaris. Discarding ulimits setting.")
+		hostConfig.Ulimits = nil
+	}
+
+	return warnings, nil
+}
+
+// reloadPlatform updates configuration with platform specific options
+// and updates the passed attributes
+func (daemon *Daemon) reloadPlatform(config *Config, attributes map[string]string) {
+}
+
+// verifyDaemonSettings performs validation of daemon config struct
+func verifyDaemonSettings(config *Config) error {
+
+	if config.DefaultRuntime == "" {
+		config.DefaultRuntime = stockRuntimeName
+	}
+	if config.Runtimes == nil {
+		config.Runtimes = make(map[string]types.Runtime)
+	}
+	stockRuntimeOpts := []string{}
+	config.Runtimes[stockRuntimeName] = types.Runtime{Path: DefaultRuntimeBinary, Args: stockRuntimeOpts}
+
+	// checkSystem validates platform-specific requirements
+	return nil
+}
+
+func checkSystem() error {
+	// check OS version for compatibility, ensure running in global zone
+	var err error
+	var id C.zoneid_t
+
+	if id, err = C.getzoneid(); err != nil {
+		return fmt.Errorf("Exiting. Error getting zone id: %+v", err)
+	}
+	if int(id) != 0 {
+		return fmt.Errorf("Exiting because the Docker daemon is not running in the global zone")
+	}
+
+	v, err := kernel.GetKernelVersion()
+	if err != nil {
+		return err
+	}
+	if kernel.CompareKernelVersion(*v, kernel.VersionInfo{Kernel: 5, Major: 12, Minor: 0}) < 0 {
+		return fmt.Errorf("Your Solaris kernel version: %s doesn't support Docker. Please upgrade to 5.12.0", v.String())
+	}
+	return nil
+}
+
+// configureMaxThreads sets the Go runtime max threads threshold
+// which is 90% of the kernel setting from /proc/sys/kernel/threads-max
+func configureMaxThreads(config *Config) error {
+	return nil
+}
+
+// configureKernelSecuritySupport configures and validates security support for the kernel
+func configureKernelSecuritySupport(config *Config, driverName string) error {
+	return nil
+}
+
+func (daemon *Daemon) initNetworkController(config *Config, activeSandboxes map[string]interface{}) (libnetwork.NetworkController, error) {
+	netOptions, err := daemon.networkOptions(config, daemon.PluginStore, activeSandboxes)
+	if err != nil {
+		return nil, err
+	}
+
+	controller, err := libnetwork.New(netOptions...)
+ if err != nil { + return nil, fmt.Errorf("error obtaining controller instance: %v", err) + } + + // Initialize default network on "null" + if _, err := controller.NewNetwork("null", "none", "", libnetwork.NetworkOptionPersist(false)); err != nil { + return nil, fmt.Errorf("Error creating default 'null' network: %v", err) + } + + if !config.DisableBridge { + // Initialize default driver "bridge" + if err := initBridgeDriver(controller, config); err != nil { + return nil, err + } + } + + return controller, nil +} + +func initBridgeDriver(controller libnetwork.NetworkController, config *Config) error { + if n, err := controller.NetworkByName("bridge"); err == nil { + if err = n.Delete(); err != nil { + return fmt.Errorf("could not delete the default bridge network: %v", err) + } + } + + bridgeName := bridge.DefaultBridgeName + if config.bridgeConfig.Iface != "" { + bridgeName = config.bridgeConfig.Iface + } + netOption := map[string]string{ + bridge.BridgeName: bridgeName, + bridge.DefaultBridge: strconv.FormatBool(true), + netlabel.DriverMTU: strconv.Itoa(config.Mtu), + bridge.EnableICC: strconv.FormatBool(config.bridgeConfig.InterContainerCommunication), + } + + // --ip processing + if config.bridgeConfig.DefaultIP != nil { + netOption[bridge.DefaultBindingIP] = config.bridgeConfig.DefaultIP.String() + } + + var ipamV4Conf *libnetwork.IpamConf + + ipamV4Conf = &libnetwork.IpamConf{AuxAddresses: make(map[string]string)} + + nwList, _, err := netutils.ElectInterfaceAddresses(bridgeName) + if err != nil { + return errors.Wrap(err, "list bridge addresses failed") + } + + nw := nwList[0] + if len(nwList) > 1 && config.bridgeConfig.FixedCIDR != "" { + _, fCIDR, err := net.ParseCIDR(config.bridgeConfig.FixedCIDR) + if err != nil { + return errors.Wrap(err, "parse CIDR failed") + } + // Iterate through in case there are multiple addresses for the bridge + for _, entry := range nwList { + if fCIDR.Contains(entry.IP) { + nw = entry + break + } + } + } + + ipamV4Conf.PreferredPool = lntypes.GetIPNetCanonical(nw).String() + hip, _ := lntypes.GetHostPartIP(nw.IP, nw.Mask) + if hip.IsGlobalUnicast() { + ipamV4Conf.Gateway = nw.IP.String() + } + + if config.bridgeConfig.IP != "" { + ipamV4Conf.PreferredPool = config.bridgeConfig.IP + ip, _, err := net.ParseCIDR(config.bridgeConfig.IP) + if err != nil { + return err + } + ipamV4Conf.Gateway = ip.String() + } else if bridgeName == bridge.DefaultBridgeName && ipamV4Conf.PreferredPool != "" { + logrus.Infof("Default bridge (%s) is assigned with an IP address %s. 
Daemon option --bip can be used to set a preferred IP address", bridgeName, ipamV4Conf.PreferredPool) + } + + if config.bridgeConfig.FixedCIDR != "" { + _, fCIDR, err := net.ParseCIDR(config.bridgeConfig.FixedCIDR) + if err != nil { + return err + } + + ipamV4Conf.SubPool = fCIDR.String() + } + + if config.bridgeConfig.DefaultGatewayIPv4 != nil { + ipamV4Conf.AuxAddresses["DefaultGatewayIPv4"] = config.bridgeConfig.DefaultGatewayIPv4.String() + } + + v4Conf := []*libnetwork.IpamConf{ipamV4Conf} + v6Conf := []*libnetwork.IpamConf{} + + // Initialize default network on "bridge" with the same name + _, err = controller.NewNetwork("bridge", "bridge", "", + libnetwork.NetworkOptionDriverOpts(netOption), + libnetwork.NetworkOptionIpam("default", "", v4Conf, v6Conf, nil), + libnetwork.NetworkOptionDeferIPv6Alloc(false)) + if err != nil { + return fmt.Errorf("Error creating default 'bridge' network: %v", err) + } + return nil +} + +// registerLinks sets up links between containers and writes the +// configuration out for persistence. +func (daemon *Daemon) registerLinks(container *container.Container, hostConfig *containertypes.HostConfig) error { + return nil +} + +func (daemon *Daemon) cleanupMounts() error { + return nil +} + +// conditionalMountOnStart is a platform specific helper function during the +// container start to call mount. +func (daemon *Daemon) conditionalMountOnStart(container *container.Container) error { + return daemon.Mount(container) +} + +// conditionalUnmountOnCleanup is a platform specific helper function called +// during the cleanup of a container to unmount. +func (daemon *Daemon) conditionalUnmountOnCleanup(container *container.Container) error { + return daemon.Unmount(container) +} + +func restoreCustomImage(is image.Store, ls layer.Store, rs refstore.Store) error { + // Solaris has no custom images to register + return nil +} + +func driverOptions(config *Config) []nwconfig.Option { + return []nwconfig.Option{} +} + +func (daemon *Daemon) stats(c *container.Container) (*types.StatsJSON, error) { + return nil, nil +} + +// setDefaultIsolation determine the default isolation mode for the +// daemon to run in. 
This is only applicable on Windows +func (daemon *Daemon) setDefaultIsolation() error { + return nil +} + +func rootFSToAPIType(rootfs *image.RootFS) types.RootFS { + return types.RootFS{} +} + +func setupDaemonProcess(config *Config) error { + return nil +} + +func (daemon *Daemon) setupSeccompProfile() error { + return nil +} diff --git a/components/engine/daemon/daemon_test.go b/components/engine/daemon/daemon_test.go new file mode 100644 index 0000000000..eaa3be44d8 --- /dev/null +++ b/components/engine/daemon/daemon_test.go @@ -0,0 +1,311 @@ +// +build !solaris + +package daemon + +import ( + "io/ioutil" + "os" + "path/filepath" + "testing" + + containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/docker/container" + _ "github.com/docker/docker/pkg/discovery/memory" + "github.com/docker/docker/pkg/registrar" + "github.com/docker/docker/pkg/truncindex" + "github.com/docker/docker/volume" + volumedrivers "github.com/docker/docker/volume/drivers" + "github.com/docker/docker/volume/local" + "github.com/docker/docker/volume/store" + "github.com/docker/go-connections/nat" +) + +// +// https://github.com/docker/docker/issues/8069 +// + +func TestGetContainer(t *testing.T) { + c1 := &container.Container{ + CommonContainer: container.CommonContainer{ + ID: "5a4ff6a163ad4533d22d69a2b8960bf7fafdcba06e72d2febdba229008b0bf57", + Name: "tender_bardeen", + }, + } + + c2 := &container.Container{ + CommonContainer: container.CommonContainer{ + ID: "3cdbd1aa394fd68559fd1441d6eff2ab7c1e6363582c82febfaa8045df3bd8de", + Name: "drunk_hawking", + }, + } + + c3 := &container.Container{ + CommonContainer: container.CommonContainer{ + ID: "3cdbd1aa394fd68559fd1441d6eff2abfafdcba06e72d2febdba229008b0bf57", + Name: "3cdbd1aa", + }, + } + + c4 := &container.Container{ + CommonContainer: container.CommonContainer{ + ID: "75fb0b800922abdbef2d27e60abcdfaf7fb0698b2a96d22d3354da361a6ff4a5", + Name: "5a4ff6a163ad4533d22d69a2b8960bf7fafdcba06e72d2febdba229008b0bf57", + }, + } + + c5 := &container.Container{ + CommonContainer: container.CommonContainer{ + ID: "d22d69a2b8960bf7fafdcba06e72d2febdba960bf7fafdcba06e72d2f9008b060b", + Name: "d22d69a2b896", + }, + } + + store := container.NewMemoryStore() + store.Add(c1.ID, c1) + store.Add(c2.ID, c2) + store.Add(c3.ID, c3) + store.Add(c4.ID, c4) + store.Add(c5.ID, c5) + + index := truncindex.NewTruncIndex([]string{}) + index.Add(c1.ID) + index.Add(c2.ID) + index.Add(c3.ID) + index.Add(c4.ID) + index.Add(c5.ID) + + daemon := &Daemon{ + containers: store, + idIndex: index, + nameIndex: registrar.NewRegistrar(), + } + + daemon.reserveName(c1.ID, c1.Name) + daemon.reserveName(c2.ID, c2.Name) + daemon.reserveName(c3.ID, c3.Name) + daemon.reserveName(c4.ID, c4.Name) + daemon.reserveName(c5.ID, c5.Name) + + if container, _ := daemon.GetContainer("3cdbd1aa394fd68559fd1441d6eff2ab7c1e6363582c82febfaa8045df3bd8de"); container != c2 { + t.Fatal("Should explicitly match full container IDs") + } + + if container, _ := daemon.GetContainer("75fb0b8009"); container != c4 { + t.Fatal("Should match a partial ID") + } + + if container, _ := daemon.GetContainer("drunk_hawking"); container != c2 { + t.Fatal("Should match a full name") + } + + // c3.Name is a partial match for both c3.ID and c2.ID + if c, _ := daemon.GetContainer("3cdbd1aa"); c != c3 { + t.Fatal("Should match a full name even though it collides with another container's ID") + } + + if container, _ := daemon.GetContainer("d22d69a2b896"); container != c5 { + t.Fatal("Should match a container where the 
provided prefix is an exact match to the its name, and is also a prefix for its ID") + } + + if _, err := daemon.GetContainer("3cdbd1"); err == nil { + t.Fatal("Should return an error when provided a prefix that partially matches multiple container ID's") + } + + if _, err := daemon.GetContainer("nothing"); err == nil { + t.Fatal("Should return an error when provided a prefix that is neither a name or a partial match to an ID") + } +} + +func initDaemonWithVolumeStore(tmp string) (*Daemon, error) { + var err error + daemon := &Daemon{ + repository: tmp, + root: tmp, + } + daemon.volumes, err = store.New(tmp) + if err != nil { + return nil, err + } + + volumesDriver, err := local.New(tmp, 0, 0) + if err != nil { + return nil, err + } + volumedrivers.Register(volumesDriver, volumesDriver.Name()) + + return daemon, nil +} + +func TestValidContainerNames(t *testing.T) { + invalidNames := []string{"-rm", "&sdfsfd", "safd%sd"} + validNames := []string{"word-word", "word_word", "1weoid"} + + for _, name := range invalidNames { + if validContainerNamePattern.MatchString(name) { + t.Fatalf("%q is not a valid container name and was returned as valid.", name) + } + } + + for _, name := range validNames { + if !validContainerNamePattern.MatchString(name) { + t.Fatalf("%q is a valid container name and was returned as invalid.", name) + } + } +} + +func TestContainerInitDNS(t *testing.T) { + tmp, err := ioutil.TempDir("", "docker-container-test-") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmp) + + containerID := "d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e" + containerPath := filepath.Join(tmp, containerID) + if err := os.MkdirAll(containerPath, 0755); err != nil { + t.Fatal(err) + } + + config := `{"State":{"Running":true,"Paused":false,"Restarting":false,"OOMKilled":false,"Dead":false,"Pid":2464,"ExitCode":0, +"Error":"","StartedAt":"2015-05-26T16:48:53.869308965Z","FinishedAt":"0001-01-01T00:00:00Z"}, +"ID":"d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e","Created":"2015-05-26T16:48:53.7987917Z","Path":"top", +"Args":[],"Config":{"Hostname":"d59df5276e7b","Domainname":"","User":"","Memory":0,"MemorySwap":0,"CpuShares":0,"Cpuset":"", +"AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"PortSpecs":null,"ExposedPorts":null,"Tty":true,"OpenStdin":true, +"StdinOnce":false,"Env":null,"Cmd":["top"],"Image":"ubuntu:latest","Volumes":null,"WorkingDir":"","Entrypoint":null, +"NetworkDisabled":false,"MacAddress":"","OnBuild":null,"Labels":{}},"Image":"07f8e8c5e66084bef8f848877857537ffe1c47edd01a93af27e7161672ad0e95", +"NetworkSettings":{"IPAddress":"172.17.0.1","IPPrefixLen":16,"MacAddress":"02:42:ac:11:00:01","LinkLocalIPv6Address":"fe80::42:acff:fe11:1", +"LinkLocalIPv6PrefixLen":64,"GlobalIPv6Address":"","GlobalIPv6PrefixLen":0,"Gateway":"172.17.42.1","IPv6Gateway":"","Bridge":"docker0","Ports":{}}, +"ResolvConfPath":"/var/lib/docker/containers/d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e/resolv.conf", +"HostnamePath":"/var/lib/docker/containers/d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e/hostname", +"HostsPath":"/var/lib/docker/containers/d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e/hosts", +"LogPath":"/var/lib/docker/containers/d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e/d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e-json.log", +"Name":"/ubuntu","Driver":"aufs","MountLabel":"","ProcessLabel":"","AppArmorProfile":"","RestartCount":0, 
+"UpdateDns":false,"Volumes":{},"VolumesRW":{},"AppliedVolumesFrom":null}` + + // Container struct only used to retrieve path to config file + container := &container.Container{CommonContainer: container.CommonContainer{Root: containerPath}} + configPath, err := container.ConfigPath() + if err != nil { + t.Fatal(err) + } + if err = ioutil.WriteFile(configPath, []byte(config), 0644); err != nil { + t.Fatal(err) + } + + hostConfig := `{"Binds":[],"ContainerIDFile":"","Memory":0,"MemorySwap":0,"CpuShares":0,"CpusetCpus":"", +"Privileged":false,"PortBindings":{},"Links":null,"PublishAllPorts":false,"Dns":null,"DnsOptions":null,"DnsSearch":null,"ExtraHosts":null,"VolumesFrom":null, +"Devices":[],"NetworkMode":"bridge","IpcMode":"","PidMode":"","CapAdd":null,"CapDrop":null,"RestartPolicy":{"Name":"no","MaximumRetryCount":0}, +"SecurityOpt":null,"ReadonlyRootfs":false,"Ulimits":null,"LogConfig":{"Type":"","Config":null},"CgroupParent":""}` + + hostConfigPath, err := container.HostConfigPath() + if err != nil { + t.Fatal(err) + } + if err = ioutil.WriteFile(hostConfigPath, []byte(hostConfig), 0644); err != nil { + t.Fatal(err) + } + + daemon, err := initDaemonWithVolumeStore(tmp) + if err != nil { + t.Fatal(err) + } + defer volumedrivers.Unregister(volume.DefaultDriverName) + + c, err := daemon.load(containerID) + if err != nil { + t.Fatal(err) + } + + if c.HostConfig.DNS == nil { + t.Fatal("Expected container DNS to not be nil") + } + + if c.HostConfig.DNSSearch == nil { + t.Fatal("Expected container DNSSearch to not be nil") + } + + if c.HostConfig.DNSOptions == nil { + t.Fatal("Expected container DNSOptions to not be nil") + } +} + +func newPortNoError(proto, port string) nat.Port { + p, _ := nat.NewPort(proto, port) + return p +} + +func TestMerge(t *testing.T) { + volumesImage := make(map[string]struct{}) + volumesImage["/test1"] = struct{}{} + volumesImage["/test2"] = struct{}{} + portsImage := make(nat.PortSet) + portsImage[newPortNoError("tcp", "1111")] = struct{}{} + portsImage[newPortNoError("tcp", "2222")] = struct{}{} + configImage := &containertypes.Config{ + ExposedPorts: portsImage, + Env: []string{"VAR1=1", "VAR2=2"}, + Volumes: volumesImage, + } + + portsUser := make(nat.PortSet) + portsUser[newPortNoError("tcp", "2222")] = struct{}{} + portsUser[newPortNoError("tcp", "3333")] = struct{}{} + volumesUser := make(map[string]struct{}) + volumesUser["/test3"] = struct{}{} + configUser := &containertypes.Config{ + ExposedPorts: portsUser, + Env: []string{"VAR2=3", "VAR3=3"}, + Volumes: volumesUser, + } + + if err := merge(configUser, configImage); err != nil { + t.Error(err) + } + + if len(configUser.ExposedPorts) != 3 { + t.Fatalf("Expected 3 ExposedPorts, 1111, 2222 and 3333, found %d", len(configUser.ExposedPorts)) + } + for portSpecs := range configUser.ExposedPorts { + if portSpecs.Port() != "1111" && portSpecs.Port() != "2222" && portSpecs.Port() != "3333" { + t.Fatalf("Expected 1111 or 2222 or 3333, found %s", portSpecs) + } + } + if len(configUser.Env) != 3 { + t.Fatalf("Expected 3 env var, VAR1=1, VAR2=3 and VAR3=3, found %d", len(configUser.Env)) + } + for _, env := range configUser.Env { + if env != "VAR1=1" && env != "VAR2=3" && env != "VAR3=3" { + t.Fatalf("Expected VAR1=1 or VAR2=3 or VAR3=3, found %s", env) + } + } + + if len(configUser.Volumes) != 3 { + t.Fatalf("Expected 3 volumes, /test1, /test2 and /test3, found %d", len(configUser.Volumes)) + } + for v := range configUser.Volumes { + if v != "/test1" && v != "/test2" && v != "/test3" { + t.Fatalf("Expected /test1 or 
/test2 or /test3, found %s", v) + } + } + + ports, _, err := nat.ParsePortSpecs([]string{"0000"}) + if err != nil { + t.Error(err) + } + configImage2 := &containertypes.Config{ + ExposedPorts: ports, + } + + if err := merge(configUser, configImage2); err != nil { + t.Error(err) + } + + if len(configUser.ExposedPorts) != 4 { + t.Fatalf("Expected 4 ExposedPorts, 0000, 1111, 2222 and 3333, found %d", len(configUser.ExposedPorts)) + } + for portSpecs := range configUser.ExposedPorts { + if portSpecs.Port() != "0" && portSpecs.Port() != "1111" && portSpecs.Port() != "2222" && portSpecs.Port() != "3333" { + t.Fatalf("Expected %d or %d or %d or %d, found %s", 0, 1111, 2222, 3333, portSpecs) + } + } +} diff --git a/components/engine/daemon/daemon_unix.go b/components/engine/daemon/daemon_unix.go new file mode 100644 index 0000000000..031cdc4a8b --- /dev/null +++ b/components/engine/daemon/daemon_unix.go @@ -0,0 +1,1318 @@ +// +build linux freebsd + +package daemon + +import ( + "bufio" + "bytes" + "fmt" + "io/ioutil" + "net" + "os" + "path/filepath" + "runtime" + "runtime/debug" + "strconv" + "strings" + "syscall" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/blkiodev" + pblkiodev "github.com/docker/docker/api/types/blkiodev" + containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/docker/container" + "github.com/docker/docker/daemon/config" + "github.com/docker/docker/image" + "github.com/docker/docker/opts" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/parsers" + "github.com/docker/docker/pkg/parsers/kernel" + "github.com/docker/docker/pkg/sysinfo" + "github.com/docker/docker/runconfig" + "github.com/docker/docker/volume" + "github.com/docker/libnetwork" + nwconfig "github.com/docker/libnetwork/config" + "github.com/docker/libnetwork/drivers/bridge" + "github.com/docker/libnetwork/netlabel" + "github.com/docker/libnetwork/netutils" + "github.com/docker/libnetwork/options" + lntypes "github.com/docker/libnetwork/types" + "github.com/golang/protobuf/ptypes" + "github.com/opencontainers/runc/libcontainer/cgroups" + rsystem "github.com/opencontainers/runc/libcontainer/system" + specs "github.com/opencontainers/runtime-spec/specs-go" + "github.com/opencontainers/selinux/go-selinux/label" + "github.com/pkg/errors" + "github.com/vishvananda/netlink" +) + +const ( + // See https://git.kernel.org/cgit/linux/kernel/git/tip/tip.git/tree/kernel/sched/sched.h?id=8cd9234c64c584432f6992fe944ca9e46ca8ea76#n269 + linuxMinCPUShares = 2 + linuxMaxCPUShares = 262144 + platformSupported = true + // It's not a kernel limit; we want this 4M limit to provide a reasonably functional container + linuxMinMemory = 4194304 + // constants for remapped root settings + defaultIDSpecifier string = "default" + defaultRemappedID string = "dockremap" + + // constants for cgroup drivers + cgroupFsDriver = "cgroupfs" + cgroupSystemdDriver = "systemd" +) + +func getMemoryResources(config containertypes.Resources) *specs.LinuxMemory { + memory := specs.LinuxMemory{} + + if config.Memory > 0 { + limit := uint64(config.Memory) + memory.Limit = &limit + } + + if config.MemoryReservation > 0 { + reservation := uint64(config.MemoryReservation) + memory.Reservation = &reservation + } + + if config.MemorySwap > 0 { + swap := uint64(config.MemorySwap) + memory.Swap = &swap + } + + if config.MemorySwappiness != nil { + swappiness := uint64(*config.MemorySwappiness) + memory.Swappiness = &swappiness + } + + if 
config.KernelMemory != 0 { + kernelMemory := uint64(config.KernelMemory) + memory.Kernel = &kernelMemory + } + + return &memory +} + +func getCPUResources(config containertypes.Resources) (*specs.LinuxCPU, error) { + cpu := specs.LinuxCPU{} + + if config.CPUShares < 0 { + return nil, fmt.Errorf("shares: invalid argument") + } + if config.CPUShares >= 0 { + shares := uint64(config.CPUShares) + cpu.Shares = &shares + } + + if config.CpusetCpus != "" { + cpu.Cpus = config.CpusetCpus + } + + if config.CpusetMems != "" { + cpu.Mems = config.CpusetMems + } + + if config.NanoCPUs > 0 { + // https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt + period := uint64(100 * time.Millisecond / time.Microsecond) + quota := config.NanoCPUs * int64(period) / 1e9 + cpu.Period = &period + cpu.Quota = &quota + } + + if config.CPUPeriod != 0 { + period := uint64(config.CPUPeriod) + cpu.Period = &period + } + + if config.CPUQuota != 0 { + q := config.CPUQuota + cpu.Quota = &q + } + + if config.CPURealtimePeriod != 0 { + period := uint64(config.CPURealtimePeriod) + cpu.RealtimePeriod = &period + } + + if config.CPURealtimeRuntime != 0 { + c := config.CPURealtimeRuntime + cpu.RealtimeRuntime = &c + } + + return &cpu, nil +} + +func getBlkioWeightDevices(config containertypes.Resources) ([]specs.LinuxWeightDevice, error) { + var stat syscall.Stat_t + var blkioWeightDevices []specs.LinuxWeightDevice + + for _, weightDevice := range config.BlkioWeightDevice { + if err := syscall.Stat(weightDevice.Path, &stat); err != nil { + return nil, err + } + weight := weightDevice.Weight + d := specs.LinuxWeightDevice{Weight: &weight} + d.Major = int64(stat.Rdev / 256) + d.Minor = int64(stat.Rdev % 256) + blkioWeightDevices = append(blkioWeightDevices, d) + } + + return blkioWeightDevices, nil +} + +func (daemon *Daemon) parseSecurityOpt(container *container.Container, hostConfig *containertypes.HostConfig) error { + container.NoNewPrivileges = daemon.configStore.NoNewPrivileges + return parseSecurityOpt(container, hostConfig) +} + +func parseSecurityOpt(container *container.Container, config *containertypes.HostConfig) error { + var ( + labelOpts []string + err error + ) + + for _, opt := range config.SecurityOpt { + if opt == "no-new-privileges" { + container.NoNewPrivileges = true + continue + } + if opt == "disable" { + labelOpts = append(labelOpts, "disable") + continue + } + + var con []string + if strings.Contains(opt, "=") { + con = strings.SplitN(opt, "=", 2) + } else if strings.Contains(opt, ":") { + con = strings.SplitN(opt, ":", 2) + logrus.Warn("Security options with `:` as a separator are deprecated and will be completely unsupported in 17.04, use `=` instead.") + } + if len(con) != 2 { + return fmt.Errorf("invalid --security-opt 1: %q", opt) + } + + switch con[0] { + case "label": + labelOpts = append(labelOpts, con[1]) + case "apparmor": + container.AppArmorProfile = con[1] + case "seccomp": + container.SeccompProfile = con[1] + case "no-new-privileges": + noNewPrivileges, err := strconv.ParseBool(con[1]) + if err != nil { + return fmt.Errorf("invalid --security-opt 2: %q", opt) + } + container.NoNewPrivileges = noNewPrivileges + default: + return fmt.Errorf("invalid --security-opt 2: %q", opt) + } + } + + container.ProcessLabel, container.MountLabel, err = label.InitLabels(labelOpts) + return err +} + +func getBlkioThrottleDevices(devs []*blkiodev.ThrottleDevice) ([]specs.LinuxThrottleDevice, error) { + var throttleDevices []specs.LinuxThrottleDevice + var stat syscall.Stat_t + + for _, d := range devs { 
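+ // Stat the device node to recover its device number. The major/minor + // split below assumes the classic Linux encoding with the minor number + // in the low 8 bits, e.g. /dev/sda (major 8, minor 0) has st_rdev 0x800, + // so 0x800/256 = 8 and 0x800%256 = 0.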
+ if err := syscall.Stat(d.Path, &stat); err != nil { + return nil, err + } + d := specs.LinuxThrottleDevice{Rate: d.Rate} + d.Major = int64(stat.Rdev / 256) + d.Minor = int64(stat.Rdev % 256) + throttleDevices = append(throttleDevices, d) + } + + return throttleDevices, nil +} + +func checkKernel() error { + // Check for unsupported kernel versions + // FIXME: it would be cleaner to not test for specific versions, but rather + // test for specific functionalities. + // Unfortunately we can't test for the feature "does not cause a kernel panic" + // without actually causing a kernel panic, so we need this workaround until + // the circumstances of pre-3.10 crashes are clearer. + // For details see https://github.com/docker/docker/issues/407 + // Docker 1.11 and above doesn't actually run on kernels older than 3.4, + // due to containerd-shim usage of PR_SET_CHILD_SUBREAPER (introduced in 3.4). + if !kernel.CheckKernelVersion(3, 10, 0) { + v, _ := kernel.GetKernelVersion() + if os.Getenv("DOCKER_NOWARN_KERNEL_VERSION") == "" { + logrus.Fatalf("Your Linux kernel version %s is not supported for running docker. Please upgrade your kernel to 3.10.0 or newer.", v.String()) + } + } + return nil +} + +// adaptContainerSettings is called during container creation to modify any +// settings necessary in the HostConfig structure. +func (daemon *Daemon) adaptContainerSettings(hostConfig *containertypes.HostConfig, adjustCPUShares bool) error { + if adjustCPUShares && hostConfig.CPUShares > 0 { + // Handle unsupported CPUShares + if hostConfig.CPUShares < linuxMinCPUShares { + logrus.Warnf("Changing requested CPUShares of %d to minimum allowed of %d", hostConfig.CPUShares, linuxMinCPUShares) + hostConfig.CPUShares = linuxMinCPUShares + } else if hostConfig.CPUShares > linuxMaxCPUShares { + logrus.Warnf("Changing requested CPUShares of %d to maximum allowed of %d", hostConfig.CPUShares, linuxMaxCPUShares) + hostConfig.CPUShares = linuxMaxCPUShares + } + } + if hostConfig.Memory > 0 && hostConfig.MemorySwap == 0 { + // By default, MemorySwap is set to twice the size of Memory. + hostConfig.MemorySwap = hostConfig.Memory * 2 + } + if hostConfig.ShmSize == 0 { + hostConfig.ShmSize = config.DefaultShmSize + if daemon.configStore != nil { + hostConfig.ShmSize = int64(daemon.configStore.ShmSize) + } + } + var err error + opts, err := daemon.generateSecurityOpt(hostConfig) + if err != nil { + return err + } + hostConfig.SecurityOpt = append(hostConfig.SecurityOpt, opts...) + if hostConfig.MemorySwappiness == nil { + defaultSwappiness := int64(-1) + hostConfig.MemorySwappiness = &defaultSwappiness + } + if hostConfig.OomKillDisable == nil { + defaultOomKillDisable := false + hostConfig.OomKillDisable = &defaultOomKillDisable + } + + return nil +} + +func verifyContainerResources(resources *containertypes.Resources, sysInfo *sysinfo.SysInfo, update bool) ([]string, error) { + warnings := []string{} + + // memory subsystem checks and adjustments + if resources.Memory != 0 && resources.Memory < linuxMinMemory { + return warnings, fmt.Errorf("Minimum memory limit allowed is 4MB") + } + if resources.Memory > 0 && !sysInfo.MemoryLimit { + warnings = append(warnings, "Your kernel does not support memory limit capabilities or the cgroup is not mounted. Limitation discarded.") + logrus.Warn("Your kernel does not support memory limit capabilities or the cgroup is not mounted. 
Limitation discarded.") + resources.Memory = 0 + resources.MemorySwap = -1 + } + if resources.Memory > 0 && resources.MemorySwap != -1 && !sysInfo.SwapLimit { + warnings = append(warnings, "Your kernel does not support swap limit capabilities or the cgroup is not mounted. Memory limited without swap.") + logrus.Warn("Your kernel does not support swap limit capabilities, or the cgroup is not mounted. Memory limited without swap.") + resources.MemorySwap = -1 + } + if resources.Memory > 0 && resources.MemorySwap > 0 && resources.MemorySwap < resources.Memory { + return warnings, fmt.Errorf("Minimum memoryswap limit should be larger than memory limit, see usage") + } + if resources.Memory == 0 && resources.MemorySwap > 0 && !update { + return warnings, fmt.Errorf("You should always set the Memory limit when using Memoryswap limit, see usage") + } + if resources.MemorySwappiness != nil && *resources.MemorySwappiness != -1 && !sysInfo.MemorySwappiness { + warnings = append(warnings, "Your kernel does not support memory swappiness capabilities or the cgroup is not mounted. Memory swappiness discarded.") + logrus.Warn("Your kernel does not support memory swappiness capabilities, or the cgroup is not mounted. Memory swappiness discarded.") + resources.MemorySwappiness = nil + } + if resources.MemorySwappiness != nil { + swappiness := *resources.MemorySwappiness + if swappiness < -1 || swappiness > 100 { + return warnings, fmt.Errorf("Invalid value: %v, valid memory swappiness range is 0-100", swappiness) + } + } + if resources.MemoryReservation > 0 && !sysInfo.MemoryReservation { + warnings = append(warnings, "Your kernel does not support memory soft limit capabilities or the cgroup is not mounted. Limitation discarded.") + logrus.Warn("Your kernel does not support memory soft limit capabilities or the cgroup is not mounted. Limitation discarded.") + resources.MemoryReservation = 0 + } + if resources.MemoryReservation > 0 && resources.MemoryReservation < linuxMinMemory { + return warnings, fmt.Errorf("Minimum memory reservation allowed is 4MB") + } + if resources.Memory > 0 && resources.MemoryReservation > 0 && resources.Memory < resources.MemoryReservation { + return warnings, fmt.Errorf("Minimum memory limit can not be less than memory reservation limit, see usage") + } + if resources.KernelMemory > 0 && !sysInfo.KernelMemory { + warnings = append(warnings, "Your kernel does not support kernel memory limit capabilities or the cgroup is not mounted. Limitation discarded.") + logrus.Warn("Your kernel does not support kernel memory limit capabilities or the cgroup is not mounted. Limitation discarded.") + resources.KernelMemory = 0 + } + if resources.KernelMemory > 0 && resources.KernelMemory < linuxMinMemory { + return warnings, fmt.Errorf("Minimum kernel memory limit allowed is 4MB") + } + if resources.KernelMemory > 0 && !kernel.CheckKernelVersion(4, 0, 0) { + warnings = append(warnings, "You specified a kernel memory limit on a kernel older than 4.0. Kernel memory limits are experimental on older kernels, they won't work as expected and can cause your system to become unstable.") + logrus.Warn("You specified a kernel memory limit on a kernel older than 4.0. 
Kernel memory limits are experimental on older kernels, they won't work as expected and can cause your system to become unstable.") + } + if resources.OomKillDisable != nil && !sysInfo.OomKillDisable { + // only produce warnings if the setting wasn't to *disable* the OOM Kill; no point + // warning the caller if they already wanted the feature to be off + if *resources.OomKillDisable { + warnings = append(warnings, "Your kernel does not support OomKillDisable. OomKillDisable discarded.") + logrus.Warn("Your kernel does not support OomKillDisable. OomKillDisable discarded.") + } + resources.OomKillDisable = nil + } + + if resources.PidsLimit != 0 && !sysInfo.PidsLimit { + warnings = append(warnings, "Your kernel does not support pids limit capabilities or the cgroup is not mounted. PIDs limit discarded.") + logrus.Warn("Your kernel does not support pids limit capabilities or the cgroup is not mounted. PIDs limit discarded.") + resources.PidsLimit = 0 + } + + // cpu subsystem checks and adjustments + if resources.NanoCPUs > 0 && resources.CPUPeriod > 0 { + return warnings, fmt.Errorf("Conflicting options: Nano CPUs and CPU Period cannot both be set") + } + if resources.NanoCPUs > 0 && resources.CPUQuota > 0 { + return warnings, fmt.Errorf("Conflicting options: Nano CPUs and CPU Quota cannot both be set") + } + if resources.NanoCPUs > 0 && (!sysInfo.CPUCfsPeriod || !sysInfo.CPUCfsQuota) { + return warnings, fmt.Errorf("NanoCPUs can not be set, as your kernel does not support CPU cfs period/quota or the cgroup is not mounted") + } + // The highest precision we could get on Linux is 0.001, by setting + // cpu.cfs_period_us=1000000 (i.e. 1s) + // cpu.cfs_quota_us=1000 (i.e. 1ms) + // See the following link for details: + // https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt + // Here we don't enforce the lower limit; it is up to the underlying platform (e.g., Linux) to return an error. + // The error message says 0.01 so that it stays consistent with Windows + if resources.NanoCPUs < 0 || resources.NanoCPUs > int64(sysinfo.NumCPU())*1e9 { + return warnings, fmt.Errorf("Range of CPUs is from 0.01 to %d.00, as there are only %d CPUs available", sysinfo.NumCPU(), sysinfo.NumCPU()) + } + + if resources.CPUShares > 0 && !sysInfo.CPUShares { + warnings = append(warnings, "Your kernel does not support CPU shares or the cgroup is not mounted. Shares discarded.") + logrus.Warn("Your kernel does not support CPU shares or the cgroup is not mounted. Shares discarded.") + resources.CPUShares = 0 + } + if resources.CPUPeriod > 0 && !sysInfo.CPUCfsPeriod { + warnings = append(warnings, "Your kernel does not support CPU cfs period or the cgroup is not mounted. Period discarded.") + logrus.Warn("Your kernel does not support CPU cfs period or the cgroup is not mounted. Period discarded.") + resources.CPUPeriod = 0 + } + if resources.CPUPeriod != 0 && (resources.CPUPeriod < 1000 || resources.CPUPeriod > 1000000) { + return warnings, fmt.Errorf("CPU cfs period can not be less than 1ms (i.e. 1000) or larger than 1s (i.e. 1000000)") + } + if resources.CPUQuota > 0 && !sysInfo.CPUCfsQuota { + warnings = append(warnings, "Your kernel does not support CPU cfs quota or the cgroup is not mounted. Quota discarded.") + logrus.Warn("Your kernel does not support CPU cfs quota or the cgroup is not mounted. Quota discarded.") + resources.CPUQuota = 0 + } + if resources.CPUQuota > 0 && resources.CPUQuota < 1000 { + return warnings, fmt.Errorf("CPU cfs quota can not be less than 1ms (i.e. 
1000)") + } + if resources.CPUPercent > 0 { + warnings = append(warnings, fmt.Sprintf("%s does not support CPU percent. Percent discarded.", runtime.GOOS)) + logrus.Warnf("%s does not support CPU percent. Percent discarded.", runtime.GOOS) + resources.CPUPercent = 0 + } + + // cpuset subsystem checks and adjustments + if (resources.CpusetCpus != "" || resources.CpusetMems != "") && !sysInfo.Cpuset { + warnings = append(warnings, "Your kernel does not support cpuset or the cgroup is not mounted. Cpuset discarded.") + logrus.Warn("Your kernel does not support cpuset or the cgroup is not mounted. Cpuset discarded.") + resources.CpusetCpus = "" + resources.CpusetMems = "" + } + cpusAvailable, err := sysInfo.IsCpusetCpusAvailable(resources.CpusetCpus) + if err != nil { + return warnings, fmt.Errorf("Invalid value %s for cpuset cpus", resources.CpusetCpus) + } + if !cpusAvailable { + return warnings, fmt.Errorf("Requested CPUs are not available - requested %s, available: %s", resources.CpusetCpus, sysInfo.Cpus) + } + memsAvailable, err := sysInfo.IsCpusetMemsAvailable(resources.CpusetMems) + if err != nil { + return warnings, fmt.Errorf("Invalid value %s for cpuset mems", resources.CpusetMems) + } + if !memsAvailable { + return warnings, fmt.Errorf("Requested memory nodes are not available - requested %s, available: %s", resources.CpusetMems, sysInfo.Mems) + } + + // blkio subsystem checks and adjustments + if resources.BlkioWeight > 0 && !sysInfo.BlkioWeight { + warnings = append(warnings, "Your kernel does not support Block I/O weight or the cgroup is not mounted. Weight discarded.") + logrus.Warn("Your kernel does not support Block I/O weight or the cgroup is not mounted. Weight discarded.") + resources.BlkioWeight = 0 + } + if resources.BlkioWeight > 0 && (resources.BlkioWeight < 10 || resources.BlkioWeight > 1000) { + return warnings, fmt.Errorf("Range of blkio weight is from 10 to 1000") + } + if resources.IOMaximumBandwidth != 0 || resources.IOMaximumIOps != 0 { + return warnings, fmt.Errorf("Invalid QoS settings: %s does not support Maximum IO Bandwidth or Maximum IO IOps", runtime.GOOS) + } + if len(resources.BlkioWeightDevice) > 0 && !sysInfo.BlkioWeightDevice { + warnings = append(warnings, "Your kernel does not support Block I/O weight_device or the cgroup is not mounted. Weight-device discarded.") + logrus.Warn("Your kernel does not support Block I/O weight_device or the cgroup is not mounted. Weight-device discarded.") + resources.BlkioWeightDevice = []*pblkiodev.WeightDevice{} + } + if len(resources.BlkioDeviceReadBps) > 0 && !sysInfo.BlkioReadBpsDevice { + warnings = append(warnings, "Your kernel does not support BPS Block I/O read limit or the cgroup is not mounted. Block I/O BPS read limit discarded.") + logrus.Warn("Your kernel does not support BPS Block I/O read limit or the cgroup is not mounted. Block I/O BPS read limit discarded.") + resources.BlkioDeviceReadBps = []*pblkiodev.ThrottleDevice{} + } + if len(resources.BlkioDeviceWriteBps) > 0 && !sysInfo.BlkioWriteBpsDevice { + warnings = append(warnings, "Your kernel does not support BPS Block I/O write limit or the cgroup is not mounted. Block I/O BPS write limit discarded.") + logrus.Warn("Your kernel does not support BPS Block I/O write limit or the cgroup is not mounted. 
Block I/O BPS write limit discarded.") + resources.BlkioDeviceWriteBps = []*pblkiodev.ThrottleDevice{} + } + if len(resources.BlkioDeviceReadIOps) > 0 && !sysInfo.BlkioReadIOpsDevice { + warnings = append(warnings, "Your kernel does not support IOPS Block I/O read limit or the cgroup is not mounted. Block I/O IOPS read limit discarded.") + logrus.Warn("Your kernel does not support IOPS Block I/O read limit or the cgroup is not mounted. Block I/O IOPS read limit discarded.") + resources.BlkioDeviceReadIOps = []*pblkiodev.ThrottleDevice{} + } + if len(resources.BlkioDeviceWriteIOps) > 0 && !sysInfo.BlkioWriteIOpsDevice { + warnings = append(warnings, "Your kernel does not support IOPS Block I/O write limit or the cgroup is not mounted. Block I/O IOPS write limit discarded.") + logrus.Warn("Your kernel does not support IOPS Block I/O write limit or the cgroup is not mounted. Block I/O IOPS write limit discarded.") + resources.BlkioDeviceWriteIOps = []*pblkiodev.ThrottleDevice{} + } + + return warnings, nil +} + +func (daemon *Daemon) getCgroupDriver() string { + cgroupDriver := cgroupFsDriver + + if UsingSystemd(daemon.configStore) { + cgroupDriver = cgroupSystemdDriver + } + return cgroupDriver +} + +// getCD gets the raw value of the native.cgroupdriver option, if set. +func getCD(config *config.Config) string { + for _, option := range config.ExecOptions { + key, val, err := parsers.ParseKeyValueOpt(option) + if err != nil || !strings.EqualFold(key, "native.cgroupdriver") { + continue + } + return val + } + return "" +} + +// VerifyCgroupDriver validates native.cgroupdriver +func VerifyCgroupDriver(config *config.Config) error { + cd := getCD(config) + if cd == "" || cd == cgroupFsDriver || cd == cgroupSystemdDriver { + return nil + } + return fmt.Errorf("native.cgroupdriver option %s not supported", cd) +} + +// UsingSystemd returns true if cli option includes native.cgroupdriver=systemd +func UsingSystemd(config *config.Config) bool { + return getCD(config) == cgroupSystemdDriver +} + +// verifyPlatformContainerSettings performs platform-specific validation of the +// hostconfig and config structures. +func verifyPlatformContainerSettings(daemon *Daemon, hostConfig *containertypes.HostConfig, config *containertypes.Config, update bool) ([]string, error) { + var warnings []string + sysInfo := sysinfo.New(true) + + warnings, err := daemon.verifyExperimentalContainerSettings(hostConfig, config) + if err != nil { + return warnings, err + } + + w, err := verifyContainerResources(&hostConfig.Resources, sysInfo, update) + + // Regardless of whether err is nil, w may contain warnings. + warnings = append(warnings, w...) + + if err != nil { + return warnings, err + } + + if hostConfig.ShmSize < 0 { + return warnings, fmt.Errorf("SHM size can not be less than 0") + } + + if hostConfig.OomScoreAdj < -1000 || hostConfig.OomScoreAdj > 1000 { + return warnings, fmt.Errorf("Invalid value %d, range for oom score adj is [-1000, 1000]", hostConfig.OomScoreAdj) + } + + // ip-forwarding does not affect containers with '--net=host' (or '--net=none') + if sysInfo.IPv4ForwardingDisabled && !(hostConfig.NetworkMode.IsHost() || hostConfig.NetworkMode.IsNone()) { + warnings = append(warnings, "IPv4 forwarding is disabled. Networking will not work.") + logrus.Warn("IPv4 forwarding is disabled. 
Networking will not work") + } + // check for various conflicting options with user namespaces + if daemon.configStore.RemappedRoot != "" && hostConfig.UsernsMode.IsPrivate() { + if hostConfig.Privileged { + return warnings, fmt.Errorf("Privileged mode is incompatible with user namespaces") + } + if hostConfig.NetworkMode.IsHost() && !hostConfig.UsernsMode.IsHost() { + return warnings, fmt.Errorf("Cannot share the host's network namespace when user namespaces are enabled") + } + if hostConfig.PidMode.IsHost() && !hostConfig.UsernsMode.IsHost() { + return warnings, fmt.Errorf("Cannot share the host PID namespace when user namespaces are enabled") + } + } + if hostConfig.CgroupParent != "" && UsingSystemd(daemon.configStore) { + // CgroupParent for systemd cgroup should be named as "xxx.slice" + if len(hostConfig.CgroupParent) <= 6 || !strings.HasSuffix(hostConfig.CgroupParent, ".slice") { + return warnings, fmt.Errorf("cgroup-parent for systemd cgroup should be a valid slice named as \"xxx.slice\"") + } + } + if hostConfig.Runtime == "" { + hostConfig.Runtime = daemon.configStore.GetDefaultRuntimeName() + } + + if rt := daemon.configStore.GetRuntime(hostConfig.Runtime); rt == nil { + return warnings, fmt.Errorf("Unknown runtime specified %s", hostConfig.Runtime) + } + + for dest := range hostConfig.Tmpfs { + if err := volume.ValidateTmpfsMountDestination(dest); err != nil { + return warnings, err + } + } + + return warnings, nil +} + +// reloadPlatform updates configuration with platform specific options +// and updates the passed attributes +func (daemon *Daemon) reloadPlatform(conf *config.Config, attributes map[string]string) { + if conf.IsValueSet("runtimes") { + daemon.configStore.Runtimes = conf.Runtimes + // Always set the default one + daemon.configStore.Runtimes[config.StockRuntimeName] = types.Runtime{Path: DefaultRuntimeBinary} + } + + if conf.DefaultRuntime != "" { + daemon.configStore.DefaultRuntime = conf.DefaultRuntime + } + + if conf.IsValueSet("default-shm-size") { + daemon.configStore.ShmSize = conf.ShmSize + } + + // Update attributes + var runtimeList bytes.Buffer + for name, rt := range daemon.configStore.Runtimes { + if runtimeList.Len() > 0 { + runtimeList.WriteRune(' ') + } + runtimeList.WriteString(fmt.Sprintf("%s:%s", name, rt)) + } + + attributes["runtimes"] = runtimeList.String() + attributes["default-runtime"] = daemon.configStore.DefaultRuntime + attributes["default-shm-size"] = fmt.Sprintf("%d", daemon.configStore.ShmSize) +} + +// verifyDaemonSettings performs validation of daemon config struct +func verifyDaemonSettings(conf *config.Config) error { + // Check for mutually incompatible config options + if conf.BridgeConfig.Iface != "" && conf.BridgeConfig.IP != "" { + return fmt.Errorf("You specified -b & --bip, mutually exclusive options. Please specify only one") + } + if !conf.BridgeConfig.EnableIPTables && !conf.BridgeConfig.InterContainerCommunication { + return fmt.Errorf("You specified --iptables=false with --icc=false. ICC=false uses iptables to function. 
Please set --icc or --iptables to true") + } + if !conf.BridgeConfig.EnableIPTables && conf.BridgeConfig.EnableIPMasq { + conf.BridgeConfig.EnableIPMasq = false + } + if err := VerifyCgroupDriver(conf); err != nil { + return err + } + if conf.CgroupParent != "" && UsingSystemd(conf) { + if len(conf.CgroupParent) <= 6 || !strings.HasSuffix(conf.CgroupParent, ".slice") { + return fmt.Errorf("cgroup-parent for systemd cgroup should be a valid slice named as \"xxx.slice\"") + } + } + + if conf.DefaultRuntime == "" { + conf.DefaultRuntime = config.StockRuntimeName + } + if conf.Runtimes == nil { + conf.Runtimes = make(map[string]types.Runtime) + } + conf.Runtimes[config.StockRuntimeName] = types.Runtime{Path: DefaultRuntimeBinary} + + return nil +} + +// checkSystem validates platform-specific requirements +func checkSystem() error { + if os.Geteuid() != 0 { + return fmt.Errorf("The Docker daemon needs to be run as root") + } + return checkKernel() +} + +// configureMaxThreads sets the Go runtime max threads threshold +// which is 90% of the kernel setting from /proc/sys/kernel/threads-max +func configureMaxThreads(config *config.Config) error { + mt, err := ioutil.ReadFile("/proc/sys/kernel/threads-max") + if err != nil { + return err + } + mtint, err := strconv.Atoi(strings.TrimSpace(string(mt))) + if err != nil { + return err + } + maxThreads := (mtint / 100) * 90 + debug.SetMaxThreads(maxThreads) + logrus.Debugf("Golang's threads limit set to %d", maxThreads) + return nil +} + +func overlaySupportsSelinux() (bool, error) { + f, err := os.Open("/proc/kallsyms") + if err != nil { + if os.IsNotExist(err) { + return false, nil + } + return false, err + } + defer f.Close() + + var symAddr, symType, symName, text string + + s := bufio.NewScanner(f) + for s.Scan() { + if err := s.Err(); err != nil { + return false, err + } + + text = s.Text() + if _, err := fmt.Sscanf(text, "%s %s %s", &symAddr, &symType, &symName); err != nil { + return false, fmt.Errorf("Scanning '%s' failed: %s", text, err) + } + + // Check for presence of symbol security_inode_copy_up. + if symName == "security_inode_copy_up" { + return true, nil + } + } + return false, nil +} + +// configureKernelSecuritySupport configures and validates security support for the kernel +func configureKernelSecuritySupport(config *config.Config, driverName string) error { + if config.EnableSelinuxSupport { + if !selinuxEnabled() { + logrus.Warn("Docker could not enable SELinux on the host system") + return nil + } + + if driverName == "overlay" || driverName == "overlay2" { + // If driver is overlay or overlay2, make sure kernel + // supports selinux with overlay. + supported, err := overlaySupportsSelinux() + if err != nil { + return err + } + + if !supported { + logrus.Warnf("SELinux is not supported with the %s graph driver on this kernel", driverName) + } + } + } else { + selinuxSetDisabled() + } + return nil +} + +func (daemon *Daemon) initNetworkController(config *config.Config, activeSandboxes map[string]interface{}) (libnetwork.NetworkController, error) { + netOptions, err := daemon.networkOptions(config, daemon.PluginStore, activeSandboxes) + if err != nil { + return nil, err + } + + controller, err := libnetwork.New(netOptions...) 
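+ // The sections below seed the fresh controller with the built-in networks: + // "none" and "host" are created once and persisted, while any stale default + // "bridge" network is deleted and re-created on every daemon start so that + // bridge configuration changes (e.g. --bip) take effect.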
+ if err != nil { + return nil, fmt.Errorf("error obtaining controller instance: %v", err) + } + + if len(activeSandboxes) > 0 { + logrus.Info("There are old running containers, the network config will not take effect") + return controller, nil + } + + // Initialize default network on "null" + if n, _ := controller.NetworkByName("none"); n == nil { + if _, err := controller.NewNetwork("null", "none", "", libnetwork.NetworkOptionPersist(true)); err != nil { + return nil, fmt.Errorf("Error creating default \"null\" network: %v", err) + } + } + + // Initialize default network on "host" + if n, _ := controller.NetworkByName("host"); n == nil { + if _, err := controller.NewNetwork("host", "host", "", libnetwork.NetworkOptionPersist(true)); err != nil { + return nil, fmt.Errorf("Error creating default \"host\" network: %v", err) + } + } + + // Clear stale bridge network + if n, err := controller.NetworkByName("bridge"); err == nil { + if err = n.Delete(); err != nil { + return nil, fmt.Errorf("could not delete the default bridge network: %v", err) + } + } + + if !config.DisableBridge { + // Initialize default driver "bridge" + if err := initBridgeDriver(controller, config); err != nil { + return nil, err + } + } else { + removeDefaultBridgeInterface() + } + + return controller, nil +} + +func driverOptions(config *config.Config) []nwconfig.Option { + bridgeConfig := options.Generic{ + "EnableIPForwarding": config.BridgeConfig.EnableIPForward, + "EnableIPTables": config.BridgeConfig.EnableIPTables, + "EnableUserlandProxy": config.BridgeConfig.EnableUserlandProxy, + "UserlandProxyPath": config.BridgeConfig.UserlandProxyPath} + bridgeOption := options.Generic{netlabel.GenericData: bridgeConfig} + + dOptions := []nwconfig.Option{} + dOptions = append(dOptions, nwconfig.OptionDriverConfig("bridge", bridgeOption)) + return dOptions +} + +func initBridgeDriver(controller libnetwork.NetworkController, config *config.Config) error { + bridgeName := bridge.DefaultBridgeName + if config.BridgeConfig.Iface != "" { + bridgeName = config.BridgeConfig.Iface + } + netOption := map[string]string{ + bridge.BridgeName: bridgeName, + bridge.DefaultBridge: strconv.FormatBool(true), + netlabel.DriverMTU: strconv.Itoa(config.Mtu), + bridge.EnableIPMasquerade: strconv.FormatBool(config.BridgeConfig.EnableIPMasq), + bridge.EnableICC: strconv.FormatBool(config.BridgeConfig.InterContainerCommunication), + } + + // --ip processing + if config.BridgeConfig.DefaultIP != nil { + netOption[bridge.DefaultBindingIP] = config.BridgeConfig.DefaultIP.String() + } + + var ( + ipamV4Conf *libnetwork.IpamConf + ipamV6Conf *libnetwork.IpamConf + ) + + ipamV4Conf = &libnetwork.IpamConf{AuxAddresses: make(map[string]string)} + + nwList, nw6List, err := netutils.ElectInterfaceAddresses(bridgeName) + if err != nil { + return errors.Wrap(err, "list bridge addresses failed") + } + + nw := nwList[0] + if len(nwList) > 1 && config.BridgeConfig.FixedCIDR != "" { + _, fCIDR, err := net.ParseCIDR(config.BridgeConfig.FixedCIDR) + if err != nil { + return errors.Wrap(err, "parse CIDR failed") + } + // Iterate through in case there are multiple addresses for the bridge + for _, entry := range nwList { + if fCIDR.Contains(entry.IP) { + nw = entry + break + } + } + } + + ipamV4Conf.PreferredPool = lntypes.GetIPNetCanonical(nw).String() + hip, _ := lntypes.GetHostPartIP(nw.IP, nw.Mask) + if hip.IsGlobalUnicast() { + ipamV4Conf.Gateway = nw.IP.String() + } + + if config.BridgeConfig.IP != "" { + ipamV4Conf.PreferredPool = config.BridgeConfig.IP + ip, _, 
err := net.ParseCIDR(config.BridgeConfig.IP) + if err != nil { + return err + } + ipamV4Conf.Gateway = ip.String() + } else if bridgeName == bridge.DefaultBridgeName && ipamV4Conf.PreferredPool != "" { + logrus.Infof("Default bridge (%s) is assigned with an IP address %s. Daemon option --bip can be used to set a preferred IP address", bridgeName, ipamV4Conf.PreferredPool) + } + + if config.BridgeConfig.FixedCIDR != "" { + _, fCIDR, err := net.ParseCIDR(config.BridgeConfig.FixedCIDR) + if err != nil { + return err + } + + ipamV4Conf.SubPool = fCIDR.String() + } + + if config.BridgeConfig.DefaultGatewayIPv4 != nil { + ipamV4Conf.AuxAddresses["DefaultGatewayIPv4"] = config.BridgeConfig.DefaultGatewayIPv4.String() + } + + var deferIPv6Alloc bool + if config.BridgeConfig.FixedCIDRv6 != "" { + _, fCIDRv6, err := net.ParseCIDR(config.BridgeConfig.FixedCIDRv6) + if err != nil { + return err + } + + // In case user has specified the daemon flag --fixed-cidr-v6 and the passed network has + // at least 48 host bits, we need to guarantee the current behavior where the containers' + // IPv6 addresses will be constructed based on the containers' interface MAC address. + // We do so by telling libnetwork to defer the IPv6 address allocation for the endpoints + // on this network until after the driver has created the endpoint and returned the + // constructed address. Libnetwork will then reserve this address with the ipam driver. + ones, _ := fCIDRv6.Mask.Size() + deferIPv6Alloc = ones <= 80 + + if ipamV6Conf == nil { + ipamV6Conf = &libnetwork.IpamConf{AuxAddresses: make(map[string]string)} + } + ipamV6Conf.PreferredPool = fCIDRv6.String() + + // In case the --fixed-cidr-v6 is specified and the current docker0 bridge IPv6 + // address belongs to the same network, we need to inform libnetwork about it, so + // that it can be reserved with IPAM and it will not be given away to somebody else + for _, nw6 := range nw6List { + if fCIDRv6.Contains(nw6.IP) { + ipamV6Conf.Gateway = nw6.IP.String() + break + } + } + } + + if config.BridgeConfig.DefaultGatewayIPv6 != nil { + if ipamV6Conf == nil { + ipamV6Conf = &libnetwork.IpamConf{AuxAddresses: make(map[string]string)} + } + ipamV6Conf.AuxAddresses["DefaultGatewayIPv6"] = config.BridgeConfig.DefaultGatewayIPv6.String() + } + + v4Conf := []*libnetwork.IpamConf{ipamV4Conf} + v6Conf := []*libnetwork.IpamConf{} + if ipamV6Conf != nil { + v6Conf = append(v6Conf, ipamV6Conf) + } + // Initialize default network on "bridge" with the same name + _, err = controller.NewNetwork("bridge", "bridge", "", + libnetwork.NetworkOptionEnableIPv6(config.BridgeConfig.EnableIPv6), + libnetwork.NetworkOptionDriverOpts(netOption), + libnetwork.NetworkOptionIpam("default", "", v4Conf, v6Conf, nil), + libnetwork.NetworkOptionDeferIPv6Alloc(deferIPv6Alloc)) + if err != nil { + return fmt.Errorf("Error creating default \"bridge\" network: %v", err) + } + return nil +} + +// Remove default bridge interface if present (--bridge=none use case) +func removeDefaultBridgeInterface() { + if lnk, err := netlink.LinkByName(bridge.DefaultBridgeName); err == nil { + if err := netlink.LinkDel(lnk); err != nil { + logrus.Warnf("Failed to remove bridge interface (%s): %v", bridge.DefaultBridgeName, err) + } + } +} + +func (daemon *Daemon) getLayerInit() func(string) error { + return daemon.setupInitLayer +} + +// Parse the remapped root (user namespace) option, which can be one of: +// username - valid username from /etc/passwd +// username:groupname - valid username; valid groupname from /etc/group 
+// uid - 32-bit unsigned int valid Linux UID value +// uid:gid - uid value; 32-bit unsigned int Linux GID value +// +// If no groupname is specified, and a username is specified, an attempt +// will be made to look up a gid for that username as a groupname +// +// If names are used, they are verified to exist in passwd/group +func parseRemappedRoot(usergrp string) (string, string, error) { + + var ( + userID, groupID int + username, groupname string + ) + + idparts := strings.Split(usergrp, ":") + if len(idparts) > 2 { + return "", "", fmt.Errorf("Invalid user/group specification in --userns-remap: %q", usergrp) + } + + if uid, err := strconv.ParseInt(idparts[0], 10, 32); err == nil { + // must be a uid; take it as valid + userID = int(uid) + luser, err := idtools.LookupUID(userID) + if err != nil { + return "", "", fmt.Errorf("Uid %d has no entry in /etc/passwd: %v", userID, err) + } + username = luser.Name + if len(idparts) == 1 { + // if the uid was numeric and no gid was specified, take the uid as the gid + groupID = userID + lgrp, err := idtools.LookupGID(groupID) + if err != nil { + return "", "", fmt.Errorf("Gid %d has no entry in /etc/group: %v", groupID, err) + } + groupname = lgrp.Name + } + } else { + lookupName := idparts[0] + // special case: if the user specified "default", they want Docker to create or + // use (after creation) the "dockremap" user/group for root remapping + if lookupName == defaultIDSpecifier { + lookupName = defaultRemappedID + } + luser, err := idtools.LookupUser(lookupName) + if err != nil && idparts[0] != defaultIDSpecifier { + // error if the name requested isn't the special "dockremap" ID + return "", "", fmt.Errorf("Error during uid lookup for %q: %v", lookupName, err) + } else if err != nil { + // special case: if the username == "default", then we have been asked + // to create a new entry pair in /etc/{passwd,group} for which the /etc/sub{uid,gid} + // ranges will be used for the user and group mappings in user namespaced containers + _, _, err := idtools.AddNamespaceRangesUser(defaultRemappedID) + if err == nil { + return defaultRemappedID, defaultRemappedID, nil + } + return "", "", fmt.Errorf("Error during %q user creation: %v", defaultRemappedID, err) + } + username = luser.Name + if len(idparts) == 1 { + // we only have a string username, and no group specified; look up gid from username as group + group, err := idtools.LookupGroup(lookupName) + if err != nil { + return "", "", fmt.Errorf("Error during gid lookup for %q: %v", lookupName, err) + } + groupname = group.Name + } + } + + if len(idparts) == 2 { + // groupname or gid is separately specified and must be resolved + // to an unsigned 32-bit gid + if gid, err := strconv.ParseInt(idparts[1], 10, 32); err == nil { + // must be a gid, take it as valid + groupID = int(gid) + lgrp, err := idtools.LookupGID(groupID) + if err != nil { + return "", "", fmt.Errorf("Gid %d has no entry in /etc/group: %v", groupID, err) + } + groupname = lgrp.Name + } else { + // not a number; attempt a lookup + if _, err := idtools.LookupGroup(idparts[1]); err != nil { + return "", "", fmt.Errorf("Error during groupname lookup for %q: %v", idparts[1], err) + } + groupname = idparts[1] + } + } + return username, groupname, nil +} + +func setupRemappedRoot(config *config.Config) ([]idtools.IDMap, []idtools.IDMap, error) { + if runtime.GOOS != "linux" && config.RemappedRoot != "" { + return nil, nil, fmt.Errorf("User namespaces are only supported on Linux") + } + + // if the daemon was started with remapped root 
option, parse + // the config option to the int uid,gid values + var ( + uidMaps, gidMaps []idtools.IDMap + ) + if config.RemappedRoot != "" { + username, groupname, err := parseRemappedRoot(config.RemappedRoot) + if err != nil { + return nil, nil, err + } + if username == "root" { + // Cannot set up user namespaces with a 1-to-1 mapping; "--root=0:0" is a no-op + // effectively + logrus.Warn("User namespaces: root cannot be remapped with itself; user namespaces are OFF") + return uidMaps, gidMaps, nil + } + logrus.Infof("User namespaces: ID ranges will be mapped to subuid/subgid ranges of: %s:%s", username, groupname) + // update remapped root setting now that we have resolved them to actual names + config.RemappedRoot = fmt.Sprintf("%s:%s", username, groupname) + + uidMaps, gidMaps, err = idtools.CreateIDMappings(username, groupname) + if err != nil { + return nil, nil, fmt.Errorf("Can't create ID mappings: %v", err) + } + } + return uidMaps, gidMaps, nil +} + +func setupDaemonRoot(config *config.Config, rootDir string, rootUID, rootGID int) error { + config.Root = rootDir + // the docker root metadata directory needs to have execute permissions for all users (g+x,o+x) + // so that syscalls executing as non-root, operating on subdirectories of the graph root + // (e.g. mounted layers of a container) can traverse this path. + // The user namespace support will create subdirectories for the remapped root host uid:gid + // pair owned by that same uid:gid pair for proper write access to those needed metadata and + // layer content subtrees. + if _, err := os.Stat(rootDir); err == nil { + // root currently exists; verify the access bits are correct by setting them + if err = os.Chmod(rootDir, 0711); err != nil { + return err + } + } else if os.IsNotExist(err) { + // no root exists yet, create it 0711 with root:root ownership + if err := os.MkdirAll(rootDir, 0711); err != nil { + return err + } + } + + // if user namespaces are enabled we will create a subtree underneath the specified root + // with any/all specified remapped root uid/gid options on the daemon creating + // a new subdirectory with ownership set to the remapped uid/gid (so as to allow + // `chdir()` to work for containers namespaced to that uid/gid) + if config.RemappedRoot != "" { + config.Root = filepath.Join(rootDir, fmt.Sprintf("%d.%d", rootUID, rootGID)) + logrus.Debugf("Creating user namespaced daemon root: %s", config.Root) + // Create the root directory if it doesn't exist + if err := idtools.MkdirAllAs(config.Root, 0700, rootUID, rootGID); err != nil { + return fmt.Errorf("Cannot create daemon root: %s: %v", config.Root, err) + } + // we also need to verify that any pre-existing directories in the path to + // the graphroot won't block access to remapped root--if any pre-existing directory + // has strict permissions that don't allow "x", container start will fail, so + // better to warn and fail now + dirPath := config.Root + for { + dirPath = filepath.Dir(dirPath) + if dirPath == "/" { + break + } + if !idtools.CanAccess(dirPath, rootUID, rootGID) { + return fmt.Errorf("A subdirectory in your graphroot path (%s) restricts access to the remapped root uid/gid; please fix by allowing 'o+x' permissions on existing directories.", config.Root) + } + } + } + return nil +} + +// registerLinks writes the links to a file. 
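+// Links are the legacy connectivity mechanism on the default bridge network; +// for example, with `docker run --link db:database` the link resolves the +// "db" container and registers it below under the alias "database". A link +// that points at a container in "container:<id>" network mode is first +// resolved through to the container that owns the network namespace.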
+func (daemon *Daemon) registerLinks(container *container.Container, hostConfig *containertypes.HostConfig) error { + if hostConfig == nil || hostConfig.NetworkMode.IsUserDefined() { + return nil + } + + for _, l := range hostConfig.Links { + name, alias, err := opts.ParseLink(l) + if err != nil { + return err + } + child, err := daemon.GetContainer(name) + if err != nil { + return fmt.Errorf("Could not get container for %s", name) + } + for child.HostConfig.NetworkMode.IsContainer() { + parts := strings.SplitN(string(child.HostConfig.NetworkMode), ":", 2) + child, err = daemon.GetContainer(parts[1]) + if err != nil { + return fmt.Errorf("Could not get container for %s", parts[1]) + } + } + if child.HostConfig.NetworkMode.IsHost() { + return runconfig.ErrConflictHostNetworkAndLinks + } + if err := daemon.registerLink(container, child, alias); err != nil { + return err + } + } + + // After we load all the links into the daemon + // set them to nil on the hostconfig + return container.WriteHostConfig() +} + +// conditionalMountOnStart is a platform specific helper function during the +// container start to call mount. +func (daemon *Daemon) conditionalMountOnStart(container *container.Container) error { + return daemon.Mount(container) +} + +// conditionalUnmountOnCleanup is a platform specific helper function called +// during the cleanup of a container to unmount. +func (daemon *Daemon) conditionalUnmountOnCleanup(container *container.Container) error { + return daemon.Unmount(container) +} + +func (daemon *Daemon) stats(c *container.Container) (*types.StatsJSON, error) { + if !c.IsRunning() { + return nil, errNotRunning{c.ID} + } + stats, err := daemon.containerd.Stats(c.ID) + if err != nil { + return nil, err + } + s := &types.StatsJSON{} + cgs := stats.CgroupStats + if cgs != nil { + s.BlkioStats = types.BlkioStats{ + IoServiceBytesRecursive: copyBlkioEntry(cgs.BlkioStats.IoServiceBytesRecursive), + IoServicedRecursive: copyBlkioEntry(cgs.BlkioStats.IoServicedRecursive), + IoQueuedRecursive: copyBlkioEntry(cgs.BlkioStats.IoQueuedRecursive), + IoServiceTimeRecursive: copyBlkioEntry(cgs.BlkioStats.IoServiceTimeRecursive), + IoWaitTimeRecursive: copyBlkioEntry(cgs.BlkioStats.IoWaitTimeRecursive), + IoMergedRecursive: copyBlkioEntry(cgs.BlkioStats.IoMergedRecursive), + IoTimeRecursive: copyBlkioEntry(cgs.BlkioStats.IoTimeRecursive), + SectorsRecursive: copyBlkioEntry(cgs.BlkioStats.SectorsRecursive), + } + cpu := cgs.CpuStats + s.CPUStats = types.CPUStats{ + CPUUsage: types.CPUUsage{ + TotalUsage: cpu.CpuUsage.TotalUsage, + PercpuUsage: cpu.CpuUsage.PercpuUsage, + UsageInKernelmode: cpu.CpuUsage.UsageInKernelmode, + UsageInUsermode: cpu.CpuUsage.UsageInUsermode, + }, + ThrottlingData: types.ThrottlingData{ + Periods: cpu.ThrottlingData.Periods, + ThrottledPeriods: cpu.ThrottlingData.ThrottledPeriods, + ThrottledTime: cpu.ThrottlingData.ThrottledTime, + }, + } + mem := cgs.MemoryStats.Usage + s.MemoryStats = types.MemoryStats{ + Usage: mem.Usage, + MaxUsage: mem.MaxUsage, + Stats: cgs.MemoryStats.Stats, + Failcnt: mem.Failcnt, + Limit: mem.Limit, + } + // if the container does not set memory limit, use the machineMemory + if mem.Limit > daemon.machineMemory && daemon.machineMemory > 0 { + s.MemoryStats.Limit = daemon.machineMemory + } + if cgs.PidsStats != nil { + s.PidsStats = types.PidsStats{ + Current: cgs.PidsStats.Current, + } + } + } + s.Read, err = ptypes.Timestamp(stats.Timestamp) + if err != nil { + return nil, err + } + return s, nil +} + +// setDefaultIsolation determines the 
default isolation mode for the +// daemon to run in. This is only applicable on Windows +func (daemon *Daemon) setDefaultIsolation() error { + return nil +} + +func rootFSToAPIType(rootfs *image.RootFS) types.RootFS { + var layers []string + for _, l := range rootfs.DiffIDs { + layers = append(layers, l.String()) + } + return types.RootFS{ + Type: rootfs.Type, + Layers: layers, + } +} + +// setupDaemonProcess sets various settings for the daemon's process +func setupDaemonProcess(config *config.Config) error { + // set up the daemon's oom_score_adj + return setupOOMScoreAdj(config.OOMScoreAdjust) +} + +func setupOOMScoreAdj(score int) error { + f, err := os.OpenFile("/proc/self/oom_score_adj", os.O_WRONLY, 0) + if err != nil { + return err + } + defer f.Close() + stringScore := strconv.Itoa(score) + _, err = f.WriteString(stringScore) + if os.IsPermission(err) { + // Setting oom_score_adj does not work in an + // unprivileged container. Ignore the error, but log + // it if we appear not to be in that situation. + if !rsystem.RunningInUserNS() { + logrus.Debugf("Permission denied writing %q to /proc/self/oom_score_adj", stringScore) + } + return nil + } + + return err +} + +func (daemon *Daemon) initCgroupsPath(path string) error { + if path == "/" || path == "." { + return nil + } + + if daemon.configStore.CPURealtimePeriod == 0 && daemon.configStore.CPURealtimeRuntime == 0 { + return nil + } + + // Recursively create cgroup to ensure that the system and all parent cgroups have values set + // for the period and runtime as this limits what the children can be set to. + daemon.initCgroupsPath(filepath.Dir(path)) + + mnt, root, err := cgroups.FindCgroupMountpointAndRoot("cpu") + if err != nil { + return err + } + // When docker is run inside docker, the root is based on the host cgroup. + // Should this be handled in runc/libcontainer/cgroups ? 
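+ // The kernel only accepts a cpu.rt_runtime_us value for a child cgroup if + // every ancestor already has realtime budget allocated, which is why the + // recursion above walks the hierarchy top-down before this level's + // rt_period/rt_runtime files are written.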
+ if strings.HasPrefix(root, "/docker/") { + root = "/" + } + + path = filepath.Join(mnt, root, path) + sysinfo := sysinfo.New(true) + if err := maybeCreateCPURealTimeFile(sysinfo.CPURealtimePeriod, daemon.configStore.CPURealtimePeriod, "cpu.rt_period_us", path); err != nil { + return err + } + if err := maybeCreateCPURealTimeFile(sysinfo.CPURealtimeRuntime, daemon.configStore.CPURealtimeRuntime, "cpu.rt_runtime_us", path); err != nil { + return err + } + return nil +} + +func maybeCreateCPURealTimeFile(sysinfoPresent bool, configValue int64, file string, path string) error { + if sysinfoPresent && configValue != 0 { + if err := os.MkdirAll(path, 0755); err != nil && !os.IsExist(err) { + return err + } + if err := ioutil.WriteFile(filepath.Join(path, file), []byte(strconv.FormatInt(configValue, 10)), 0700); err != nil { + return err + } + } + return nil +} + +func (daemon *Daemon) setupSeccompProfile() error { + if daemon.configStore.SeccompProfile != "" { + daemon.seccompProfilePath = daemon.configStore.SeccompProfile + b, err := ioutil.ReadFile(daemon.configStore.SeccompProfile) + if err != nil { + return fmt.Errorf("opening seccomp profile (%s) failed: %v", daemon.configStore.SeccompProfile, err) + } + daemon.seccompProfile = b + } + return nil +} diff --git a/components/engine/daemon/daemon_unix_test.go b/components/engine/daemon/daemon_unix_test.go new file mode 100644 index 0000000000..e8afe629e0 --- /dev/null +++ b/components/engine/daemon/daemon_unix_test.go @@ -0,0 +1,313 @@ +// +build !windows,!solaris + +package daemon + +import ( + "io/ioutil" + "os" + "path/filepath" + "testing" + + containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/docker/container" + "github.com/docker/docker/daemon/config" + "github.com/docker/docker/volume" + "github.com/docker/docker/volume/drivers" + "github.com/docker/docker/volume/local" + "github.com/docker/docker/volume/store" +) + +// Unix test, as it uses settings which are not available on Windows +func TestAdjustCPUShares(t *testing.T) { + tmp, err := ioutil.TempDir("", "docker-daemon-unix-test-") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmp) + daemon := &Daemon{ + repository: tmp, + root: tmp, + } + + hostConfig := &containertypes.HostConfig{ + Resources: containertypes.Resources{CPUShares: linuxMinCPUShares - 1}, + } + daemon.adaptContainerSettings(hostConfig, true) + if hostConfig.CPUShares != linuxMinCPUShares { + t.Errorf("Expected CPUShares to be %d", linuxMinCPUShares) + } + + hostConfig.CPUShares = linuxMaxCPUShares + 1 + daemon.adaptContainerSettings(hostConfig, true) + if hostConfig.CPUShares != linuxMaxCPUShares { + t.Errorf("Expected CPUShares to be %d", linuxMaxCPUShares) + } + + hostConfig.CPUShares = 0 + daemon.adaptContainerSettings(hostConfig, true) + if hostConfig.CPUShares != 0 { + t.Error("Expected CPUShares to be unchanged") + } + + hostConfig.CPUShares = 1024 + daemon.adaptContainerSettings(hostConfig, true) + if hostConfig.CPUShares != 1024 { + t.Error("Expected CPUShares to be unchanged") + } +} + +// Unix test, as it uses settings which are not available on Windows +func TestAdjustCPUSharesNoAdjustment(t *testing.T) { + tmp, err := ioutil.TempDir("", "docker-daemon-unix-test-") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmp) + daemon := &Daemon{ + repository: tmp, + root: tmp, + } + + hostConfig := &containertypes.HostConfig{ + Resources: containertypes.Resources{CPUShares: linuxMinCPUShares - 1}, + } + daemon.adaptContainerSettings(hostConfig, false) + if 
hostConfig.CPUShares != linuxMinCPUShares-1 { + t.Errorf("Expected CPUShares to be %d", linuxMinCPUShares-1) + } + + hostConfig.CPUShares = linuxMaxCPUShares + 1 + daemon.adaptContainerSettings(hostConfig, false) + if hostConfig.CPUShares != linuxMaxCPUShares+1 { + t.Errorf("Expected CPUShares to be %d", linuxMaxCPUShares+1) + } + + hostConfig.CPUShares = 0 + daemon.adaptContainerSettings(hostConfig, false) + if hostConfig.CPUShares != 0 { + t.Error("Expected CPUShares to be unchanged") + } + + hostConfig.CPUShares = 1024 + daemon.adaptContainerSettings(hostConfig, false) + if hostConfig.CPUShares != 1024 { + t.Error("Expected CPUShares to be unchanged") + } +} + +// Unix test, as it uses settings which are not available on Windows +func TestParseSecurityOptWithDeprecatedColon(t *testing.T) { + container := &container.Container{} + config := &containertypes.HostConfig{} + + // test apparmor + config.SecurityOpt = []string{"apparmor=test_profile"} + if err := parseSecurityOpt(container, config); err != nil { + t.Fatalf("Unexpected parseSecurityOpt error: %v", err) + } + if container.AppArmorProfile != "test_profile" { + t.Fatalf("Unexpected AppArmorProfile, expected: \"test_profile\", got %q", container.AppArmorProfile) + } + + // test seccomp + sp := "/path/to/seccomp_test.json" + config.SecurityOpt = []string{"seccomp=" + sp} + if err := parseSecurityOpt(container, config); err != nil { + t.Fatalf("Unexpected parseSecurityOpt error: %v", err) + } + if container.SeccompProfile != sp { + t.Fatalf("Unexpected SeccompProfile, expected: %q, got %q", sp, container.SeccompProfile) + } + + // test valid label + config.SecurityOpt = []string{"label=user:USER"} + if err := parseSecurityOpt(container, config); err != nil { + t.Fatalf("Unexpected parseSecurityOpt error: %v", err) + } + + // test invalid label + config.SecurityOpt = []string{"label"} + if err := parseSecurityOpt(container, config); err == nil { + t.Fatal("Expected parseSecurityOpt error, got nil") + } + + // test invalid opt + config.SecurityOpt = []string{"test"} + if err := parseSecurityOpt(container, config); err == nil { + t.Fatal("Expected parseSecurityOpt error, got nil") + } +} + +func TestParseSecurityOpt(t *testing.T) { + container := &container.Container{} + config := &containertypes.HostConfig{} + + // test apparmor + config.SecurityOpt = []string{"apparmor=test_profile"} + if err := parseSecurityOpt(container, config); err != nil { + t.Fatalf("Unexpected parseSecurityOpt error: %v", err) + } + if container.AppArmorProfile != "test_profile" { + t.Fatalf("Unexpected AppArmorProfile, expected: \"test_profile\", got %q", container.AppArmorProfile) + } + + // test seccomp + sp := "/path/to/seccomp_test.json" + config.SecurityOpt = []string{"seccomp=" + sp} + if err := parseSecurityOpt(container, config); err != nil { + t.Fatalf("Unexpected parseSecurityOpt error: %v", err) + } + if container.SeccompProfile != sp { + t.Fatalf("Unexpected SeccompProfile, expected: %q, got %q", sp, container.SeccompProfile) + } + + // test valid label + config.SecurityOpt = []string{"label=user:USER"} + if err := parseSecurityOpt(container, config); err != nil { + t.Fatalf("Unexpected parseSecurityOpt error: %v", err) + } + + // test invalid label + config.SecurityOpt = []string{"label"} + if err := parseSecurityOpt(container, config); err == nil { + t.Fatal("Expected parseSecurityOpt error, got nil") + } + + // test invalid opt + config.SecurityOpt = []string{"test"} + if err := parseSecurityOpt(container, config); err == nil { + t.Fatal("Expected 
parseSecurityOpt error, got nil") + } +} + +func TestParseNNPSecurityOptions(t *testing.T) { + daemon := &Daemon{ + configStore: &config.Config{NoNewPrivileges: true}, + } + container := &container.Container{} + config := &containertypes.HostConfig{} + + // test NNP when "daemon:true" and "no-new-privileges=false"" + config.SecurityOpt = []string{"no-new-privileges=false"} + + if err := daemon.parseSecurityOpt(container, config); err != nil { + t.Fatalf("Unexpected daemon.parseSecurityOpt error: %v", err) + } + if container.NoNewPrivileges { + t.Fatalf("container.NoNewPrivileges should be FALSE: %v", container.NoNewPrivileges) + } + + // test NNP when "daemon:false" and "no-new-privileges=true"" + daemon.configStore.NoNewPrivileges = false + config.SecurityOpt = []string{"no-new-privileges=true"} + + if err := daemon.parseSecurityOpt(container, config); err != nil { + t.Fatalf("Unexpected daemon.parseSecurityOpt error: %v", err) + } + if !container.NoNewPrivileges { + t.Fatalf("container.NoNewPrivileges should be TRUE: %v", container.NoNewPrivileges) + } +} + +func TestNetworkOptions(t *testing.T) { + daemon := &Daemon{} + dconfigCorrect := &config.Config{ + CommonConfig: config.CommonConfig{ + ClusterStore: "consul://localhost:8500", + ClusterAdvertise: "192.168.0.1:8000", + }, + } + + if _, err := daemon.networkOptions(dconfigCorrect, nil, nil); err != nil { + t.Fatalf("Expect networkOptions success, got error: %v", err) + } + + dconfigWrong := &config.Config{ + CommonConfig: config.CommonConfig{ + ClusterStore: "consul://localhost:8500://test://bbb", + }, + } + + if _, err := daemon.networkOptions(dconfigWrong, nil, nil); err == nil { + t.Fatal("Expected networkOptions error, got nil") + } +} + +func TestMigratePre17Volumes(t *testing.T) { + rootDir, err := ioutil.TempDir("", "test-daemon-volumes") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(rootDir) + + volumeRoot := filepath.Join(rootDir, "volumes") + err = os.MkdirAll(volumeRoot, 0755) + if err != nil { + t.Fatal(err) + } + + containerRoot := filepath.Join(rootDir, "containers") + cid := "1234" + err = os.MkdirAll(filepath.Join(containerRoot, cid), 0755) + + vid := "5678" + vfsPath := filepath.Join(rootDir, "vfs", "dir", vid) + err = os.MkdirAll(vfsPath, 0755) + if err != nil { + t.Fatal(err) + } + + config := []byte(` + { + "ID": "` + cid + `", + "Volumes": { + "/foo": "` + vfsPath + `", + "/bar": "/foo", + "/quux": "/quux" + }, + "VolumesRW": { + "/foo": true, + "/bar": true, + "/quux": false + } + } + `) + + volStore, err := store.New(volumeRoot) + if err != nil { + t.Fatal(err) + } + drv, err := local.New(volumeRoot, 0, 0) + if err != nil { + t.Fatal(err) + } + volumedrivers.Register(drv, volume.DefaultDriverName) + + daemon := &Daemon{root: rootDir, repository: containerRoot, volumes: volStore} + err = ioutil.WriteFile(filepath.Join(containerRoot, cid, "config.v2.json"), config, 600) + if err != nil { + t.Fatal(err) + } + c, err := daemon.load(cid) + if err != nil { + t.Fatal(err) + } + if err := daemon.verifyVolumesInfo(c); err != nil { + t.Fatal(err) + } + + expected := map[string]volume.MountPoint{ + "/foo": {Destination: "/foo", RW: true, Name: vid}, + "/bar": {Source: "/foo", Destination: "/bar", RW: true}, + "/quux": {Source: "/quux", Destination: "/quux", RW: false}, + } + for id, mp := range c.MountPoints { + x, exists := expected[id] + if !exists { + t.Fatal("volume not migrated") + } + if mp.Source != x.Source || mp.Destination != x.Destination || mp.RW != x.RW || mp.Name != x.Name { + t.Fatalf("got 
unexpected mountpoint, expected: %+v, got: %+v", x, mp) + } + } +} diff --git a/components/engine/daemon/daemon_unsupported.go b/components/engine/daemon/daemon_unsupported.go new file mode 100644 index 0000000000..cb1acf63d6 --- /dev/null +++ b/components/engine/daemon/daemon_unsupported.go @@ -0,0 +1,5 @@ +// +build !linux,!freebsd,!windows,!solaris + +package daemon + +const platformSupported = false diff --git a/components/engine/daemon/daemon_windows.go b/components/engine/daemon/daemon_windows.go new file mode 100644 index 0000000000..efe1b1f6a0 --- /dev/null +++ b/components/engine/daemon/daemon_windows.go @@ -0,0 +1,629 @@ +package daemon + +import ( + "fmt" + "os" + "path/filepath" + "strings" + "syscall" + + "github.com/Microsoft/hcsshim" + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types" + containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/docker/container" + "github.com/docker/docker/daemon/config" + "github.com/docker/docker/image" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/parsers" + "github.com/docker/docker/pkg/platform" + "github.com/docker/docker/pkg/sysinfo" + "github.com/docker/docker/pkg/system" + "github.com/docker/docker/runconfig" + "github.com/docker/libnetwork" + nwconfig "github.com/docker/libnetwork/config" + "github.com/docker/libnetwork/datastore" + winlibnetwork "github.com/docker/libnetwork/drivers/windows" + "github.com/docker/libnetwork/netlabel" + "github.com/docker/libnetwork/options" + blkiodev "github.com/opencontainers/runc/libcontainer/configs" + "golang.org/x/sys/windows" +) + +const ( + defaultNetworkSpace = "172.16.0.0/12" + platformSupported = true + windowsMinCPUShares = 1 + windowsMaxCPUShares = 10000 + windowsMinCPUPercent = 1 + windowsMaxCPUPercent = 100 + windowsMinCPUCount = 1 + + errInvalidState = syscall.Errno(0x139F) +) + +// Windows has no concept of an execution state directory. So use config.Root here. +func getPluginExecRoot(root string) string { + return filepath.Join(root, "plugins") +} + +func getBlkioWeightDevices(config *containertypes.HostConfig) ([]blkiodev.WeightDevice, error) { + return nil, nil +} + +func (daemon *Daemon) parseSecurityOpt(container *container.Container, hostConfig *containertypes.HostConfig) error { + return parseSecurityOpt(container, hostConfig) +} + +func parseSecurityOpt(container *container.Container, config *containertypes.HostConfig) error { + return nil +} + +func getBlkioReadIOpsDevices(config *containertypes.HostConfig) ([]blkiodev.ThrottleDevice, error) { + return nil, nil +} + +func getBlkioWriteIOpsDevices(config *containertypes.HostConfig) ([]blkiodev.ThrottleDevice, error) { + return nil, nil +} + +func getBlkioReadBpsDevices(config *containertypes.HostConfig) ([]blkiodev.ThrottleDevice, error) { + return nil, nil +} + +func getBlkioWriteBpsDevices(config *containertypes.HostConfig) ([]blkiodev.ThrottleDevice, error) { + return nil, nil +} + +func (daemon *Daemon) getLayerInit() func(string) error { + return nil +} + +func checkKernel() error { + return nil +} + +func (daemon *Daemon) getCgroupDriver() string { + return "" +} + +// adaptContainerSettings is called during container creation to modify any +// settings necessary in the HostConfig structure. 
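+// On Windows this is currently a pass-through: unlike the Linux
+// implementation (which, among other things, clamps CPUShares into
+// [linuxMinCPUShares, linuxMaxCPUShares] when adjustCPUShares is set,
+// as the tests above exercise), no settings are modified here.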
+func (daemon *Daemon) adaptContainerSettings(hostConfig *containertypes.HostConfig, adjustCPUShares bool) error { + if hostConfig == nil { + return nil + } + + return nil +} + +func verifyContainerResources(resources *containertypes.Resources, isHyperv bool) ([]string, error) { + warnings := []string{} + + if !isHyperv { + // The processor resource controls are mutually exclusive on + // Windows Server Containers, the order of precedence is + // CPUCount first, then CPUShares, and CPUPercent last. + if resources.CPUCount > 0 { + if resources.CPUShares > 0 { + warnings = append(warnings, "Conflicting options: CPU count takes priority over CPU shares on Windows Server Containers. CPU shares discarded") + logrus.Warn("Conflicting options: CPU count takes priority over CPU shares on Windows Server Containers. CPU shares discarded") + resources.CPUShares = 0 + } + if resources.CPUPercent > 0 { + warnings = append(warnings, "Conflicting options: CPU count takes priority over CPU percent on Windows Server Containers. CPU percent discarded") + logrus.Warn("Conflicting options: CPU count takes priority over CPU percent on Windows Server Containers. CPU percent discarded") + resources.CPUPercent = 0 + } + } else if resources.CPUShares > 0 { + if resources.CPUPercent > 0 { + warnings = append(warnings, "Conflicting options: CPU shares takes priority over CPU percent on Windows Server Containers. CPU percent discarded") + logrus.Warn("Conflicting options: CPU shares takes priority over CPU percent on Windows Server Containers. CPU percent discarded") + resources.CPUPercent = 0 + } + } + } + + if resources.CPUShares < 0 || resources.CPUShares > windowsMaxCPUShares { + return warnings, fmt.Errorf("range of CPUShares is from %d to %d", windowsMinCPUShares, windowsMaxCPUShares) + } + if resources.CPUPercent < 0 || resources.CPUPercent > windowsMaxCPUPercent { + return warnings, fmt.Errorf("range of CPUPercent is from %d to %d", windowsMinCPUPercent, windowsMaxCPUPercent) + } + if resources.CPUCount < 0 { + return warnings, fmt.Errorf("invalid CPUCount: CPUCount cannot be negative") + } + + if resources.NanoCPUs > 0 && resources.CPUPercent > 0 { + return warnings, fmt.Errorf("conflicting options: Nano CPUs and CPU Percent cannot both be set") + } + if resources.NanoCPUs > 0 && resources.CPUShares > 0 { + return warnings, fmt.Errorf("conflicting options: Nano CPUs and CPU Shares cannot both be set") + } + // The precision we could get is 0.01, because on Windows we have to convert to CPUPercent. + // We don't set the lower limit here and it is up to the underlying platform (e.g., Windows) to return an error. + if resources.NanoCPUs < 0 || resources.NanoCPUs > int64(sysinfo.NumCPU())*1e9 { + return warnings, fmt.Errorf("range of CPUs is from 0.01 to %d.00, as there are only %d CPUs available", sysinfo.NumCPU(), sysinfo.NumCPU()) + } + + osv := system.GetOSVersion() + if resources.NanoCPUs > 0 && isHyperv && osv.Build < 16175 { + leftoverNanoCPUs := resources.NanoCPUs % 1e9 + if leftoverNanoCPUs != 0 && resources.NanoCPUs > 1e9 { + resources.NanoCPUs = ((resources.NanoCPUs + 1e9/2) / 1e9) * 1e9 + warningString := fmt.Sprintf("Your current OS version does not support Hyper-V containers with NanoCPUs greater than 1000000000 but not divisible by 1000000000. 
NanoCPUs rounded to %d", resources.NanoCPUs) + warnings = append(warnings, warningString) + logrus.Warn(warningString) + } + } + + if len(resources.BlkioDeviceReadBps) > 0 { + return warnings, fmt.Errorf("invalid option: Windows does not support BlkioDeviceReadBps") + } + if len(resources.BlkioDeviceReadIOps) > 0 { + return warnings, fmt.Errorf("invalid option: Windows does not support BlkioDeviceReadIOps") + } + if len(resources.BlkioDeviceWriteBps) > 0 { + return warnings, fmt.Errorf("invalid option: Windows does not support BlkioDeviceWriteBps") + } + if len(resources.BlkioDeviceWriteIOps) > 0 { + return warnings, fmt.Errorf("invalid option: Windows does not support BlkioDeviceWriteIOps") + } + if resources.BlkioWeight > 0 { + return warnings, fmt.Errorf("invalid option: Windows does not support BlkioWeight") + } + if len(resources.BlkioWeightDevice) > 0 { + return warnings, fmt.Errorf("invalid option: Windows does not support BlkioWeightDevice") + } + if resources.CgroupParent != "" { + return warnings, fmt.Errorf("invalid option: Windows does not support CgroupParent") + } + if resources.CPUPeriod != 0 { + return warnings, fmt.Errorf("invalid option: Windows does not support CPUPeriod") + } + if resources.CpusetCpus != "" { + return warnings, fmt.Errorf("invalid option: Windows does not support CpusetCpus") + } + if resources.CpusetMems != "" { + return warnings, fmt.Errorf("invalid option: Windows does not support CpusetMems") + } + if resources.KernelMemory != 0 { + return warnings, fmt.Errorf("invalid option: Windows does not support KernelMemory") + } + if resources.MemoryReservation != 0 { + return warnings, fmt.Errorf("invalid option: Windows does not support MemoryReservation") + } + if resources.MemorySwap != 0 { + return warnings, fmt.Errorf("invalid option: Windows does not support MemorySwap") + } + if resources.MemorySwappiness != nil && *resources.MemorySwappiness != -1 { + return warnings, fmt.Errorf("invalid option: Windows does not support MemorySwappiness") + } + if resources.OomKillDisable != nil && *resources.OomKillDisable { + return warnings, fmt.Errorf("invalid option: Windows does not support OomKillDisable") + } + if resources.PidsLimit != 0 { + return warnings, fmt.Errorf("invalid option: Windows does not support PidsLimit") + } + if len(resources.Ulimits) != 0 { + return warnings, fmt.Errorf("invalid option: Windows does not support Ulimits") + } + return warnings, nil +} + +// verifyPlatformContainerSettings performs platform-specific validation of the +// hostconfig and config structures. +func verifyPlatformContainerSettings(daemon *Daemon, hostConfig *containertypes.HostConfig, config *containertypes.Config, update bool) ([]string, error) { + warnings := []string{} + + hyperv := daemon.runAsHyperVContainer(hostConfig) + if !hyperv && system.IsWindowsClient() { + // @engine maintainers. This block should not be removed. It partially enforces licensing + // restrictions on Windows. Ping @jhowardmsft if there are concerns or PRs to change this. + return warnings, fmt.Errorf("Windows client operating systems only support Hyper-V containers") + } + + w, err := verifyContainerResources(&hostConfig.Resources, hyperv) + warnings = append(warnings, w...) 
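	// Resource warnings are returned to the caller even when err is nil, so
	// a questionable-but-allowed setting (e.g. the NanoCPUs rounding above)
	// surfaces in the client output rather than failing container creation.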
+	return warnings, err
+}
+
+// reloadPlatform updates configuration with platform specific options
+// and updates the passed attributes
+func (daemon *Daemon) reloadPlatform(config *config.Config, attributes map[string]string) {
+}
+
+// verifyDaemonSettings performs validation of daemon config struct
+func verifyDaemonSettings(config *config.Config) error {
+	return nil
+}
+
+// checkSystem validates platform-specific requirements
+func checkSystem() error {
+	// Validate the OS version. Note that docker.exe must be manifested for this
+	// call to return the correct version.
+	osv := system.GetOSVersion()
+	if osv.MajorVersion < 10 {
+		return fmt.Errorf("This version of Windows does not support the docker daemon")
+	}
+	if osv.Build < 14393 {
+		return fmt.Errorf("The docker daemon requires build 14393 or later of Windows Server 2016 or Windows 10")
+	}
+
+	vmcompute := windows.NewLazySystemDLL("vmcompute.dll")
+	if vmcompute.Load() != nil {
+		return fmt.Errorf("Failed to load vmcompute.dll. Ensure that the Containers role is installed.")
+	}
+
+	return nil
+}
+
+// configureKernelSecuritySupport configures and validates security support for the kernel
+func configureKernelSecuritySupport(config *config.Config, driverName string) error {
+	return nil
+}
+
+// configureMaxThreads sets the Go runtime max threads threshold
+func configureMaxThreads(config *config.Config) error {
+	return nil
+}
+
+func (daemon *Daemon) initNetworkController(config *config.Config, activeSandboxes map[string]interface{}) (libnetwork.NetworkController, error) {
+	netOptions, err := daemon.networkOptions(config, nil, nil)
+	if err != nil {
+		return nil, err
+	}
+	controller, err := libnetwork.New(netOptions...)
+	if err != nil {
+		return nil, fmt.Errorf("error obtaining controller instance: %v", err)
+	}
+
+	hnsresponse, err := hcsshim.HNSListNetworkRequest("GET", "", "")
+	if err != nil {
+		return nil, err
+	}
+
+	// Remove networks not present in HNS
+	for _, v := range controller.Networks() {
+		options := v.Info().DriverOptions()
+		hnsid := options[winlibnetwork.HNSID]
+		found := false
+
+		for _, v := range hnsresponse {
+			if v.Id == hnsid {
+				found = true
+				break
+			}
+		}
+
+		if !found {
+			// global networks should not be deleted by local HNS
+			if v.Info().Scope() != datastore.GlobalScope {
+				err = v.Delete()
+				if err != nil {
+					logrus.Errorf("Error occurred when removing network %v", err)
+				}
+			}
+		}
+	}
+
+	_, err = controller.NewNetwork("null", "none", "", libnetwork.NetworkOptionPersist(false))
+	if err != nil {
+		return nil, err
+	}
+
+	defaultNetworkExists := false
+
+	if network, err := controller.NetworkByName(runconfig.DefaultDaemonNetworkMode().NetworkName()); err == nil {
+		options := network.Info().DriverOptions()
+		for _, v := range hnsresponse {
+			if options[winlibnetwork.HNSID] == v.Id {
+				defaultNetworkExists = true
+				break
+			}
+		}
+	}
+
+	// Discover and add HNS networks to Windows.
+	// Networks that already exist are removed and re-added.
+	for _, v := range hnsresponse {
+		var n libnetwork.Network
+		s := func(current libnetwork.Network) bool {
+			options := current.Info().DriverOptions()
+			if options[winlibnetwork.HNSID] == v.Id {
+				n = current
+				return true
+			}
+			return false
+		}
+
+		controller.WalkNetworks(s)
+		if n != nil {
+			// global networks should not be deleted by local HNS
+			if n.Info().Scope() == datastore.GlobalScope {
+				continue
+			}
+			v.Name = n.Name()
+			// This will not cause network delete from HNS as the network
+			// is not yet populated in the libnetwork windows driver
+			n.Delete()
+ } + + netOption := map[string]string{ + winlibnetwork.NetworkName: v.Name, + winlibnetwork.HNSID: v.Id, + } + + v4Conf := []*libnetwork.IpamConf{} + for _, subnet := range v.Subnets { + ipamV4Conf := libnetwork.IpamConf{} + ipamV4Conf.PreferredPool = subnet.AddressPrefix + ipamV4Conf.Gateway = subnet.GatewayAddress + v4Conf = append(v4Conf, &ipamV4Conf) + } + + name := v.Name + + // If there is no nat network create one from the first NAT network + // encountered if it doesn't already exist + if !defaultNetworkExists && + runconfig.DefaultDaemonNetworkMode() == containertypes.NetworkMode(strings.ToLower(v.Type)) && + n == nil { + name = runconfig.DefaultDaemonNetworkMode().NetworkName() + defaultNetworkExists = true + } + + v6Conf := []*libnetwork.IpamConf{} + _, err := controller.NewNetwork(strings.ToLower(v.Type), name, "", + libnetwork.NetworkOptionGeneric(options.Generic{ + netlabel.GenericData: netOption, + }), + libnetwork.NetworkOptionIpam("default", "", v4Conf, v6Conf, nil), + ) + + if err != nil { + logrus.Errorf("Error occurred when creating network %v", err) + } + } + + if !config.DisableBridge { + // Initialize default driver "bridge" + if err := initBridgeDriver(controller, config); err != nil { + return nil, err + } + } + + return controller, nil +} + +func initBridgeDriver(controller libnetwork.NetworkController, config *config.Config) error { + if _, err := controller.NetworkByName(runconfig.DefaultDaemonNetworkMode().NetworkName()); err == nil { + return nil + } + + netOption := map[string]string{ + winlibnetwork.NetworkName: runconfig.DefaultDaemonNetworkMode().NetworkName(), + } + + var ipamOption libnetwork.NetworkOption + var subnetPrefix string + + if config.BridgeConfig.FixedCIDR != "" { + subnetPrefix = config.BridgeConfig.FixedCIDR + } else { + // TP5 doesn't support properly detecting subnet + osv := system.GetOSVersion() + if osv.Build < 14360 { + subnetPrefix = defaultNetworkSpace + } + } + + if subnetPrefix != "" { + ipamV4Conf := libnetwork.IpamConf{} + ipamV4Conf.PreferredPool = subnetPrefix + v4Conf := []*libnetwork.IpamConf{&ipamV4Conf} + v6Conf := []*libnetwork.IpamConf{} + ipamOption = libnetwork.NetworkOptionIpam("default", "", v4Conf, v6Conf, nil) + } + + _, err := controller.NewNetwork(string(runconfig.DefaultDaemonNetworkMode()), runconfig.DefaultDaemonNetworkMode().NetworkName(), "", + libnetwork.NetworkOptionGeneric(options.Generic{ + netlabel.GenericData: netOption, + }), + ipamOption, + ) + + if err != nil { + return fmt.Errorf("Error creating default network: %v", err) + } + + return nil +} + +// registerLinks sets up links between containers and writes the +// configuration out for persistence. As of Windows TP4, links are not supported. 
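+// The Windows implementation is therefore intentionally a no-op.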
+func (daemon *Daemon) registerLinks(container *container.Container, hostConfig *containertypes.HostConfig) error {
+	return nil
+}
+
+func (daemon *Daemon) cleanupMountsByID(in string) error {
+	return nil
+}
+
+func (daemon *Daemon) cleanupMounts() error {
+	return nil
+}
+
+func setupRemappedRoot(config *config.Config) ([]idtools.IDMap, []idtools.IDMap, error) {
+	return nil, nil, nil
+}
+
+func setupDaemonRoot(config *config.Config, rootDir string, rootUID, rootGID int) error {
+	config.Root = rootDir
+	// Create the root directory if it doesn't exist
+	if err := system.MkdirAllWithACL(config.Root, 0); err != nil && !os.IsExist(err) {
+		return err
+	}
+	return nil
+}
+
+// runAsHyperVContainer returns true if we are going to run as a Hyper-V container
+func (daemon *Daemon) runAsHyperVContainer(hostConfig *containertypes.HostConfig) bool {
+	if hostConfig.Isolation.IsDefault() {
+		// Container is set to use the default, so take the default from the daemon configuration
+		return daemon.defaultIsolation.IsHyperV()
+	}
+
+	// Container is requesting an isolation mode. Honour it.
+	return hostConfig.Isolation.IsHyperV()
+
+}
+
+// conditionalMountOnStart is a platform specific helper function called
+// during container start to perform the mount.
+func (daemon *Daemon) conditionalMountOnStart(container *container.Container) error {
+	// We do not mount if a Hyper-V container
+	if !daemon.runAsHyperVContainer(container.HostConfig) {
+		return daemon.Mount(container)
+	}
+	return nil
+}
+
+// conditionalUnmountOnCleanup is a platform specific helper function called
+// during the cleanup of a container to unmount.
+func (daemon *Daemon) conditionalUnmountOnCleanup(container *container.Container) error {
+	// We do not unmount if a Hyper-V container
+	if !daemon.runAsHyperVContainer(container.HostConfig) {
+		return daemon.Unmount(container)
+	}
+	return nil
+}
+
+func driverOptions(config *config.Config) []nwconfig.Option {
+	return []nwconfig.Option{}
+}
+
+func (daemon *Daemon) stats(c *container.Container) (*types.StatsJSON, error) {
+	if !c.IsRunning() {
+		return nil, errNotRunning{c.ID}
+	}
+
+	// Obtain the stats from HCS via libcontainerd
+	stats, err := daemon.containerd.Stats(c.ID)
+	if err != nil {
+		return nil, err
+	}
+
+	// Start with an empty structure
+	s := &types.StatsJSON{}
+
+	// Populate the CPU/processor statistics
+	s.CPUStats = types.CPUStats{
+		CPUUsage: types.CPUUsage{
+			TotalUsage:        stats.Processor.TotalRuntime100ns,
+			UsageInKernelmode: stats.Processor.RuntimeKernel100ns,
+			UsageInUsermode:   stats.Processor.RuntimeUser100ns,
+		},
+	}
+
+	// Populate the memory statistics
+	s.MemoryStats = types.MemoryStats{
+		Commit:            stats.Memory.UsageCommitBytes,
+		CommitPeak:        stats.Memory.UsageCommitPeakBytes,
+		PrivateWorkingSet: stats.Memory.UsagePrivateWorkingSetBytes,
+	}
+
+	// Populate the storage statistics
+	s.StorageStats = types.StorageStats{
+		ReadCountNormalized:  stats.Storage.ReadCountNormalized,
+		ReadSizeBytes:        stats.Storage.ReadSizeBytes,
+		WriteCountNormalized: stats.Storage.WriteCountNormalized,
+		WriteSizeBytes:       stats.Storage.WriteSizeBytes,
+	}
+
+	// Populate the network statistics
+	s.Networks = make(map[string]types.NetworkStats)
+
+	for _, nstats := range stats.Network {
+		s.Networks[nstats.EndpointId] = types.NetworkStats{
+			RxBytes:   nstats.BytesReceived,
+			RxPackets: nstats.PacketsReceived,
+			RxDropped: nstats.DroppedPacketsIncoming,
+			TxBytes:   nstats.BytesSent,
+			TxPackets: nstats.PacketsSent,
+			TxDropped: nstats.DroppedPacketsOutgoing,
+		}
+	}
+
+	// Set the timestamp
+	s.Stats.Read = stats.Timestamp
+	s.Stats.NumProcs = platform.NumProcs()
+
+	return s, nil
+}
+
+// setDefaultIsolation determines the default isolation mode for the
+// daemon to run in. This is only applicable on Windows.
+func (daemon *Daemon) setDefaultIsolation() error {
+	daemon.defaultIsolation = containertypes.Isolation("process")
+	// On client SKUs, default to Hyper-V
+	if system.IsWindowsClient() {
+		daemon.defaultIsolation = containertypes.Isolation("hyperv")
+	}
+	for _, option := range daemon.configStore.ExecOptions {
+		key, val, err := parsers.ParseKeyValueOpt(option)
+		if err != nil {
+			return err
+		}
+		key = strings.ToLower(key)
+		switch key {
+
+		case "isolation":
+			if !containertypes.Isolation(val).IsValid() {
+				return fmt.Errorf("Invalid exec-opt value for 'isolation':'%s'", val)
+			}
+			if containertypes.Isolation(val).IsHyperV() {
+				daemon.defaultIsolation = containertypes.Isolation("hyperv")
+			}
+			if containertypes.Isolation(val).IsProcess() {
+				if system.IsWindowsClient() {
+					// @engine maintainers. This block should not be removed. It partially enforces licensing
+					// restrictions on Windows. Ping @jhowardmsft if there are concerns or PRs to change this.
+					return fmt.Errorf("Windows client operating systems only support Hyper-V containers")
+				}
+				daemon.defaultIsolation = containertypes.Isolation("process")
+			}
+		default:
+			return fmt.Errorf("Unrecognised exec-opt '%s'", key)
+		}
+	}
+
+	logrus.Infof("Windows default isolation mode: %s", daemon.defaultIsolation)
+	return nil
+}
+
+func rootFSToAPIType(rootfs *image.RootFS) types.RootFS {
+	var layers []string
+	for _, l := range rootfs.DiffIDs {
+		layers = append(layers, l.String())
+	}
+	return types.RootFS{
+		Type:   rootfs.Type,
+		Layers: layers,
+	}
+}
+
+func setupDaemonProcess(config *config.Config) error {
+	return nil
+}
+
+// verifyVolumesInfo is a no-op on Windows.
+// This is called during daemon initialization to migrate volumes from pre-1.7;
+// volumes were not supported on Windows pre-1.7.
+func (daemon *Daemon) verifyVolumesInfo(container *container.Container) error {
+	return nil
+}
+
+func (daemon *Daemon) setupSeccompProfile() error {
+	return nil
+}
diff --git a/components/engine/daemon/debugtrap.go b/components/engine/daemon/debugtrap.go
new file mode 100644
index 0000000000..209048b589
--- /dev/null
+++ b/components/engine/daemon/debugtrap.go
@@ -0,0 +1,62 @@
+package daemon
+
+import (
+	"fmt"
+	"os"
+	"path/filepath"
+	"strings"
+	"time"
+
+	"github.com/davecgh/go-spew/spew"
+	"github.com/pkg/errors"
+)
+
+const dataStructuresLogNameTemplate = "daemon-data-%s.log"
+
+// dumpDaemon dumps the daemon datastructures into a file in dir and returns
+// the full path to that file.
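+//
+// The dump is produced alongside the goroutine stack dump; on Unix platforms
+// it is triggered by SIGUSR1 (see debugtrap_unix.go), e.g.:
+//
+//	kill -USR1 $(pidof dockerd)
+//
+// and on Windows by signalling the global event created in
+// debugtrap_windows.go.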
+func (d *Daemon) dumpDaemon(dir string) (string, error) { + // Ensure we recover from a panic as we are doing this without any locking + defer func() { + recover() + }() + + path := filepath.Join(dir, fmt.Sprintf(dataStructuresLogNameTemplate, strings.Replace(time.Now().Format(time.RFC3339), ":", "", -1))) + f, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY, 0666) + if err != nil { + return "", errors.Wrap(err, "failed to open file to write the daemon datastructure dump") + } + defer f.Close() + + dump := struct { + containers interface{} + names interface{} + links interface{} + execs interface{} + volumes interface{} + images interface{} + layers interface{} + imageReferences interface{} + downloads interface{} + uploads interface{} + registry interface{} + plugins interface{} + }{ + containers: d.containers, + execs: d.execCommands, + volumes: d.volumes, + images: d.imageStore, + layers: d.layerStore, + imageReferences: d.referenceStore, + downloads: d.downloadManager, + uploads: d.uploadManager, + registry: d.RegistryService, + plugins: d.PluginStore, + names: d.nameIndex, + links: d.linkIndex, + } + + spew.Fdump(f, dump) // Does not return an error + f.Sync() + return path, nil +} diff --git a/components/engine/daemon/debugtrap_unix.go b/components/engine/daemon/debugtrap_unix.go new file mode 100644 index 0000000000..d650eb7f8c --- /dev/null +++ b/components/engine/daemon/debugtrap_unix.go @@ -0,0 +1,33 @@ +// +build !windows + +package daemon + +import ( + "os" + "os/signal" + "syscall" + + "github.com/Sirupsen/logrus" + stackdump "github.com/docker/docker/pkg/signal" +) + +func (d *Daemon) setupDumpStackTrap(root string) { + c := make(chan os.Signal, 1) + signal.Notify(c, syscall.SIGUSR1) + go func() { + for range c { + path, err := stackdump.DumpStacks(root) + if err != nil { + logrus.WithError(err).Error("failed to write goroutines dump") + } else { + logrus.Infof("goroutine stacks written to %s", path) + } + path, err = d.dumpDaemon(root) + if err != nil { + logrus.WithError(err).Error("failed to write daemon datastructure dump") + } else { + logrus.Infof("daemon datastructure dump written to %s", path) + } + } + }() +} diff --git a/components/engine/daemon/debugtrap_unsupported.go b/components/engine/daemon/debugtrap_unsupported.go new file mode 100644 index 0000000000..f5b9170907 --- /dev/null +++ b/components/engine/daemon/debugtrap_unsupported.go @@ -0,0 +1,7 @@ +// +build !linux,!darwin,!freebsd,!windows,!solaris + +package daemon + +func (d *Daemon) setupDumpStackTrap(_ string) { + return +} diff --git a/components/engine/daemon/debugtrap_windows.go b/components/engine/daemon/debugtrap_windows.go new file mode 100644 index 0000000000..fb20c9d2c5 --- /dev/null +++ b/components/engine/daemon/debugtrap_windows.go @@ -0,0 +1,52 @@ +package daemon + +import ( + "fmt" + "os" + "syscall" + "unsafe" + + winio "github.com/Microsoft/go-winio" + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/signal" + "github.com/docker/docker/pkg/system" +) + +func (d *Daemon) setupDumpStackTrap(root string) { + // Windows does not support signals like *nix systems. So instead of + // trapping on SIGUSR1 to dump stacks, we wait on a Win32 event to be + // signaled. 
ACL'd to builtin administrators and local system + ev := "Global\\docker-daemon-" + fmt.Sprint(os.Getpid()) + sd, err := winio.SddlToSecurityDescriptor("D:P(A;;GA;;;BA)(A;;GA;;;SY)") + if err != nil { + logrus.Errorf("failed to get security descriptor for debug stackdump event %s: %s", ev, err.Error()) + return + } + var sa syscall.SecurityAttributes + sa.Length = uint32(unsafe.Sizeof(sa)) + sa.InheritHandle = 1 + sa.SecurityDescriptor = uintptr(unsafe.Pointer(&sd[0])) + h, err := system.CreateEvent(&sa, false, false, ev) + if h == 0 || err != nil { + logrus.Errorf("failed to create debug stackdump event %s: %s", ev, err.Error()) + return + } + go func() { + logrus.Debugf("Stackdump - waiting signal at %s", ev) + for { + syscall.WaitForSingleObject(h, syscall.INFINITE) + path, err := signal.DumpStacks(root) + if err != nil { + logrus.WithError(err).Error("failed to write goroutines dump") + } else { + logrus.Infof("goroutine stacks written to %s", path) + } + path, err = d.dumpDaemon(root) + if err != nil { + logrus.WithError(err).Error("failed to write daemon datastructure dump") + } else { + logrus.Infof("daemon datastructure dump written to %s", path) + } + } + }() +} diff --git a/components/engine/daemon/delete.go b/components/engine/daemon/delete.go new file mode 100644 index 0000000000..af709445ce --- /dev/null +++ b/components/engine/daemon/delete.go @@ -0,0 +1,169 @@ +package daemon + +import ( + "fmt" + "os" + "path" + "strings" + "time" + + "github.com/Sirupsen/logrus" + apierrors "github.com/docker/docker/api/errors" + "github.com/docker/docker/api/types" + "github.com/docker/docker/container" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/system" + volumestore "github.com/docker/docker/volume/store" + "github.com/pkg/errors" +) + +// ContainerRm removes the container id from the filesystem. An error +// is returned if the container is not found, or if the remove +// fails. If the remove succeeds, the container name is released, and +// network links are removed. +func (daemon *Daemon) ContainerRm(name string, config *types.ContainerRmConfig) error { + start := time.Now() + container, err := daemon.GetContainer(name) + if err != nil { + return err + } + + // Container state RemovalInProgress should be used to avoid races. 
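	// For example, two concurrent `docker rm -f` calls on the same container:
	// only the first SetRemovalInProgress succeeds, and the second caller gets
	// a "removal already in progress" error instead of racing the cleanup.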
+ if inProgress := container.SetRemovalInProgress(); inProgress { + err := fmt.Errorf("removal of container %s is already in progress", name) + return apierrors.NewBadRequestError(err) + } + defer container.ResetRemovalInProgress() + + // check if container wasn't deregistered by previous rm since Get + if c := daemon.containers.Get(container.ID); c == nil { + return nil + } + + if config.RemoveLink { + return daemon.rmLink(container, name) + } + + err = daemon.cleanupContainer(container, config.ForceRemove, config.RemoveVolume) + containerActions.WithValues("delete").UpdateSince(start) + + return err +} + +func (daemon *Daemon) rmLink(container *container.Container, name string) error { + if name[0] != '/' { + name = "/" + name + } + parent, n := path.Split(name) + if parent == "/" { + return fmt.Errorf("Conflict, cannot remove the default name of the container") + } + + parent = strings.TrimSuffix(parent, "/") + pe, err := daemon.nameIndex.Get(parent) + if err != nil { + return fmt.Errorf("Cannot get parent %s for name %s", parent, name) + } + + daemon.releaseName(name) + parentContainer, _ := daemon.GetContainer(pe) + if parentContainer != nil { + daemon.linkIndex.unlink(name, container, parentContainer) + if err := daemon.updateNetwork(parentContainer); err != nil { + logrus.Debugf("Could not update network to remove link %s: %v", n, err) + } + } + return nil +} + +// cleanupContainer unregisters a container from the daemon, stops stats +// collection and cleanly removes contents and metadata from the filesystem. +func (daemon *Daemon) cleanupContainer(container *container.Container, forceRemove, removeVolume bool) (err error) { + if container.IsRunning() { + if !forceRemove { + state := container.StateString() + procedure := "Stop the container before attempting removal or force remove" + if state == "paused" { + procedure = "Unpause and then " + strings.ToLower(procedure) + } + err := fmt.Errorf("You cannot remove a %s container %s. %s", state, container.ID, procedure) + return apierrors.NewRequestConflictError(err) + } + if err := daemon.Kill(container); err != nil { + return fmt.Errorf("Could not kill running container %s, cannot remove - %v", container.ID, err) + } + } + + // stop collection of stats for the container regardless + // if stats are currently getting collected. + daemon.statsCollector.StopCollection(container) + + if err = daemon.containerStop(container, 3); err != nil { + return err + } + + // Mark container dead. We don't want anybody to be restarting it. + container.SetDead() + + // Save container state to disk. So that if error happens before + // container meta file got removed from disk, then a restart of + // docker should not make a dead container alive. 
+ if err := container.ToDiskLocking(); err != nil && !os.IsNotExist(err) { + logrus.Errorf("Error saving dying container to disk: %v", err) + } + + // When container creation fails and `RWLayer` has not been created yet, we + // do not call `ReleaseRWLayer` + if container.RWLayer != nil { + metadata, err := daemon.layerStore.ReleaseRWLayer(container.RWLayer) + layer.LogReleaseMetadata(metadata) + if err != nil && err != layer.ErrMountDoesNotExist { + return errors.Wrapf(err, "driver %q failed to remove root filesystem for %s", daemon.GraphDriverName(), container.ID) + } + } + + if err := system.EnsureRemoveAll(container.Root); err != nil { + return errors.Wrapf(err, "unable to remove filesystem for %s", container.ID) + } + + daemon.nameIndex.Delete(container.ID) + daemon.linkIndex.delete(container) + selinuxFreeLxcContexts(container.ProcessLabel) + daemon.idIndex.Delete(container.ID) + daemon.containers.Delete(container.ID) + if e := daemon.removeMountPoints(container, removeVolume); e != nil { + logrus.Error(e) + } + container.SetRemoved() + stateCtr.del(container.ID) + daemon.LogContainerEvent(container, "destroy") + return nil +} + +// VolumeRm removes the volume with the given name. +// If the volume is referenced by a container it is not removed +// This is called directly from the Engine API +func (daemon *Daemon) VolumeRm(name string, force bool) error { + err := daemon.volumeRm(name) + if err != nil && volumestore.IsInUse(err) { + return apierrors.NewRequestConflictError(err) + } + if err == nil || force { + daemon.volumes.Purge(name) + return nil + } + return err +} + +func (daemon *Daemon) volumeRm(name string) error { + v, err := daemon.volumes.Get(name) + if err != nil { + return err + } + + if err := daemon.volumes.Remove(v); err != nil { + return errors.Wrap(err, "unable to remove volume") + } + daemon.LogVolumeEvent(v.Name(), "destroy", map[string]string{"driver": v.DriverName()}) + return nil +} diff --git a/components/engine/daemon/delete_test.go b/components/engine/daemon/delete_test.go new file mode 100644 index 0000000000..4664cd9869 --- /dev/null +++ b/components/engine/daemon/delete_test.go @@ -0,0 +1,98 @@ +package daemon + +import ( + "fmt" + "io/ioutil" + "os" + "testing" + + "github.com/docker/docker/api/types" + containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/docker/container" + "github.com/docker/docker/pkg/testutil" + "github.com/stretchr/testify/require" +) + +func newDaemonWithTmpRoot(t *testing.T) (*Daemon, func()) { + tmp, err := ioutil.TempDir("", "docker-daemon-unix-test-") + require.NoError(t, err) + d := &Daemon{ + repository: tmp, + root: tmp, + } + d.containers = container.NewMemoryStore() + return d, func() { os.RemoveAll(tmp) } +} + +func newContainerWithState(state *container.State) *container.Container { + return &container.Container{ + CommonContainer: container.CommonContainer{ + ID: "test", + State: state, + Config: &containertypes.Config{}, + }, + } + +} + +// TestContainerDelete tests that a useful error message and instructions is +// given when attempting to remove a container (#30842) +func TestContainerDelete(t *testing.T) { + tt := []struct { + errMsg string + fixMsg string + initContainer func() *container.Container + }{ + // a paused container + { + errMsg: "cannot remove a paused container", + fixMsg: "Unpause and then stop the container before attempting removal or force remove", + initContainer: func() *container.Container { + return newContainerWithState(&container.State{Paused: true, Running: 
true}) + }}, + // a restarting container + { + errMsg: "cannot remove a restarting container", + fixMsg: "Stop the container before attempting removal or force remove", + initContainer: func() *container.Container { + c := newContainerWithState(container.NewState()) + c.SetRunning(0, true) + c.SetRestarting(&container.ExitStatus{}) + return c + }}, + // a running container + { + errMsg: "cannot remove a running container", + fixMsg: "Stop the container before attempting removal or force remove", + initContainer: func() *container.Container { + return newContainerWithState(&container.State{Running: true}) + }}, + } + + for _, te := range tt { + c := te.initContainer() + d, cleanup := newDaemonWithTmpRoot(t) + defer cleanup() + d.containers.Add(c.ID, c) + + err := d.ContainerRm(c.ID, &types.ContainerRmConfig{ForceRemove: false}) + testutil.ErrorContains(t, err, te.errMsg) + testutil.ErrorContains(t, err, te.fixMsg) + } +} + +func TestContainerDoubleDelete(t *testing.T) { + c := newContainerWithState(container.NewState()) + + // Mark the container as having a delete in progress + c.SetRemovalInProgress() + + d, cleanup := newDaemonWithTmpRoot(t) + defer cleanup() + d.containers.Add(c.ID, c) + + // Try to remove the container when its state is removalInProgress. + // It should return an error indicating it is under removal progress. + err := d.ContainerRm(c.ID, &types.ContainerRmConfig{ForceRemove: true}) + testutil.ErrorContains(t, err, fmt.Sprintf("removal of container %s is already in progress", c.ID)) +} diff --git a/components/engine/daemon/dependency.go b/components/engine/daemon/dependency.go new file mode 100644 index 0000000000..83144e6865 --- /dev/null +++ b/components/engine/daemon/dependency.go @@ -0,0 +1,17 @@ +package daemon + +import ( + "github.com/docker/swarmkit/agent/exec" +) + +// SetContainerDependencyStore sets the dependency store backend for the container +func (daemon *Daemon) SetContainerDependencyStore(name string, store exec.DependencyGetter) error { + c, err := daemon.GetContainer(name) + if err != nil { + return err + } + + c.DependencyStore = store + + return nil +} diff --git a/components/engine/daemon/discovery/discovery.go b/components/engine/daemon/discovery/discovery.go new file mode 100644 index 0000000000..509155cbbd --- /dev/null +++ b/components/engine/daemon/discovery/discovery.go @@ -0,0 +1,202 @@ +package discovery + +import ( + "errors" + "fmt" + "strconv" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/discovery" + + // Register the libkv backends for discovery. + _ "github.com/docker/docker/pkg/discovery/kv" +) + +const ( + // defaultDiscoveryHeartbeat is the default value for discovery heartbeat interval. 
+ defaultDiscoveryHeartbeat = 20 * time.Second + // defaultDiscoveryTTLFactor is the default TTL factor for discovery + defaultDiscoveryTTLFactor = 3 +) + +// ErrDiscoveryDisabled is an error returned if the discovery is disabled +var ErrDiscoveryDisabled = errors.New("discovery is disabled") + +// Reloader is the discovery reloader of the daemon +type Reloader interface { + discovery.Watcher + Stop() + Reload(backend, address string, clusterOpts map[string]string) error + ReadyCh() <-chan struct{} +} + +type daemonDiscoveryReloader struct { + backend discovery.Backend + ticker *time.Ticker + term chan bool + readyCh chan struct{} +} + +func (d *daemonDiscoveryReloader) Watch(stopCh <-chan struct{}) (<-chan discovery.Entries, <-chan error) { + return d.backend.Watch(stopCh) +} + +func (d *daemonDiscoveryReloader) ReadyCh() <-chan struct{} { + return d.readyCh +} + +func discoveryOpts(clusterOpts map[string]string) (time.Duration, time.Duration, error) { + var ( + heartbeat = defaultDiscoveryHeartbeat + ttl = defaultDiscoveryTTLFactor * defaultDiscoveryHeartbeat + ) + + if hb, ok := clusterOpts["discovery.heartbeat"]; ok { + h, err := strconv.Atoi(hb) + if err != nil { + return time.Duration(0), time.Duration(0), err + } + + if h <= 0 { + return time.Duration(0), time.Duration(0), + fmt.Errorf("discovery.heartbeat must be positive") + } + + heartbeat = time.Duration(h) * time.Second + ttl = defaultDiscoveryTTLFactor * heartbeat + } + + if tstr, ok := clusterOpts["discovery.ttl"]; ok { + t, err := strconv.Atoi(tstr) + if err != nil { + return time.Duration(0), time.Duration(0), err + } + + if t <= 0 { + return time.Duration(0), time.Duration(0), + fmt.Errorf("discovery.ttl must be positive") + } + + ttl = time.Duration(t) * time.Second + + if _, ok := clusterOpts["discovery.heartbeat"]; !ok { + heartbeat = time.Duration(t) * time.Second / time.Duration(defaultDiscoveryTTLFactor) + } + + if ttl <= heartbeat { + return time.Duration(0), time.Duration(0), + fmt.Errorf("discovery.ttl timer must be greater than discovery.heartbeat") + } + } + + return heartbeat, ttl, nil +} + +// Init initializes the nodes discovery subsystem by connecting to the specified backend +// and starts a registration loop to advertise the current node under the specified address. +func Init(backendAddress, advertiseAddress string, clusterOpts map[string]string) (Reloader, error) { + heartbeat, backend, err := parseDiscoveryOptions(backendAddress, clusterOpts) + if err != nil { + return nil, err + } + + reloader := &daemonDiscoveryReloader{ + backend: backend, + ticker: time.NewTicker(heartbeat), + term: make(chan bool), + readyCh: make(chan struct{}), + } + // We call Register() on the discovery backend in a loop for the whole lifetime of the daemon, + // but we never actually Watch() for nodes appearing and disappearing for the moment. + go reloader.advertiseHeartbeat(advertiseAddress) + return reloader, nil +} + +// advertiseHeartbeat registers the current node against the discovery backend using the specified +// address. The function never returns, as registration against the backend comes with a TTL and +// requires regular heartbeats. 
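+//
+// For example, a daemon started with
+//
+//	dockerd --cluster-store=consul://localhost:8500 --cluster-advertise=192.168.0.1:2376
+//
+// ends up in this loop, re-registering 192.168.0.1:2376 on every heartbeat tick.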
+func (d *daemonDiscoveryReloader) advertiseHeartbeat(address string) {
+	var ready bool
+	if err := d.initHeartbeat(address); err == nil {
+		ready = true
+		close(d.readyCh)
+	} else {
+		logrus.WithError(err).Debug("First discovery heartbeat failed")
+	}
+
+	for {
+		select {
+		case <-d.ticker.C:
+			if err := d.backend.Register(address); err != nil {
+				logrus.Warnf("Registering as %q in discovery failed: %v", address, err)
+			} else {
+				if !ready {
+					close(d.readyCh)
+					ready = true
+				}
+			}
+		case <-d.term:
+			return
+		}
+	}
+}
+
+// initHeartbeat is used to do the first heartbeat. It uses a tight loop until
+// either the timeout period is reached or the heartbeat is successful and returns.
+func (d *daemonDiscoveryReloader) initHeartbeat(address string) error {
+	// Setup a short ticker until the first heartbeat has succeeded
+	t := time.NewTicker(500 * time.Millisecond)
+	defer t.Stop()
+	// timeout makes sure that after a period of time we stop being so aggressive trying to reach the discovery service
+	timeout := time.After(60 * time.Second)
+
+	for {
+		select {
+		case <-timeout:
+			return errors.New("timeout waiting for initial discovery")
+		case <-d.term:
+			return errors.New("terminated")
+		case <-t.C:
+			if err := d.backend.Register(address); err == nil {
+				return nil
+			}
+		}
+	}
+}
+
+// Reload stops the watcher from advertising and reconfigures it to advertise at a new address.
+func (d *daemonDiscoveryReloader) Reload(backendAddress, advertiseAddress string, clusterOpts map[string]string) error {
+	d.Stop()
+
+	heartbeat, backend, err := parseDiscoveryOptions(backendAddress, clusterOpts)
+	if err != nil {
+		return err
+	}
+
+	d.backend = backend
+	d.ticker = time.NewTicker(heartbeat)
+	d.readyCh = make(chan struct{})
+
+	go d.advertiseHeartbeat(advertiseAddress)
+	return nil
+}
+
+// Stop terminates the discovery advertising.
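+// Note that the send on the unbuffered term channel blocks until
+// advertiseHeartbeat acknowledges the termination.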
+func (d *daemonDiscoveryReloader) Stop() {
+	d.ticker.Stop()
+	d.term <- true
+}
+
+func parseDiscoveryOptions(backendAddress string, clusterOpts map[string]string) (time.Duration, discovery.Backend, error) {
+	heartbeat, ttl, err := discoveryOpts(clusterOpts)
+	if err != nil {
+		return 0, nil, err
+	}
+
+	backend, err := discovery.New(backendAddress, heartbeat, ttl, clusterOpts)
+	if err != nil {
+		return 0, nil, err
+	}
+	return heartbeat, backend, nil
+}
diff --git a/components/engine/daemon/discovery/discovery_test.go b/components/engine/daemon/discovery/discovery_test.go
new file mode 100644
index 0000000000..781442939b
--- /dev/null
+++ b/components/engine/daemon/discovery/discovery_test.go
@@ -0,0 +1,111 @@
+package discovery
+
+import (
+	"fmt"
+	"testing"
+	"time"
+)
+
+func TestDiscoveryOpts(t *testing.T) {
+	clusterOpts := map[string]string{"discovery.heartbeat": "10", "discovery.ttl": "5"}
+	heartbeat, ttl, err := discoveryOpts(clusterOpts)
+	if err == nil {
+		t.Fatal("discovery.ttl < discovery.heartbeat must fail")
+	}
+
+	clusterOpts = map[string]string{"discovery.heartbeat": "10", "discovery.ttl": "10"}
+	heartbeat, ttl, err = discoveryOpts(clusterOpts)
+	if err == nil {
+		t.Fatal("discovery.ttl == discovery.heartbeat must fail")
+	}
+
+	clusterOpts = map[string]string{"discovery.heartbeat": "-10", "discovery.ttl": "10"}
+	heartbeat, ttl, err = discoveryOpts(clusterOpts)
+	if err == nil {
+		t.Fatal("negative discovery.heartbeat must fail")
+	}
+
+	clusterOpts = map[string]string{"discovery.heartbeat": "10", "discovery.ttl": "-10"}
+	heartbeat, ttl, err = discoveryOpts(clusterOpts)
+	if err == nil {
+		t.Fatal("negative discovery.ttl must fail")
+	}
+
+	clusterOpts = map[string]string{"discovery.heartbeat": "invalid"}
+	heartbeat, ttl, err = discoveryOpts(clusterOpts)
+	if err == nil {
+		t.Fatal("invalid discovery.heartbeat must fail")
+	}
+
+	clusterOpts = map[string]string{"discovery.ttl": "invalid"}
+	heartbeat, ttl, err = discoveryOpts(clusterOpts)
+	if err == nil {
+		t.Fatal("invalid discovery.ttl must fail")
+	}
+
+	clusterOpts = map[string]string{"discovery.heartbeat": "10", "discovery.ttl": "20"}
+	heartbeat, ttl, err = discoveryOpts(clusterOpts)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if heartbeat != 10*time.Second {
+		t.Fatalf("Heartbeat - Expected : %v, Actual : %v", 10*time.Second, heartbeat)
+	}
+
+	if ttl != 20*time.Second {
+		t.Fatalf("TTL - Expected : %v, Actual : %v", 20*time.Second, ttl)
+	}
+
+	clusterOpts = map[string]string{"discovery.heartbeat": "10"}
+	heartbeat, ttl, err = discoveryOpts(clusterOpts)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if heartbeat != 10*time.Second {
+		t.Fatalf("Heartbeat - Expected : %v, Actual : %v", 10*time.Second, heartbeat)
+	}
+
+	expected := 10 * defaultDiscoveryTTLFactor * time.Second
+	if ttl != expected {
+		t.Fatalf("TTL - Expected : %v, Actual : %v", expected, ttl)
+	}
+
+	clusterOpts = map[string]string{"discovery.ttl": "30"}
+	heartbeat, ttl, err = discoveryOpts(clusterOpts)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if ttl != 30*time.Second {
+		t.Fatalf("TTL - Expected : %v, Actual : %v", 30*time.Second, ttl)
+	}
+
+	expected = 30 * time.Second / defaultDiscoveryTTLFactor
+	if heartbeat != expected {
+		t.Fatalf("Heartbeat - Expected : %v, Actual : %v", expected, heartbeat)
+	}
+
+	discoveryTTL := fmt.Sprintf("%d", defaultDiscoveryTTLFactor-1)
+	clusterOpts = map[string]string{"discovery.ttl": discoveryTTL}
+	heartbeat, ttl, err = discoveryOpts(clusterOpts)
+	if err == nil && heartbeat == 0 {
t.Fatal("discovery.heartbeat must be positive") + } + + clusterOpts = map[string]string{} + heartbeat, ttl, err = discoveryOpts(clusterOpts) + if err != nil { + t.Fatal(err) + } + + if heartbeat != defaultDiscoveryHeartbeat { + t.Fatalf("Heartbeat - Expected : %v, Actual : %v", defaultDiscoveryHeartbeat, heartbeat) + } + + expected = defaultDiscoveryHeartbeat * defaultDiscoveryTTLFactor + if ttl != expected { + t.Fatalf("TTL - Expected : %v, Actual : %v", expected, ttl) + } +} diff --git a/components/engine/daemon/disk_usage.go b/components/engine/daemon/disk_usage.go new file mode 100644 index 0000000000..83de00ab2e --- /dev/null +++ b/components/engine/daemon/disk_usage.go @@ -0,0 +1,117 @@ +package daemon + +import ( + "fmt" + "sync/atomic" + + "golang.org/x/net/context" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/directory" + "github.com/docker/docker/volume" + "github.com/opencontainers/go-digest" +) + +func (daemon *Daemon) getLayerRefs() map[layer.ChainID]int { + tmpImages := daemon.imageStore.Map() + layerRefs := map[layer.ChainID]int{} + for id, img := range tmpImages { + dgst := digest.Digest(id) + if len(daemon.referenceStore.References(dgst)) == 0 && len(daemon.imageStore.Children(id)) != 0 { + continue + } + + rootFS := *img.RootFS + rootFS.DiffIDs = nil + for _, id := range img.RootFS.DiffIDs { + rootFS.Append(id) + chid := rootFS.ChainID() + layerRefs[chid]++ + } + } + + return layerRefs +} + +// SystemDiskUsage returns information about the daemon data disk usage +func (daemon *Daemon) SystemDiskUsage(ctx context.Context) (*types.DiskUsage, error) { + if !atomic.CompareAndSwapInt32(&daemon.diskUsageRunning, 0, 1) { + return nil, fmt.Errorf("a disk usage operation is already running") + } + defer atomic.StoreInt32(&daemon.diskUsageRunning, 0) + + // Retrieve container list + allContainers, err := daemon.Containers(&types.ContainerListOptions{ + Size: true, + All: true, + }) + if err != nil { + return nil, fmt.Errorf("failed to retrieve container list: %v", err) + } + + // Get all top images with extra attributes + allImages, err := daemon.Images(filters.NewArgs(), false, true) + if err != nil { + return nil, fmt.Errorf("failed to retrieve image list: %v", err) + } + + // Get all local volumes + allVolumes := []*types.Volume{} + getLocalVols := func(v volume.Volume) error { + select { + case <-ctx.Done(): + return ctx.Err() + default: + name := v.Name() + refs := daemon.volumes.Refs(v) + + tv := volumeToAPIType(v) + sz, err := directory.Size(v.Path()) + if err != nil { + logrus.Warnf("failed to determine size of volume %v", name) + sz = -1 + } + tv.UsageData = &types.VolumeUsageData{Size: sz, RefCount: int64(len(refs))} + allVolumes = append(allVolumes, tv) + } + + return nil + } + + err = daemon.traverseLocalVolumes(getLocalVols) + if err != nil { + return nil, err + } + + // Get total layers size on disk + layerRefs := daemon.getLayerRefs() + allLayers := daemon.layerStore.Map() + var allLayersSize int64 + for _, l := range allLayers { + select { + case <-ctx.Done(): + return nil, ctx.Err() + default: + size, err := l.DiffSize() + if err == nil { + if _, ok := layerRefs[l.ChainID()]; ok { + allLayersSize += size + } else { + logrus.Warnf("found leaked image layer %v", l.ChainID()) + } + } else { + logrus.Warnf("failed to get diff size for layer %v", l.ChainID()) + } + } + } + + return &types.DiskUsage{ + LayersSize: allLayersSize, + 
Containers: allContainers, + Volumes: allVolumes, + Images: allImages, + }, nil +} diff --git a/components/engine/daemon/errors.go b/components/engine/daemon/errors.go new file mode 100644 index 0000000000..96c30df2ea --- /dev/null +++ b/components/engine/daemon/errors.go @@ -0,0 +1,41 @@ +package daemon + +import ( + "fmt" + + "github.com/docker/docker/api/errors" +) + +func (d *Daemon) imageNotExistToErrcode(err error) error { + if dne, isDNE := err.(ErrImageDoesNotExist); isDNE { + return errors.NewRequestNotFoundError(dne) + } + return err +} + +type errNotRunning struct { + containerID string +} + +func (e errNotRunning) Error() string { + return fmt.Sprintf("Container %s is not running", e.containerID) +} + +func (e errNotRunning) ContainerIsRunning() bool { + return false +} + +func errContainerIsRestarting(containerID string) error { + err := fmt.Errorf("Container %s is restarting, wait until the container is running", containerID) + return errors.NewRequestConflictError(err) +} + +func errExecNotFound(id string) error { + err := fmt.Errorf("No such exec instance '%s' found in daemon", id) + return errors.NewRequestNotFoundError(err) +} + +func errExecPaused(id string) error { + err := fmt.Errorf("Container %s is paused, unpause the container before exec", id) + return errors.NewRequestConflictError(err) +} diff --git a/components/engine/daemon/events.go b/components/engine/daemon/events.go new file mode 100644 index 0000000000..f5d188cf0b --- /dev/null +++ b/components/engine/daemon/events.go @@ -0,0 +1,322 @@ +package daemon + +import ( + "context" + "strconv" + "strings" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types/events" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/container" + daemonevents "github.com/docker/docker/daemon/events" + "github.com/docker/libnetwork" + swarmapi "github.com/docker/swarmkit/api" + gogotypes "github.com/gogo/protobuf/types" +) + +var ( + clusterEventAction = map[swarmapi.WatchActionKind]string{ + swarmapi.WatchActionKindCreate: "create", + swarmapi.WatchActionKindUpdate: "update", + swarmapi.WatchActionKindRemove: "remove", + } +) + +// LogContainerEvent generates an event related to a container with only the default attributes. +func (daemon *Daemon) LogContainerEvent(container *container.Container, action string) { + daemon.LogContainerEventWithAttributes(container, action, map[string]string{}) +} + +// LogContainerEventWithAttributes generates an event related to a container with specific given attributes. +func (daemon *Daemon) LogContainerEventWithAttributes(container *container.Container, action string, attributes map[string]string) { + copyAttributes(attributes, container.Config.Labels) + if container.Config.Image != "" { + attributes["image"] = container.Config.Image + } + attributes["name"] = strings.TrimLeft(container.Name, "/") + + actor := events.Actor{ + ID: container.ID, + Attributes: attributes, + } + daemon.EventsService.Log(action, events.ContainerEventType, actor) +} + +// LogImageEvent generates an event related to an image with only the default attributes. +func (daemon *Daemon) LogImageEvent(imageID, refName, action string) { + daemon.LogImageEventWithAttributes(imageID, refName, action, map[string]string{}) +} + +// LogImageEventWithAttributes generates an event related to an image with specific given attributes. 
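+// If the image still exists, its labels are merged into the attributes before
+// the event is emitted, mirroring what LogContainerEventWithAttributes does
+// with container labels.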
+func (daemon *Daemon) LogImageEventWithAttributes(imageID, refName, action string, attributes map[string]string) { + img, err := daemon.GetImage(imageID) + if err == nil && img.Config != nil { + // image has not been removed yet. + // it could be missing if the event is `delete`. + copyAttributes(attributes, img.Config.Labels) + } + if refName != "" { + attributes["name"] = refName + } + actor := events.Actor{ + ID: imageID, + Attributes: attributes, + } + + daemon.EventsService.Log(action, events.ImageEventType, actor) +} + +// LogPluginEvent generates an event related to a plugin with only the default attributes. +func (daemon *Daemon) LogPluginEvent(pluginID, refName, action string) { + daemon.LogPluginEventWithAttributes(pluginID, refName, action, map[string]string{}) +} + +// LogPluginEventWithAttributes generates an event related to a plugin with specific given attributes. +func (daemon *Daemon) LogPluginEventWithAttributes(pluginID, refName, action string, attributes map[string]string) { + attributes["name"] = refName + actor := events.Actor{ + ID: pluginID, + Attributes: attributes, + } + daemon.EventsService.Log(action, events.PluginEventType, actor) +} + +// LogVolumeEvent generates an event related to a volume. +func (daemon *Daemon) LogVolumeEvent(volumeID, action string, attributes map[string]string) { + actor := events.Actor{ + ID: volumeID, + Attributes: attributes, + } + daemon.EventsService.Log(action, events.VolumeEventType, actor) +} + +// LogNetworkEvent generates an event related to a network with only the default attributes. +func (daemon *Daemon) LogNetworkEvent(nw libnetwork.Network, action string) { + daemon.LogNetworkEventWithAttributes(nw, action, map[string]string{}) +} + +// LogNetworkEventWithAttributes generates an event related to a network with specific given attributes. +func (daemon *Daemon) LogNetworkEventWithAttributes(nw libnetwork.Network, action string, attributes map[string]string) { + attributes["name"] = nw.Name() + attributes["type"] = nw.Type() + actor := events.Actor{ + ID: nw.ID(), + Attributes: attributes, + } + daemon.EventsService.Log(action, events.NetworkEventType, actor) +} + +// LogDaemonEventWithAttributes generates an event related to the daemon itself with specific given attributes. +func (daemon *Daemon) LogDaemonEventWithAttributes(action string, attributes map[string]string) { + if daemon.EventsService != nil { + if info, err := daemon.SystemInfo(); err == nil && info.Name != "" { + attributes["name"] = info.Name + } + actor := events.Actor{ + ID: daemon.ID, + Attributes: attributes, + } + daemon.EventsService.Log(action, events.DaemonEventType, actor) + } +} + +// SubscribeToEvents returns the currently record of events, a channel to stream new events from, and a function to cancel the stream of events. +func (daemon *Daemon) SubscribeToEvents(since, until time.Time, filter filters.Args) ([]events.Message, chan interface{}) { + ef := daemonevents.NewFilter(filter) + return daemon.EventsService.SubscribeTopic(since, until, ef) +} + +// UnsubscribeFromEvents stops the event subscription for a client by closing the +// channel where the daemon sends events to. +func (daemon *Daemon) UnsubscribeFromEvents(listener chan interface{}) { + daemon.EventsService.Evict(listener) +} + +// copyAttributes guarantees that labels are not mutated by event triggers. 
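+// It copies the labels into the attributes map, so that later writes to the
+// attributes cannot modify the original label map, e.g.:
+//
+//	attrs := map[string]string{}
+//	copyAttributes(attrs, c.Config.Labels)
+//	attrs["name"] = "web-1" // does not touch c.Config.Labels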
+// copyAttributes guarantees that labels are not mutated by event triggers.
+func copyAttributes(attributes, labels map[string]string) {
+    if labels == nil {
+        return
+    }
+    for k, v := range labels {
+        attributes[k] = v
+    }
+}
+
+// ProcessClusterNotifications gets changes from the store and adds them to the event list.
+func (daemon *Daemon) ProcessClusterNotifications(ctx context.Context, watchStream chan *swarmapi.WatchMessage) {
+    for {
+        select {
+        case <-ctx.Done():
+            return
+        case message, ok := <-watchStream:
+            if !ok {
+                logrus.Debug("cluster event channel has stopped")
+                return
+            }
+            daemon.generateClusterEvent(message)
+        }
+    }
+}
+
+func (daemon *Daemon) generateClusterEvent(msg *swarmapi.WatchMessage) {
+    for _, event := range msg.Events {
+        if event.Object == nil {
+            logrus.Errorf("event without object: %v", event)
+            continue
+        }
+        switch v := event.Object.GetObject().(type) {
+        case *swarmapi.Object_Node:
+            daemon.logNodeEvent(event.Action, v.Node, event.OldObject.GetNode())
+        case *swarmapi.Object_Service:
+            daemon.logServiceEvent(event.Action, v.Service, event.OldObject.GetService())
+        case *swarmapi.Object_Network:
+            daemon.logNetworkEvent(event.Action, v.Network, event.OldObject.GetNetwork())
+        case *swarmapi.Object_Secret:
+            daemon.logSecretEvent(event.Action, v.Secret, event.OldObject.GetSecret())
+        default:
+            logrus.Warnf("unrecognized event: %v", event)
+        }
+    }
+}
+
+func (daemon *Daemon) logNetworkEvent(action swarmapi.WatchActionKind, net *swarmapi.Network, oldNet *swarmapi.Network) {
+    attributes := map[string]string{
+        "name": net.Spec.Annotations.Name,
+    }
+    eventTime := eventTimestamp(net.Meta, action)
+    daemon.logClusterEvent(action, net.ID, "network", attributes, eventTime)
+}
+
+func (daemon *Daemon) logSecretEvent(action swarmapi.WatchActionKind, secret *swarmapi.Secret, oldSecret *swarmapi.Secret) {
+    attributes := map[string]string{
+        "name": secret.Spec.Annotations.Name,
+    }
+    eventTime := eventTimestamp(secret.Meta, action)
+    daemon.logClusterEvent(action, secret.ID, "secret", attributes, eventTime)
+}
+
+func (daemon *Daemon) logNodeEvent(action swarmapi.WatchActionKind, node *swarmapi.Node, oldNode *swarmapi.Node) {
+    name := node.Spec.Annotations.Name
+    if name == "" && node.Description != nil {
+        name = node.Description.Hostname
+    }
+    attributes := map[string]string{
+        "name": name,
+    }
+    eventTime := eventTimestamp(node.Meta, action)
+    // In an update event, display the changes in attributes
+    if action == swarmapi.WatchActionKindUpdate && oldNode != nil {
+        if node.Spec.Availability != oldNode.Spec.Availability {
+            attributes["availability.old"] = strings.ToLower(oldNode.Spec.Availability.String())
+            attributes["availability.new"] = strings.ToLower(node.Spec.Availability.String())
+        }
+        if node.Role != oldNode.Role {
+            attributes["role.old"] = strings.ToLower(oldNode.Role.String())
+            attributes["role.new"] = strings.ToLower(node.Role.String())
+        }
+        if node.Status.State != oldNode.Status.State {
+            attributes["state.old"] = strings.ToLower(oldNode.Status.State.String())
+            attributes["state.new"] = strings.ToLower(node.Status.State.String())
+        }
+        // This handles changes within the manager role
+        if node.ManagerStatus != nil && oldNode.ManagerStatus != nil {
+            // leader change
+            if node.ManagerStatus.Leader != oldNode.ManagerStatus.Leader {
+                if node.ManagerStatus.Leader {
+                    attributes["leader.old"] = "false"
+                    attributes["leader.new"] = "true"
+                } else {
+                    attributes["leader.old"] = "true"
+                    attributes["leader.new"] = "false"
+                }
+            }
+            if node.ManagerStatus.Reachability != oldNode.ManagerStatus.Reachability {
+
attributes["reachability.old"] = strings.ToLower(oldNode.ManagerStatus.Reachability.String()) + attributes["reachability.new"] = strings.ToLower(node.ManagerStatus.Reachability.String()) + } + } + } + + daemon.logClusterEvent(action, node.ID, "node", attributes, eventTime) +} + +func (daemon *Daemon) logServiceEvent(action swarmapi.WatchActionKind, service *swarmapi.Service, oldService *swarmapi.Service) { + attributes := map[string]string{ + "name": service.Spec.Annotations.Name, + } + eventTime := eventTimestamp(service.Meta, action) + + if action == swarmapi.WatchActionKindUpdate && oldService != nil { + // check image + if x, ok := service.Spec.Task.GetRuntime().(*swarmapi.TaskSpec_Container); ok { + containerSpec := x.Container + if y, ok := oldService.Spec.Task.GetRuntime().(*swarmapi.TaskSpec_Container); ok { + oldContainerSpec := y.Container + if containerSpec.Image != oldContainerSpec.Image { + attributes["image.old"] = oldContainerSpec.Image + attributes["image.new"] = containerSpec.Image + } + } else { + // This should not happen. + logrus.Errorf("service %s runtime changed from %T to %T", service.Spec.Annotations.Name, oldService.Spec.Task.GetRuntime(), service.Spec.Task.GetRuntime()) + } + } + // check replicated count change + if x, ok := service.Spec.GetMode().(*swarmapi.ServiceSpec_Replicated); ok { + replicas := x.Replicated.Replicas + if y, ok := oldService.Spec.GetMode().(*swarmapi.ServiceSpec_Replicated); ok { + oldReplicas := y.Replicated.Replicas + if replicas != oldReplicas { + attributes["replicas.old"] = strconv.FormatUint(oldReplicas, 10) + attributes["replicas.new"] = strconv.FormatUint(replicas, 10) + } + } else { + // This should not happen. + logrus.Errorf("service %s mode changed from %T to %T", service.Spec.Annotations.Name, oldService.Spec.GetMode(), service.Spec.GetMode()) + } + } + if service.UpdateStatus != nil { + if oldService.UpdateStatus == nil { + attributes["updatestate.new"] = strings.ToLower(service.UpdateStatus.State.String()) + } else if service.UpdateStatus.State != oldService.UpdateStatus.State { + attributes["updatestate.old"] = strings.ToLower(oldService.UpdateStatus.State.String()) + attributes["updatestate.new"] = strings.ToLower(service.UpdateStatus.State.String()) + } + } + } + daemon.logClusterEvent(action, service.ID, "service", attributes, eventTime) +} + +func (daemon *Daemon) logClusterEvent(action swarmapi.WatchActionKind, id, eventType string, attributes map[string]string, eventTime time.Time) { + actor := events.Actor{ + ID: id, + Attributes: attributes, + } + + jm := events.Message{ + Action: clusterEventAction[action], + Type: eventType, + Actor: actor, + Scope: "swarm", + Time: eventTime.UTC().Unix(), + TimeNano: eventTime.UTC().UnixNano(), + } + daemon.EventsService.PublishMessage(jm) +} + +func eventTimestamp(meta swarmapi.Meta, action swarmapi.WatchActionKind) time.Time { + var eventTime time.Time + switch action { + case swarmapi.WatchActionKindCreate: + eventTime, _ = gogotypes.TimestampFromProto(meta.CreatedAt) + case swarmapi.WatchActionKindUpdate: + eventTime, _ = gogotypes.TimestampFromProto(meta.UpdatedAt) + case swarmapi.WatchActionKindRemove: + // There is no timestamp from store message for remove operations. + // Use current time. 
+        eventTime = time.Now()
+    }
+    return eventTime
+}
diff --git a/components/engine/daemon/events/events.go b/components/engine/daemon/events/events.go
new file mode 100644
index 0000000000..d1529e1cea
--- /dev/null
+++ b/components/engine/daemon/events/events.go
@@ -0,0 +1,165 @@
+package events
+
+import (
+    "sync"
+    "time"
+
+    eventtypes "github.com/docker/docker/api/types/events"
+    "github.com/docker/docker/pkg/pubsub"
+)
+
+const (
+    eventsLimit = 256
+    bufferSize  = 1024
+)
+
+// Events is a pubsub channel for events generated by the engine.
+type Events struct {
+    mu     sync.Mutex
+    events []eventtypes.Message
+    pub    *pubsub.Publisher
+}
+
+// New returns a new *Events instance.
+func New() *Events {
+    return &Events{
+        events: make([]eventtypes.Message, 0, eventsLimit),
+        pub:    pubsub.NewPublisher(100*time.Millisecond, bufferSize),
+    }
+}
+
+// Subscribe adds a new listener to events. It returns a slice of the stored
+// events (at most eventsLimit of them), a channel on which new events arrive
+// (as interface{}, so a type assertion is needed), and a function to call
+// to stop the stream of events.
+func (e *Events) Subscribe() ([]eventtypes.Message, chan interface{}, func()) {
+    eventSubscribers.Inc()
+    e.mu.Lock()
+    current := make([]eventtypes.Message, len(e.events))
+    copy(current, e.events)
+    l := e.pub.Subscribe()
+    e.mu.Unlock()
+
+    cancel := func() {
+        e.Evict(l)
+    }
+    return current, l, cancel
+}
+
+// SubscribeTopic adds a new listener to events. It returns a slice of the
+// stored events that match the filter (at most eventsLimit of them) and a
+// channel on which new events arrive (as interface{}, so a type assertion
+// is needed).
+func (e *Events) SubscribeTopic(since, until time.Time, ef *Filter) ([]eventtypes.Message, chan interface{}) {
+    eventSubscribers.Inc()
+    e.mu.Lock()
+
+    var topic func(m interface{}) bool
+    if ef != nil && ef.filter.Len() > 0 {
+        topic = func(m interface{}) bool { return ef.Include(m.(eventtypes.Message)) }
+    }
+
+    buffered := e.loadBufferedEvents(since, until, topic)
+
+    var ch chan interface{}
+    if topic != nil {
+        ch = e.pub.SubscribeTopic(topic)
+    } else {
+        // Subscribe to all events if there are no filters
+        ch = e.pub.Subscribe()
+    }
+
+    e.mu.Unlock()
+    return buffered, ch
+}
+
+// Evict evicts a listener from pubsub.
+func (e *Events) Evict(l chan interface{}) {
+    eventSubscribers.Dec()
+    e.pub.Evict(l)
+}
+
+// Log creates a local scope message and publishes it.
+func (e *Events) Log(action, eventType string, actor eventtypes.Actor) {
+    now := time.Now().UTC()
+    jm := eventtypes.Message{
+        Action:   action,
+        Type:     eventType,
+        Actor:    actor,
+        Scope:    "local",
+        Time:     now.Unix(),
+        TimeNano: now.UnixNano(),
+    }
+
+    // fill deprecated fields for container and images
+    switch eventType {
+    case eventtypes.ContainerEventType:
+        jm.ID = actor.ID
+        jm.Status = action
+        jm.From = actor.Attributes["image"]
+    case eventtypes.ImageEventType:
+        jm.ID = actor.ID
+        jm.Status = action
+    }
+
+    e.PublishMessage(jm)
+}
+
+// PublishMessage broadcasts the event to listeners. Each listener has 100 milliseconds to
+// receive the event or it will be skipped.
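// Editor's note on the code below (descriptive only): the events slice acts as a
// fixed-size ring buffer. Once eventsLimit messages are stored, each new message
// evicts the oldest, so subscribers never replay more than eventsLimit historical
// events.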
+func (e *Events) PublishMessage(jm eventtypes.Message) { + eventsCounter.Inc() + + e.mu.Lock() + if len(e.events) == cap(e.events) { + // discard oldest event + copy(e.events, e.events[1:]) + e.events[len(e.events)-1] = jm + } else { + e.events = append(e.events, jm) + } + e.mu.Unlock() + e.pub.Publish(jm) +} + +// SubscribersCount returns number of event listeners +func (e *Events) SubscribersCount() int { + return e.pub.Len() +} + +// loadBufferedEvents iterates over the cached events in the buffer +// and returns those that were emitted between two specific dates. +// It uses `time.Unix(seconds, nanoseconds)` to generate valid dates with those arguments. +// It filters those buffered messages with a topic function if it's not nil, otherwise it adds all messages. +func (e *Events) loadBufferedEvents(since, until time.Time, topic func(interface{}) bool) []eventtypes.Message { + var buffered []eventtypes.Message + if since.IsZero() && until.IsZero() { + return buffered + } + + var sinceNanoUnix int64 + if !since.IsZero() { + sinceNanoUnix = since.UnixNano() + } + + var untilNanoUnix int64 + if !until.IsZero() { + untilNanoUnix = until.UnixNano() + } + + for i := len(e.events) - 1; i >= 0; i-- { + ev := e.events[i] + + if ev.TimeNano < sinceNanoUnix { + break + } + + if untilNanoUnix > 0 && ev.TimeNano > untilNanoUnix { + continue + } + + if topic == nil || topic(ev) { + buffered = append([]eventtypes.Message{ev}, buffered...) + } + } + return buffered +} diff --git a/components/engine/daemon/events/events_test.go b/components/engine/daemon/events/events_test.go new file mode 100644 index 0000000000..ffb4e50bf9 --- /dev/null +++ b/components/engine/daemon/events/events_test.go @@ -0,0 +1,275 @@ +package events + +import ( + "fmt" + "testing" + "time" + + "github.com/docker/docker/api/types/events" + timetypes "github.com/docker/docker/api/types/time" + eventstestutils "github.com/docker/docker/daemon/events/testutils" +) + +func TestEventsLog(t *testing.T) { + e := New() + _, l1, _ := e.Subscribe() + _, l2, _ := e.Subscribe() + defer e.Evict(l1) + defer e.Evict(l2) + count := e.SubscribersCount() + if count != 2 { + t.Fatalf("Must be 2 subscribers, got %d", count) + } + actor := events.Actor{ + ID: "cont", + Attributes: map[string]string{"image": "image"}, + } + e.Log("test", events.ContainerEventType, actor) + select { + case msg := <-l1: + jmsg, ok := msg.(events.Message) + if !ok { + t.Fatalf("Unexpected type %T", msg) + } + if len(e.events) != 1 { + t.Fatalf("Must be only one event, got %d", len(e.events)) + } + if jmsg.Status != "test" { + t.Fatalf("Status should be test, got %s", jmsg.Status) + } + if jmsg.ID != "cont" { + t.Fatalf("ID should be cont, got %s", jmsg.ID) + } + if jmsg.From != "image" { + t.Fatalf("From should be image, got %s", jmsg.From) + } + case <-time.After(1 * time.Second): + t.Fatal("Timeout waiting for broadcasted message") + } + select { + case msg := <-l2: + jmsg, ok := msg.(events.Message) + if !ok { + t.Fatalf("Unexpected type %T", msg) + } + if len(e.events) != 1 { + t.Fatalf("Must be only one event, got %d", len(e.events)) + } + if jmsg.Status != "test" { + t.Fatalf("Status should be test, got %s", jmsg.Status) + } + if jmsg.ID != "cont" { + t.Fatalf("ID should be cont, got %s", jmsg.ID) + } + if jmsg.From != "image" { + t.Fatalf("From should be image, got %s", jmsg.From) + } + case <-time.After(1 * time.Second): + t.Fatal("Timeout waiting for broadcasted message") + } +} + +func TestEventsLogTimeout(t *testing.T) { + e := New() + _, l, _ := e.Subscribe() 
+ defer e.Evict(l) + + c := make(chan struct{}) + go func() { + actor := events.Actor{ + ID: "image", + } + e.Log("test", events.ImageEventType, actor) + close(c) + }() + + select { + case <-c: + case <-time.After(time.Second): + t.Fatal("Timeout publishing message") + } +} + +func TestLogEvents(t *testing.T) { + e := New() + + for i := 0; i < eventsLimit+16; i++ { + action := fmt.Sprintf("action_%d", i) + id := fmt.Sprintf("cont_%d", i) + from := fmt.Sprintf("image_%d", i) + + actor := events.Actor{ + ID: id, + Attributes: map[string]string{"image": from}, + } + e.Log(action, events.ContainerEventType, actor) + } + time.Sleep(50 * time.Millisecond) + current, l, _ := e.Subscribe() + for i := 0; i < 10; i++ { + num := i + eventsLimit + 16 + action := fmt.Sprintf("action_%d", num) + id := fmt.Sprintf("cont_%d", num) + from := fmt.Sprintf("image_%d", num) + + actor := events.Actor{ + ID: id, + Attributes: map[string]string{"image": from}, + } + e.Log(action, events.ContainerEventType, actor) + } + if len(e.events) != eventsLimit { + t.Fatalf("Must be %d events, got %d", eventsLimit, len(e.events)) + } + + var msgs []events.Message + for len(msgs) < 10 { + m := <-l + jm, ok := (m).(events.Message) + if !ok { + t.Fatalf("Unexpected type %T", m) + } + msgs = append(msgs, jm) + } + if len(current) != eventsLimit { + t.Fatalf("Must be %d events, got %d", eventsLimit, len(current)) + } + first := current[0] + if first.Status != "action_16" { + t.Fatalf("First action is %s, must be action_16", first.Status) + } + last := current[len(current)-1] + if last.Status != "action_271" { + t.Fatalf("Last action is %s, must be action_271", last.Status) + } + + firstC := msgs[0] + if firstC.Status != "action_272" { + t.Fatalf("First action is %s, must be action_272", firstC.Status) + } + lastC := msgs[len(msgs)-1] + if lastC.Status != "action_281" { + t.Fatalf("Last action is %s, must be action_281", lastC.Status) + } +} + +// https://github.com/docker/docker/issues/20999 +// Fixtures: +// +//2016-03-07T17:28:03.022433271+02:00 container die 0b863f2a26c18557fc6cdadda007c459f9ec81b874780808138aea78a3595079 (image=ubuntu, name=small_hoover) +//2016-03-07T17:28:03.091719377+02:00 network disconnect 19c5ed41acb798f26b751e0035cd7821741ab79e2bbd59a66b5fd8abf954eaa0 (type=bridge, container=0b863f2a26c18557fc6cdadda007c459f9ec81b874780808138aea78a3595079, name=bridge) +//2016-03-07T17:28:03.129014751+02:00 container destroy 0b863f2a26c18557fc6cdadda007c459f9ec81b874780808138aea78a3595079 (image=ubuntu, name=small_hoover) +func TestLoadBufferedEvents(t *testing.T) { + now := time.Now() + f, err := timetypes.GetTimestamp("2016-03-07T17:28:03.100000000+02:00", now) + if err != nil { + t.Fatal(err) + } + s, sNano, err := timetypes.ParseTimestamps(f, -1) + if err != nil { + t.Fatal(err) + } + + m1, err := eventstestutils.Scan("2016-03-07T17:28:03.022433271+02:00 container die 0b863f2a26c18557fc6cdadda007c459f9ec81b874780808138aea78a3595079 (image=ubuntu, name=small_hoover)") + if err != nil { + t.Fatal(err) + } + m2, err := eventstestutils.Scan("2016-03-07T17:28:03.091719377+02:00 network disconnect 19c5ed41acb798f26b751e0035cd7821741ab79e2bbd59a66b5fd8abf954eaa0 (type=bridge, container=0b863f2a26c18557fc6cdadda007c459f9ec81b874780808138aea78a3595079, name=bridge)") + if err != nil { + t.Fatal(err) + } + m3, err := eventstestutils.Scan("2016-03-07T17:28:03.129014751+02:00 container destroy 0b863f2a26c18557fc6cdadda007c459f9ec81b874780808138aea78a3595079 (image=ubuntu, name=small_hoover)") + if err != nil { + t.Fatal(err) 
+    }
+
+    events := &Events{
+        events: []events.Message{*m1, *m2, *m3},
+    }
+
+    since := time.Unix(s, sNano)
+    until := time.Time{}
+
+    out := events.loadBufferedEvents(since, until, nil)
+    if len(out) != 1 {
+        t.Fatalf("expected 1 message, got %d: %v", len(out), out)
+    }
+}
+
+func TestLoadBufferedEventsOnlyFromPast(t *testing.T) {
+    now := time.Now()
+    f, err := timetypes.GetTimestamp("2016-03-07T17:28:03.090000000+02:00", now)
+    if err != nil {
+        t.Fatal(err)
+    }
+    s, sNano, err := timetypes.ParseTimestamps(f, 0)
+    if err != nil {
+        t.Fatal(err)
+    }
+
+    f, err = timetypes.GetTimestamp("2016-03-07T17:28:03.100000000+02:00", now)
+    if err != nil {
+        t.Fatal(err)
+    }
+    u, uNano, err := timetypes.ParseTimestamps(f, 0)
+    if err != nil {
+        t.Fatal(err)
+    }
+
+    m1, err := eventstestutils.Scan("2016-03-07T17:28:03.022433271+02:00 container die 0b863f2a26c18557fc6cdadda007c459f9ec81b874780808138aea78a3595079 (image=ubuntu, name=small_hoover)")
+    if err != nil {
+        t.Fatal(err)
+    }
+    m2, err := eventstestutils.Scan("2016-03-07T17:28:03.091719377+02:00 network disconnect 19c5ed41acb798f26b751e0035cd7821741ab79e2bbd59a66b5fd8abf954eaa0 (type=bridge, container=0b863f2a26c18557fc6cdadda007c459f9ec81b874780808138aea78a3595079, name=bridge)")
+    if err != nil {
+        t.Fatal(err)
+    }
+    m3, err := eventstestutils.Scan("2016-03-07T17:28:03.129014751+02:00 container destroy 0b863f2a26c18557fc6cdadda007c459f9ec81b874780808138aea78a3595079 (image=ubuntu, name=small_hoover)")
+    if err != nil {
+        t.Fatal(err)
+    }
+
+    events := &Events{
+        events: []events.Message{*m1, *m2, *m3},
+    }
+
+    since := time.Unix(s, sNano)
+    until := time.Unix(u, uNano)
+
+    out := events.loadBufferedEvents(since, until, nil)
+    if len(out) != 1 {
+        t.Fatalf("expected 1 message, got %d: %v", len(out), out)
+    }
+
+    if out[0].Type != "network" {
+        t.Fatalf("expected network event, got %s", out[0].Type)
+    }
+}
+
+// #13753
+func TestIgnoreBufferedWhenNoTimes(t *testing.T) {
+    m1, err := eventstestutils.Scan("2016-03-07T17:28:03.022433271+02:00 container die 0b863f2a26c18557fc6cdadda007c459f9ec81b874780808138aea78a3595079 (image=ubuntu, name=small_hoover)")
+    if err != nil {
+        t.Fatal(err)
+    }
+    m2, err := eventstestutils.Scan("2016-03-07T17:28:03.091719377+02:00 network disconnect 19c5ed41acb798f26b751e0035cd7821741ab79e2bbd59a66b5fd8abf954eaa0 (type=bridge, container=0b863f2a26c18557fc6cdadda007c459f9ec81b874780808138aea78a3595079, name=bridge)")
+    if err != nil {
+        t.Fatal(err)
+    }
+    m3, err := eventstestutils.Scan("2016-03-07T17:28:03.129014751+02:00 container destroy 0b863f2a26c18557fc6cdadda007c459f9ec81b874780808138aea78a3595079 (image=ubuntu, name=small_hoover)")
+    if err != nil {
+        t.Fatal(err)
+    }
+
+    events := &Events{
+        events: []events.Message{*m1, *m2, *m3},
+    }
+
+    since := time.Time{}
+    until := time.Time{}
+
+    out := events.loadBufferedEvents(since, until, nil)
+    if len(out) != 0 {
+        t.Fatalf("expected 0 buffered events, got %q", out)
+    }
+}
diff --git a/components/engine/daemon/events/filter.go b/components/engine/daemon/events/filter.go
new file mode 100644
index 0000000000..7f1a5fda1c
--- /dev/null
+++ b/components/engine/daemon/events/filter.go
@@ -0,0 +1,130 @@
+package events
+
+import (
+    "github.com/docker/distribution/reference"
+    "github.com/docker/docker/api/types/events"
+    "github.com/docker/docker/api/types/filters"
+)
+
+// Filter can filter out docker events from a stream
+type Filter struct {
+    filter filters.Args
+}
+
+// NewFilter creates a new Filter
+func NewFilter(filter filters.Args) *Filter {
+    return &Filter{filter: filter}
+}
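// Editor's note: a minimal, illustrative sketch (not part of this patch) of how a
// caller might build a Filter. exampleEventFilter is a hypothetical helper;
// filters.NewArgs and Args.Add come from the filters package imported above.
func exampleEventFilter() *Filter {
    args := filters.NewArgs()
    args.Add("type", "container")      // keep only container events
    args.Add("event", "health_status") // fuzzy-matched, see matchEvent below
    args.Add("label", "env=prod")      // matched against actor attributes
    return NewFilter(args)
}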
+// Include returns true when the event ev is included by the filters
+func (ef *Filter) Include(ev events.Message) bool {
+    return ef.matchEvent(ev) &&
+        ef.filter.ExactMatch("type", ev.Type) &&
+        ef.matchScope(ev.Scope) &&
+        ef.matchDaemon(ev) &&
+        ef.matchContainer(ev) &&
+        ef.matchPlugin(ev) &&
+        ef.matchVolume(ev) &&
+        ef.matchNetwork(ev) &&
+        ef.matchImage(ev) &&
+        ef.matchLabels(ev.Actor.Attributes)
+}
+
+func (ef *Filter) matchEvent(ev events.Message) bool {
+    // #25798: if an event filter contains health_status, exec_create, or exec_start
+    // without a colon, do a FuzzyMatch instead of an ExactMatch.
+    if ef.filterContains("event", map[string]struct{}{"health_status": {}, "exec_create": {}, "exec_start": {}}) {
+        return ef.filter.FuzzyMatch("event", ev.Action)
+    }
+    return ef.filter.ExactMatch("event", ev.Action)
+}
+
+func (ef *Filter) filterContains(field string, values map[string]struct{}) bool {
+    for _, v := range ef.filter.Get(field) {
+        if _, ok := values[v]; ok {
+            return true
+        }
+    }
+    return false
+}
+
+func (ef *Filter) matchScope(scope string) bool {
+    if !ef.filter.Include("scope") {
+        return true
+    }
+    return ef.filter.ExactMatch("scope", scope)
+}
+
+func (ef *Filter) matchLabels(attributes map[string]string) bool {
+    if !ef.filter.Include("label") {
+        return true
+    }
+    return ef.filter.MatchKVList("label", attributes)
+}
+
+func (ef *Filter) matchDaemon(ev events.Message) bool {
+    return ef.fuzzyMatchName(ev, events.DaemonEventType)
+}
+
+func (ef *Filter) matchContainer(ev events.Message) bool {
+    return ef.fuzzyMatchName(ev, events.ContainerEventType)
+}
+
+func (ef *Filter) matchPlugin(ev events.Message) bool {
+    return ef.fuzzyMatchName(ev, events.PluginEventType)
+}
+
+func (ef *Filter) matchVolume(ev events.Message) bool {
+    return ef.fuzzyMatchName(ev, events.VolumeEventType)
+}
+
+func (ef *Filter) matchNetwork(ev events.Message) bool {
+    return ef.fuzzyMatchName(ev, events.NetworkEventType)
+}
+
+func (ef *Filter) matchService(ev events.Message) bool {
+    return ef.fuzzyMatchName(ev, events.ServiceEventType)
+}
+
+func (ef *Filter) matchNode(ev events.Message) bool {
+    return ef.fuzzyMatchName(ev, events.NodeEventType)
+}
+
+func (ef *Filter) matchSecret(ev events.Message) bool {
+    return ef.fuzzyMatchName(ev, events.SecretEventType)
+}
+
+func (ef *Filter) fuzzyMatchName(ev events.Message, eventType string) bool {
+    return ef.filter.FuzzyMatch(eventType, ev.Actor.ID) ||
+        ef.filter.FuzzyMatch(eventType, ev.Actor.Attributes["name"])
+}
+
+// matchImage matches against both event.Actor.ID (for image events)
+// and event.Actor.Attributes["image"] (for container events), so that any container that was created
+// from an image will be included in the image events. Also compare both
+// against the stripped repo name without any tags.
+func (ef *Filter) matchImage(ev events.Message) bool {
+    id := ev.Actor.ID
+    nameAttr := "image"
+    var imageName string
+
+    if ev.Type == events.ImageEventType {
+        nameAttr = "name"
+    }
+
+    if n, ok := ev.Actor.Attributes[nameAttr]; ok {
+        imageName = n
+    }
+    return ef.filter.ExactMatch("image", id) ||
+        ef.filter.ExactMatch("image", imageName) ||
+        ef.filter.ExactMatch("image", stripTag(id)) ||
+        ef.filter.ExactMatch("image", stripTag(imageName))
+}
+
+func stripTag(image string) string {
+    ref, err := reference.ParseNormalizedNamed(image)
+    if err != nil {
+        return image
+    }
+    return reference.FamiliarName(ref)
+}
diff --git a/components/engine/daemon/events/metrics.go b/components/engine/daemon/events/metrics.go
new file mode 100644
index 0000000000..c9a89ec0ed
--- /dev/null
+++ b/components/engine/daemon/events/metrics.go
@@ -0,0 +1,15 @@
+package events
+
+import "github.com/docker/go-metrics"
+
+var (
+    eventsCounter    metrics.Counter
+    eventSubscribers metrics.Gauge
+)
+
+func init() {
+    ns := metrics.NewNamespace("engine", "daemon", nil)
+    eventsCounter = ns.NewCounter("events", "The number of events logged")
+    eventSubscribers = ns.NewGauge("events_subscribers", "The number of current subscribers to events", metrics.Total)
+    metrics.Register(ns)
+}
diff --git a/components/engine/daemon/events/testutils/testutils.go b/components/engine/daemon/events/testutils/testutils.go
new file mode 100644
index 0000000000..3544446e18
--- /dev/null
+++ b/components/engine/daemon/events/testutils/testutils.go
@@ -0,0 +1,76 @@
+package testutils
+
+import (
+    "fmt"
+    "regexp"
+    "strings"
+    "time"
+
+    "github.com/docker/docker/api/types/events"
+    timetypes "github.com/docker/docker/api/types/time"
+)
+
+var (
+    reTimestamp  = `(?P<timestamp>\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}.\d{9}(:?(:?(:?-|\+)\d{2}:\d{2})|Z))`
+    reEventType  = `(?P<eventType>\w+)`
+    reAction     = `(?P<action>\w+)`
+    reID         = `(?P<id>[^\s]+)`
+    reAttributes = `(\s\((?P<attributes>[^\)]+)\))?`
+    reString     = fmt.Sprintf(`\A%s\s%s\s%s\s%s%s\z`, reTimestamp, reEventType, reAction, reID, reAttributes)
+
+    // eventCliRegexp is a regular expression that matches all possible event outputs in the cli
+    eventCliRegexp = regexp.MustCompile(reString)
+)
+
+// ScanMap takes an event string, formatted like the default ones in the cli output,
+// and turns it into a map.
+func ScanMap(text string) map[string]string {
+    matches := eventCliRegexp.FindAllStringSubmatch(text, -1)
+    md := map[string]string{}
+    if len(matches) == 0 {
+        return md
+    }
+
+    names := eventCliRegexp.SubexpNames()
+    for i, n := range matches[0] {
+        md[names[i]] = n
+    }
+    return md
+}
+
+// Scan takes an event string, formatted like the default ones in the cli output,
+// and turns it into an event message.
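// Editor's note (illustrative usage, not part of this patch; the truncated ID is an
// example value):
//
//    m, err := Scan("2016-03-07T17:28:03.022433271+02:00 container die 0b863f2a26c1 (image=ubuntu, name=small_hoover)")
//    // on success: m.Type == "container", m.Action == "die",
//    // and m.Actor.Attributes["image"] == "ubuntu"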
+func Scan(text string) (*events.Message, error) { + md := ScanMap(text) + if len(md) == 0 { + return nil, fmt.Errorf("text is not an event: %s", text) + } + + f, err := timetypes.GetTimestamp(md["timestamp"], time.Now()) + if err != nil { + return nil, err + } + + t, tn, err := timetypes.ParseTimestamps(f, -1) + if err != nil { + return nil, err + } + + attrs := make(map[string]string) + for _, a := range strings.SplitN(md["attributes"], ", ", -1) { + kv := strings.SplitN(a, "=", 2) + attrs[kv[0]] = kv[1] + } + + tu := time.Unix(t, tn) + return &events.Message{ + Time: t, + TimeNano: tu.UnixNano(), + Type: md["eventType"], + Action: md["action"], + Actor: events.Actor{ + ID: md["id"], + Attributes: attrs, + }, + }, nil +} diff --git a/components/engine/daemon/events_test.go b/components/engine/daemon/events_test.go new file mode 100644 index 0000000000..aa78664b23 --- /dev/null +++ b/components/engine/daemon/events_test.go @@ -0,0 +1,94 @@ +package daemon + +import ( + "testing" + "time" + + containertypes "github.com/docker/docker/api/types/container" + eventtypes "github.com/docker/docker/api/types/events" + "github.com/docker/docker/container" + "github.com/docker/docker/daemon/events" +) + +func TestLogContainerEventCopyLabels(t *testing.T) { + e := events.New() + _, l, _ := e.Subscribe() + defer e.Evict(l) + + container := &container.Container{ + CommonContainer: container.CommonContainer{ + ID: "container_id", + Name: "container_name", + Config: &containertypes.Config{ + Image: "image_name", + Labels: map[string]string{ + "node": "1", + "os": "alpine", + }, + }, + }, + } + daemon := &Daemon{ + EventsService: e, + } + daemon.LogContainerEvent(container, "create") + + if _, mutated := container.Config.Labels["image"]; mutated { + t.Fatalf("Expected to not mutate the container labels, got %q", container.Config.Labels) + } + + validateTestAttributes(t, l, map[string]string{ + "node": "1", + "os": "alpine", + }) +} + +func TestLogContainerEventWithAttributes(t *testing.T) { + e := events.New() + _, l, _ := e.Subscribe() + defer e.Evict(l) + + container := &container.Container{ + CommonContainer: container.CommonContainer{ + ID: "container_id", + Name: "container_name", + Config: &containertypes.Config{ + Labels: map[string]string{ + "node": "1", + "os": "alpine", + }, + }, + }, + } + daemon := &Daemon{ + EventsService: e, + } + attributes := map[string]string{ + "node": "2", + "foo": "bar", + } + daemon.LogContainerEventWithAttributes(container, "create", attributes) + + validateTestAttributes(t, l, map[string]string{ + "node": "1", + "foo": "bar", + }) +} + +func validateTestAttributes(t *testing.T, l chan interface{}, expectedAttributesToTest map[string]string) { + select { + case ev := <-l: + event, ok := ev.(eventtypes.Message) + if !ok { + t.Fatalf("Unexpected event message: %q", ev) + } + for key, expected := range expectedAttributesToTest { + actual, ok := event.Actor.Attributes[key] + if !ok || actual != expected { + t.Fatalf("Expected value for key %s to be %s, but was %s (event:%v)", key, expected, actual, event) + } + } + case <-time.After(10 * time.Second): + t.Fatal("LogEvent test timed out") + } +} diff --git a/components/engine/daemon/exec.go b/components/engine/daemon/exec.go new file mode 100644 index 0000000000..72d01c8c23 --- /dev/null +++ b/components/engine/daemon/exec.go @@ -0,0 +1,299 @@ +package daemon + +import ( + "fmt" + "io" + "strings" + "time" + + "golang.org/x/net/context" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/errors" + 
"github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/strslice" + "github.com/docker/docker/container" + "github.com/docker/docker/container/stream" + "github.com/docker/docker/daemon/exec" + "github.com/docker/docker/libcontainerd" + "github.com/docker/docker/pkg/pools" + "github.com/docker/docker/pkg/signal" + "github.com/docker/docker/pkg/term" +) + +// Seconds to wait after sending TERM before trying KILL +const termProcessTimeout = 10 + +func (d *Daemon) registerExecCommand(container *container.Container, config *exec.Config) { + // Storing execs in container in order to kill them gracefully whenever the container is stopped or removed. + container.ExecCommands.Add(config.ID, config) + // Storing execs in daemon for easy access via Engine API. + d.execCommands.Add(config.ID, config) +} + +// ExecExists looks up the exec instance and returns a bool if it exists or not. +// It will also return the error produced by `getConfig` +func (d *Daemon) ExecExists(name string) (bool, error) { + if _, err := d.getExecConfig(name); err != nil { + return false, err + } + return true, nil +} + +// getExecConfig looks up the exec instance by name. If the container associated +// with the exec instance is stopped or paused, it will return an error. +func (d *Daemon) getExecConfig(name string) (*exec.Config, error) { + ec := d.execCommands.Get(name) + + // If the exec is found but its container is not in the daemon's list of + // containers then it must have been deleted, in which case instead of + // saying the container isn't running, we should return a 404 so that + // the user sees the same error now that they will after the + // 5 minute clean-up loop is run which erases old/dead execs. + + if ec != nil { + if container := d.containers.Get(ec.ContainerID); container != nil { + if !container.IsRunning() { + return nil, fmt.Errorf("Container %s is not running: %s", container.ID, container.State.String()) + } + if container.IsPaused() { + return nil, errExecPaused(container.ID) + } + if container.IsRestarting() { + return nil, errContainerIsRestarting(container.ID) + } + return ec, nil + } + } + + return nil, errExecNotFound(name) +} + +func (d *Daemon) unregisterExecCommand(container *container.Container, execConfig *exec.Config) { + container.ExecCommands.Delete(execConfig.ID) + d.execCommands.Delete(execConfig.ID) +} + +func (d *Daemon) getActiveContainer(name string) (*container.Container, error) { + container, err := d.GetContainer(name) + if err != nil { + return nil, err + } + + if !container.IsRunning() { + return nil, errNotRunning{container.ID} + } + if container.IsPaused() { + return nil, errExecPaused(name) + } + if container.IsRestarting() { + return nil, errContainerIsRestarting(container.ID) + } + return container, nil +} + +// ContainerExecCreate sets up an exec in a running container. 
+func (d *Daemon) ContainerExecCreate(name string, config *types.ExecConfig) (string, error) { + cntr, err := d.getActiveContainer(name) + if err != nil { + return "", err + } + + cmd := strslice.StrSlice(config.Cmd) + entrypoint, args := d.getEntrypointAndArgs(strslice.StrSlice{}, cmd) + + keys := []byte{} + if config.DetachKeys != "" { + keys, err = term.ToBytes(config.DetachKeys) + if err != nil { + err = fmt.Errorf("Invalid escape keys (%s) provided", config.DetachKeys) + return "", err + } + } + + execConfig := exec.NewConfig() + execConfig.OpenStdin = config.AttachStdin + execConfig.OpenStdout = config.AttachStdout + execConfig.OpenStderr = config.AttachStderr + execConfig.ContainerID = cntr.ID + execConfig.DetachKeys = keys + execConfig.Entrypoint = entrypoint + execConfig.Args = args + execConfig.Tty = config.Tty + execConfig.Privileged = config.Privileged + execConfig.User = config.User + + linkedEnv, err := d.setupLinkedContainers(cntr) + if err != nil { + return "", err + } + execConfig.Env = container.ReplaceOrAppendEnvValues(cntr.CreateDaemonEnvironment(config.Tty, linkedEnv), config.Env) + if len(execConfig.User) == 0 { + execConfig.User = cntr.Config.User + } + + d.registerExecCommand(cntr, execConfig) + + d.LogContainerEvent(cntr, "exec_create: "+execConfig.Entrypoint+" "+strings.Join(execConfig.Args, " ")) + + return execConfig.ID, nil +} + +// ContainerExecStart starts a previously set up exec instance. The +// std streams are set up. +// If ctx is cancelled, the process is terminated. +func (d *Daemon) ContainerExecStart(ctx context.Context, name string, stdin io.ReadCloser, stdout io.Writer, stderr io.Writer) (err error) { + var ( + cStdin io.ReadCloser + cStdout, cStderr io.Writer + ) + + ec, err := d.getExecConfig(name) + if err != nil { + return errExecNotFound(name) + } + + ec.Lock() + if ec.ExitCode != nil { + ec.Unlock() + err := fmt.Errorf("Error: Exec command %s has already run", ec.ID) + return errors.NewRequestConflictError(err) + } + + if ec.Running { + ec.Unlock() + return fmt.Errorf("Error: Exec command %s is already running", ec.ID) + } + ec.Running = true + ec.Unlock() + + c := d.containers.Get(ec.ContainerID) + logrus.Debugf("starting exec command %s in container %s", ec.ID, c.ID) + d.LogContainerEvent(c, "exec_start: "+ec.Entrypoint+" "+strings.Join(ec.Args, " ")) + + defer func() { + if err != nil { + ec.Lock() + ec.Running = false + exitCode := 126 + ec.ExitCode = &exitCode + if err := ec.CloseStreams(); err != nil { + logrus.Errorf("failed to cleanup exec %s streams: %s", c.ID, err) + } + ec.Unlock() + c.ExecCommands.Delete(ec.ID) + } + }() + + if ec.OpenStdin && stdin != nil { + r, w := io.Pipe() + go func() { + defer w.Close() + defer logrus.Debug("Closing buffered stdin pipe") + pools.Copy(w, stdin) + }() + cStdin = r + } + if ec.OpenStdout { + cStdout = stdout + } + if ec.OpenStderr { + cStderr = stderr + } + + if ec.OpenStdin { + ec.StreamConfig.NewInputPipes() + } else { + ec.StreamConfig.NewNopInputPipe() + } + + p := libcontainerd.Process{ + Args: append([]string{ec.Entrypoint}, ec.Args...), + Env: ec.Env, + Terminal: ec.Tty, + } + + if err := execSetPlatformOpt(c, ec, &p); err != nil { + return err + } + + attachConfig := stream.AttachConfig{ + TTY: ec.Tty, + UseStdin: cStdin != nil, + UseStdout: cStdout != nil, + UseStderr: cStderr != nil, + Stdin: cStdin, + Stdout: cStdout, + Stderr: cStderr, + DetachKeys: ec.DetachKeys, + CloseStdin: true, + } + ec.StreamConfig.AttachStreams(&attachConfig) + attachErr := ec.StreamConfig.CopyStreams(ctx, 
&attachConfig)
+
+    systemPid, err := d.containerd.AddProcess(ctx, c.ID, name, p, ec.InitializeStdio)
+    if err != nil {
+        return err
+    }
+    ec.Lock()
+    ec.Pid = systemPid
+    ec.Unlock()
+
+    select {
+    case <-ctx.Done():
+        logrus.Debugf("Sending TERM signal to process %v in container %v", name, c.ID)
+        d.containerd.SignalProcess(c.ID, name, int(signal.SignalMap["TERM"]))
+        select {
+        case <-time.After(termProcessTimeout * time.Second):
+            logrus.Infof("Container %v, process %v failed to exit within %d seconds of signal TERM - using the force", c.ID, name, termProcessTimeout)
+            d.containerd.SignalProcess(c.ID, name, int(signal.SignalMap["KILL"]))
+        case <-attachErr:
+            // TERM signal worked
+        }
+        return fmt.Errorf("context cancelled")
+    case err := <-attachErr:
+        if err != nil {
+            if _, ok := err.(term.EscapeError); !ok {
+                return fmt.Errorf("exec attach failed with error: %v", err)
+            }
+            d.LogContainerEvent(c, "exec_detach")
+        }
+    }
+    return nil
+}
+
+// execCommandGC runs a ticker to clean up the daemon references
+// of exec configs that are no longer part of the container.
+func (d *Daemon) execCommandGC() {
+    for range time.Tick(5 * time.Minute) {
+        var (
+            cleaned          int
+            liveExecCommands = d.containerExecIds()
+        )
+        for id, config := range d.execCommands.Commands() {
+            if config.CanRemove {
+                cleaned++
+                d.execCommands.Delete(id)
+            } else {
+                if _, exists := liveExecCommands[id]; !exists {
+                    config.CanRemove = true
+                }
+            }
+        }
+        if cleaned > 0 {
+            logrus.Debugf("clean %d unused exec commands", cleaned)
+        }
+    }
+}
+
+// containerExecIds returns the set of all current exec IDs that are in use
+// and running inside a container.
+func (d *Daemon) containerExecIds() map[string]struct{} {
+    ids := map[string]struct{}{}
+    for _, c := range d.containers.List() {
+        for _, id := range c.ExecCommands.List() {
+            ids[id] = struct{}{}
+        }
+    }
+    return ids
+}
diff --git a/components/engine/daemon/exec/exec.go b/components/engine/daemon/exec/exec.go
new file mode 100644
index 0000000000..933136f965
--- /dev/null
+++ b/components/engine/daemon/exec/exec.go
@@ -0,0 +1,118 @@
+package exec
+
+import (
+    "runtime"
+    "sync"
+
+    "github.com/Sirupsen/logrus"
+    "github.com/docker/docker/container/stream"
+    "github.com/docker/docker/libcontainerd"
+    "github.com/docker/docker/pkg/stringid"
+)
+
+// Config holds the configurations for execs. The Daemon keeps
+// track of both running and finished execs so that they can be
+// examined both during and after completion.
+type Config struct {
+    sync.Mutex
+    StreamConfig *stream.Config
+    ID           string
+    Running      bool
+    ExitCode     *int
+    OpenStdin    bool
+    OpenStderr   bool
+    OpenStdout   bool
+    CanRemove    bool
+    ContainerID  string
+    DetachKeys   []byte
+    Entrypoint   string
+    Args         []string
+    Tty          bool
+    Privileged   bool
+    User         string
+    Env          []string
+    Pid          int
+}
+
+// NewConfig initializes a new exec configuration.
+func NewConfig() *Config {
+    return &Config{
+        ID:           stringid.GenerateNonCryptoID(),
+        StreamConfig: stream.NewConfig(),
+    }
+}
+
+// InitializeStdio is called by libcontainerd to connect the stdio.
+func (c *Config) InitializeStdio(iop libcontainerd.IOPipe) error { + c.StreamConfig.CopyToPipe(iop) + + if c.StreamConfig.Stdin() == nil && !c.Tty && runtime.GOOS == "windows" { + if iop.Stdin != nil { + if err := iop.Stdin.Close(); err != nil { + logrus.Errorf("error closing exec stdin: %+v", err) + } + } + } + + return nil +} + +// CloseStreams closes the stdio streams for the exec +func (c *Config) CloseStreams() error { + return c.StreamConfig.CloseStreams() +} + +// Store keeps track of the exec configurations. +type Store struct { + commands map[string]*Config + sync.RWMutex +} + +// NewStore initializes a new exec store. +func NewStore() *Store { + return &Store{commands: make(map[string]*Config, 0)} +} + +// Commands returns the exec configurations in the store. +func (e *Store) Commands() map[string]*Config { + e.RLock() + commands := make(map[string]*Config, len(e.commands)) + for id, config := range e.commands { + commands[id] = config + } + e.RUnlock() + return commands +} + +// Add adds a new exec configuration to the store. +func (e *Store) Add(id string, Config *Config) { + e.Lock() + e.commands[id] = Config + e.Unlock() +} + +// Get returns an exec configuration by its id. +func (e *Store) Get(id string) *Config { + e.RLock() + res := e.commands[id] + e.RUnlock() + return res +} + +// Delete removes an exec configuration from the store. +func (e *Store) Delete(id string) { + e.Lock() + delete(e.commands, id) + e.Unlock() +} + +// List returns the list of exec ids in the store. +func (e *Store) List() []string { + var IDs []string + e.RLock() + for id := range e.commands { + IDs = append(IDs, id) + } + e.RUnlock() + return IDs +} diff --git a/components/engine/daemon/exec_linux.go b/components/engine/daemon/exec_linux.go new file mode 100644 index 0000000000..bb11c11e44 --- /dev/null +++ b/components/engine/daemon/exec_linux.go @@ -0,0 +1,50 @@ +package daemon + +import ( + "github.com/docker/docker/container" + "github.com/docker/docker/daemon/caps" + "github.com/docker/docker/daemon/exec" + "github.com/docker/docker/libcontainerd" + "github.com/opencontainers/runc/libcontainer/apparmor" + "github.com/opencontainers/runtime-spec/specs-go" +) + +func execSetPlatformOpt(c *container.Container, ec *exec.Config, p *libcontainerd.Process) error { + if len(ec.User) > 0 { + uid, gid, additionalGids, err := getUser(c, ec.User) + if err != nil { + return err + } + p.User = &specs.User{ + UID: uid, + GID: gid, + AdditionalGids: additionalGids, + } + } + if ec.Privileged { + p.Capabilities = caps.GetAllCapabilities() + } + if apparmor.IsEnabled() { + var appArmorProfile string + if c.AppArmorProfile != "" { + appArmorProfile = c.AppArmorProfile + } else if c.HostConfig.Privileged { + appArmorProfile = "unconfined" + } else { + appArmorProfile = "docker-default" + } + + if appArmorProfile == "docker-default" { + // Unattended upgrades and other fun services can unload AppArmor + // profiles inadvertently. Since we cannot store our profile in + // /etc/apparmor.d, nor can we practically add other ways of + // telling the system to keep our profile loaded, in order to make + // sure that we keep the default profile enabled we dynamically + // reload it if necessary. 
+ if err := ensureDefaultAppArmorProfile(); err != nil { + return err + } + } + } + return nil +} diff --git a/components/engine/daemon/exec_solaris.go b/components/engine/daemon/exec_solaris.go new file mode 100644 index 0000000000..7003355d91 --- /dev/null +++ b/components/engine/daemon/exec_solaris.go @@ -0,0 +1,11 @@ +package daemon + +import ( + "github.com/docker/docker/container" + "github.com/docker/docker/daemon/exec" + "github.com/docker/docker/libcontainerd" +) + +func execSetPlatformOpt(c *container.Container, ec *exec.Config, p *libcontainerd.Process) error { + return nil +} diff --git a/components/engine/daemon/exec_windows.go b/components/engine/daemon/exec_windows.go new file mode 100644 index 0000000000..1d6974cda9 --- /dev/null +++ b/components/engine/daemon/exec_windows.go @@ -0,0 +1,14 @@ +package daemon + +import ( + "github.com/docker/docker/container" + "github.com/docker/docker/daemon/exec" + "github.com/docker/docker/libcontainerd" +) + +func execSetPlatformOpt(c *container.Container, ec *exec.Config, p *libcontainerd.Process) error { + // Process arguments need to be escaped before sending to OCI. + p.Args = escapeArgs(p.Args) + p.User.Username = ec.User + return nil +} diff --git a/components/engine/daemon/export.go b/components/engine/daemon/export.go new file mode 100644 index 0000000000..5ef6dbb0e5 --- /dev/null +++ b/components/engine/daemon/export.go @@ -0,0 +1,60 @@ +package daemon + +import ( + "fmt" + "io" + "runtime" + + "github.com/docker/docker/container" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/ioutils" +) + +// ContainerExport writes the contents of the container to the given +// writer. An error is returned if the container cannot be found. +func (daemon *Daemon) ContainerExport(name string, out io.Writer) error { + if runtime.GOOS == "windows" { + return fmt.Errorf("the daemon on this platform does not support export of a container") + } + + container, err := daemon.GetContainer(name) + if err != nil { + return err + } + + data, err := daemon.containerExport(container) + if err != nil { + return fmt.Errorf("Error exporting container %s: %v", name, err) + } + defer data.Close() + + // Stream the entire contents of the container (basically a volatile snapshot) + if _, err := io.Copy(out, data); err != nil { + return fmt.Errorf("Error exporting container %s: %v", name, err) + } + return nil +} + +func (daemon *Daemon) containerExport(container *container.Container) (io.ReadCloser, error) { + if err := daemon.Mount(container); err != nil { + return nil, err + } + + uidMaps, gidMaps := daemon.GetUIDGIDMaps() + archive, err := archive.TarWithOptions(container.BaseFS, &archive.TarOptions{ + Compression: archive.Uncompressed, + UIDMaps: uidMaps, + GIDMaps: gidMaps, + }) + if err != nil { + daemon.Unmount(container) + return nil, err + } + arch := ioutils.NewReadCloserWrapper(archive, func() error { + err := archive.Close() + daemon.Unmount(container) + return err + }) + daemon.LogContainerEvent(container, "export") + return arch, err +} diff --git a/components/engine/daemon/getsize_unix.go b/components/engine/daemon/getsize_unix.go new file mode 100644 index 0000000000..bd088fd41c --- /dev/null +++ b/components/engine/daemon/getsize_unix.go @@ -0,0 +1,41 @@ +// +build linux freebsd solaris + +package daemon + +import ( + "github.com/Sirupsen/logrus" +) + +// getSize returns the real size & virtual size of the container. 
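// Editor's note (illustrative, based on the logic below): for a container whose
// writable layer holds 2 MB of changes on top of a 100 MB image, this would return
// roughly sizeRw=2MB and sizeRootfs=102MB; -1 signals that a size could not be
// computed.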
+func (daemon *Daemon) getSize(containerID string) (int64, int64) { + var ( + sizeRw, sizeRootfs int64 + err error + ) + + rwlayer, err := daemon.layerStore.GetRWLayer(containerID) + if err != nil { + logrus.Errorf("Failed to compute size of container rootfs %v: %v", containerID, err) + return sizeRw, sizeRootfs + } + defer daemon.layerStore.ReleaseRWLayer(rwlayer) + + sizeRw, err = rwlayer.Size() + if err != nil { + logrus.Errorf("Driver %s couldn't return diff size of container %s: %s", + daemon.GraphDriverName(), containerID, err) + // FIXME: GetSize should return an error. Not changing it now in case + // there is a side-effect. + sizeRw = -1 + } + + if parent := rwlayer.Parent(); parent != nil { + sizeRootfs, err = parent.Size() + if err != nil { + sizeRootfs = -1 + } else if sizeRw != -1 { + sizeRootfs += sizeRw + } + } + return sizeRw, sizeRootfs +} diff --git a/components/engine/daemon/graphdriver/aufs/aufs.go b/components/engine/daemon/graphdriver/aufs/aufs.go new file mode 100644 index 0000000000..85cd5d1d26 --- /dev/null +++ b/components/engine/daemon/graphdriver/aufs/aufs.go @@ -0,0 +1,638 @@ +// +build linux + +/* + +aufs driver directory structure + + . + ├── layers // Metadata of layers + │ ├── 1 + │ ├── 2 + │ └── 3 + ├── diff // Content of the layer + │ ├── 1 // Contains layers that need to be mounted for the id + │ ├── 2 + │ └── 3 + └── mnt // Mount points for the rw layers to be mounted + ├── 1 + ├── 2 + └── 3 + +*/ + +package aufs + +import ( + "bufio" + "fmt" + "io" + "io/ioutil" + "os" + "os/exec" + "path" + "path/filepath" + "strings" + "sync" + "syscall" + "time" + + "github.com/Sirupsen/logrus" + "github.com/vbatts/tar-split/tar/storage" + + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/chrootarchive" + "github.com/docker/docker/pkg/directory" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/locker" + mountpk "github.com/docker/docker/pkg/mount" + "github.com/docker/docker/pkg/system" + + rsystem "github.com/opencontainers/runc/libcontainer/system" + "github.com/opencontainers/selinux/go-selinux/label" +) + +var ( + // ErrAufsNotSupported is returned if aufs is not supported by the host. + ErrAufsNotSupported = fmt.Errorf("AUFS was not found in /proc/filesystems") + // ErrAufsNested means aufs cannot be used bc we are in a user namespace + ErrAufsNested = fmt.Errorf("AUFS cannot be used in non-init user namespace") + backingFs = "" + + enableDirpermLock sync.Once + enableDirperm bool +) + +func init() { + graphdriver.Register("aufs", Init) +} + +// Driver contains information about the filesystem mounted. +type Driver struct { + sync.Mutex + root string + uidMaps []idtools.IDMap + gidMaps []idtools.IDMap + ctr *graphdriver.RefCounter + pathCacheLock sync.Mutex + pathCache map[string]string + naiveDiff graphdriver.DiffDriver + locker *locker.Locker +} + +// Init returns a new AUFS driver. +// An error is returned if AUFS is not supported. 
+func Init(root string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { + + // Try to load the aufs kernel module + if err := supportsAufs(); err != nil { + return nil, graphdriver.ErrNotSupported + } + + fsMagic, err := graphdriver.GetFSMagic(root) + if err != nil { + return nil, err + } + if fsName, ok := graphdriver.FsNames[fsMagic]; ok { + backingFs = fsName + } + + switch fsMagic { + case graphdriver.FsMagicAufs, graphdriver.FsMagicBtrfs, graphdriver.FsMagicEcryptfs: + logrus.Errorf("AUFS is not supported over %s", backingFs) + return nil, graphdriver.ErrIncompatibleFS + } + + paths := []string{ + "mnt", + "diff", + "layers", + } + + a := &Driver{ + root: root, + uidMaps: uidMaps, + gidMaps: gidMaps, + pathCache: make(map[string]string), + ctr: graphdriver.NewRefCounter(graphdriver.NewFsChecker(graphdriver.FsMagicAufs)), + locker: locker.New(), + } + + rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps) + if err != nil { + return nil, err + } + // Create the root aufs driver dir and return + // if it already exists + // If not populate the dir structure + if err := idtools.MkdirAllAs(root, 0700, rootUID, rootGID); err != nil { + if os.IsExist(err) { + return a, nil + } + return nil, err + } + + if err := mountpk.MakePrivate(root); err != nil { + return nil, err + } + + // Populate the dir structure + for _, p := range paths { + if err := idtools.MkdirAllAs(path.Join(root, p), 0700, rootUID, rootGID); err != nil { + return nil, err + } + } + + a.naiveDiff = graphdriver.NewNaiveDiffDriver(a, uidMaps, gidMaps) + return a, nil +} + +// Return a nil error if the kernel supports aufs +// We cannot modprobe because inside dind modprobe fails +// to run +func supportsAufs() error { + // We can try to modprobe aufs first before looking at + // proc/filesystems for when aufs is supported + exec.Command("modprobe", "aufs").Run() + + if rsystem.RunningInUserNS() { + return ErrAufsNested + } + + f, err := os.Open("/proc/filesystems") + if err != nil { + return err + } + defer f.Close() + + s := bufio.NewScanner(f) + for s.Scan() { + if strings.Contains(s.Text(), "aufs") { + return nil + } + } + return ErrAufsNotSupported +} + +func (a *Driver) rootPath() string { + return a.root +} + +func (*Driver) String() string { + return "aufs" +} + +// Status returns current information about the filesystem such as root directory, number of directories mounted, etc. +func (a *Driver) Status() [][2]string { + ids, _ := loadIds(path.Join(a.rootPath(), "layers")) + return [][2]string{ + {"Root Dir", a.rootPath()}, + {"Backing Filesystem", backingFs}, + {"Dirs", fmt.Sprintf("%d", len(ids))}, + {"Dirperm1 Supported", fmt.Sprintf("%v", useDirperm())}, + } +} + +// GetMetadata not implemented +func (a *Driver) GetMetadata(id string) (map[string]string, error) { + return nil, nil +} + +// Exists returns true if the given id is registered with +// this driver +func (a *Driver) Exists(id string) bool { + if _, err := os.Lstat(path.Join(a.rootPath(), "layers", id)); err != nil { + return false + } + return true +} + +// CreateReadWrite creates a layer that is writable for use as a container +// file system. 
+func (a *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error {
+    return a.Create(id, parent, opts)
+}
+
+// Create three folders for each id:
+// mnt, layers, and diff
+func (a *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error {
+
+    if opts != nil && len(opts.StorageOpt) != 0 {
+        return fmt.Errorf("--storage-opt is not supported for aufs")
+    }
+
+    if err := a.createDirsFor(id); err != nil {
+        return err
+    }
+    // Write the layers metadata
+    f, err := os.Create(path.Join(a.rootPath(), "layers", id))
+    if err != nil {
+        return err
+    }
+    defer f.Close()
+
+    if parent != "" {
+        ids, err := getParentIDs(a.rootPath(), parent)
+        if err != nil {
+            return err
+        }
+
+        if _, err := fmt.Fprintln(f, parent); err != nil {
+            return err
+        }
+        for _, i := range ids {
+            if _, err := fmt.Fprintln(f, i); err != nil {
+                return err
+            }
+        }
+    }
+
+    return nil
+}
+
+// createDirsFor creates two directories for the given id:
+// mnt and diff
+func (a *Driver) createDirsFor(id string) error {
+    paths := []string{
+        "mnt",
+        "diff",
+    }
+
+    rootUID, rootGID, err := idtools.GetRootUIDGID(a.uidMaps, a.gidMaps)
+    if err != nil {
+        return err
+    }
+    // Directory permission is 0755.
+    // The paths of the directories are /mnt/<id>
+    // and /diff/<id>
+    for _, p := range paths {
+        if err := idtools.MkdirAllAs(path.Join(a.rootPath(), p, id), 0755, rootUID, rootGID); err != nil {
+            return err
+        }
+    }
+    return nil
+}
+
+// Remove will unmount and remove the given id.
+func (a *Driver) Remove(id string) error {
+    a.locker.Lock(id)
+    defer a.locker.Unlock(id)
+    a.pathCacheLock.Lock()
+    mountpoint, exists := a.pathCache[id]
+    a.pathCacheLock.Unlock()
+    if !exists {
+        mountpoint = a.getMountpoint(id)
+    }
+
+    var retries int
+    for {
+        mounted, err := a.mounted(mountpoint)
+        if err != nil {
+            return err
+        }
+        if !mounted {
+            break
+        }
+
+        if err := a.unmount(mountpoint); err != nil {
+            if err != syscall.EBUSY {
+                return fmt.Errorf("aufs: unmount error: %s: %v", mountpoint, err)
+            }
+            if retries >= 5 {
+                return fmt.Errorf("aufs: unmount error after retries: %s: %v", mountpoint, err)
+            }
+            // If unmount returns EBUSY, it could be a transient error. Sleep and retry.
+            retries++
+            logrus.Warnf("unmount failed due to EBUSY: retry count: %d", retries)
+            time.Sleep(100 * time.Millisecond)
+            continue
+        }
+        break
+    }
+
+    // Atomically remove each directory in turn by first moving it out of the
+    // way (so that docker doesn't find it anymore) before doing removal of
+    // the whole tree.
+    tmpMntPath := path.Join(a.mntPath(), fmt.Sprintf("%s-removing", id))
+    if err := os.Rename(mountpoint, tmpMntPath); err != nil && !os.IsNotExist(err) {
+        if err == syscall.EBUSY {
+            logrus.Warn("os.Rename err due to EBUSY")
+        }
+        return err
+    }
+    defer system.EnsureRemoveAll(tmpMntPath)
+
+    tmpDiffpath := path.Join(a.diffPath(), fmt.Sprintf("%s-removing", id))
+    if err := os.Rename(a.getDiffPath(id), tmpDiffpath); err != nil && !os.IsNotExist(err) {
+        return err
+    }
+    defer system.EnsureRemoveAll(tmpDiffpath)
+
+    // Remove the layers file for the id
+    if err := os.Remove(path.Join(a.rootPath(), "layers", id)); err != nil && !os.IsNotExist(err) {
+        return err
+    }
+
+    a.pathCacheLock.Lock()
+    delete(a.pathCache, id)
+    a.pathCacheLock.Unlock()
+    return nil
+}
+
+// Get returns the rootfs path for the id.
+// This will mount the dir at its given path.
+func (a *Driver) Get(id, mountLabel string) (string, error) {
+    a.locker.Lock(id)
+    defer a.locker.Unlock(id)
+    parents, err := a.getParentLayerPaths(id)
+    if err != nil && !os.IsNotExist(err) {
+        return "", err
+    }
+
+    a.pathCacheLock.Lock()
+    m, exists := a.pathCache[id]
+    a.pathCacheLock.Unlock()
+
+    if !exists {
+        m = a.getDiffPath(id)
+        if len(parents) > 0 {
+            m = a.getMountpoint(id)
+        }
+    }
+    if count := a.ctr.Increment(m); count > 1 {
+        return m, nil
+    }
+
+    // If a dir does not have a parent (no layers), do not try to mount;
+    // just return the diff path to the data.
+    if len(parents) > 0 {
+        if err := a.mount(id, m, mountLabel, parents); err != nil {
+            return "", err
+        }
+    }
+
+    a.pathCacheLock.Lock()
+    a.pathCache[id] = m
+    a.pathCacheLock.Unlock()
+    return m, nil
+}
+
+// Put unmounts and updates the list of active mounts.
+func (a *Driver) Put(id string) error {
+    a.locker.Lock(id)
+    defer a.locker.Unlock(id)
+    a.pathCacheLock.Lock()
+    m, exists := a.pathCache[id]
+    if !exists {
+        m = a.getMountpoint(id)
+        a.pathCache[id] = m
+    }
+    a.pathCacheLock.Unlock()
+    if count := a.ctr.Decrement(m); count > 0 {
+        return nil
+    }
+
+    err := a.unmount(m)
+    if err != nil {
+        logrus.Debugf("Failed to unmount %s aufs: %v", id, err)
+    }
+    return err
+}
+
+// isParent reports whether the passed-in parent is the direct parent of the passed-in layer.
+func (a *Driver) isParent(id, parent string) bool {
+    parents, _ := getParentIDs(a.rootPath(), id)
+    if parent == "" && len(parents) > 0 {
+        return false
+    }
+    return !(len(parents) > 0 && parent != parents[0])
+}
+
+// Diff produces an archive of the changes between the specified
+// layer and its parent layer which may be "".
+func (a *Driver) Diff(id, parent string) (io.ReadCloser, error) {
+    if !a.isParent(id, parent) {
+        return a.naiveDiff.Diff(id, parent)
+    }
+
+    // AUFS doesn't need the parent layer to produce a diff.
+    return archive.TarWithOptions(path.Join(a.rootPath(), "diff", id), &archive.TarOptions{
+        Compression:     archive.Uncompressed,
+        ExcludePatterns: []string{archive.WhiteoutMetaPrefix + "*", "!" + archive.WhiteoutOpaqueDir},
+        UIDMaps:         a.uidMaps,
+        GIDMaps:         a.gidMaps,
+    })
+}
+
+type fileGetNilCloser struct {
+    storage.FileGetter
+}
+
+func (f fileGetNilCloser) Close() error {
+    return nil
+}
+
+// DiffGetter returns a FileGetCloser that can read files from the directory that
+// contains files for the layer differences. Used for direct access for tar-split.
+func (a *Driver) DiffGetter(id string) (graphdriver.FileGetCloser, error) {
+    p := path.Join(a.rootPath(), "diff", id)
+    return fileGetNilCloser{storage.NewPathFileGetter(p)}, nil
+}
+
+func (a *Driver) applyDiff(id string, diff io.Reader) error {
+    return chrootarchive.UntarUncompressed(diff, path.Join(a.rootPath(), "diff", id), &archive.TarOptions{
+        UIDMaps: a.uidMaps,
+        GIDMaps: a.gidMaps,
+    })
+}
+
+// DiffSize calculates the changes between the specified id
+// and its parent and returns the size in bytes of the changes
+// relative to its base filesystem directory.
+func (a *Driver) DiffSize(id, parent string) (size int64, err error) {
+    if !a.isParent(id, parent) {
+        return a.naiveDiff.DiffSize(id, parent)
+    }
+    // AUFS doesn't need the parent layer to calculate the diff size.
+    return directory.Size(path.Join(a.rootPath(), "diff", id))
+}
+
+// ApplyDiff extracts the changeset from the given diff into the
+// layer with the specified id and parent, returning the size of the
+// new layer in bytes.
+func (a *Driver) ApplyDiff(id, parent string, diff io.Reader) (size int64, err error) { + if !a.isParent(id, parent) { + return a.naiveDiff.ApplyDiff(id, parent, diff) + } + + // AUFS doesn't need the parent id to apply the diff if it is the direct parent. + if err = a.applyDiff(id, diff); err != nil { + return + } + + return a.DiffSize(id, parent) +} + +// Changes produces a list of changes between the specified layer +// and its parent layer. If parent is "", then all changes will be ADD changes. +func (a *Driver) Changes(id, parent string) ([]archive.Change, error) { + if !a.isParent(id, parent) { + return a.naiveDiff.Changes(id, parent) + } + + // AUFS doesn't have snapshots, so we need to get changes from all parent + // layers. + layers, err := a.getParentLayerPaths(id) + if err != nil { + return nil, err + } + return archive.Changes(layers, path.Join(a.rootPath(), "diff", id)) +} + +func (a *Driver) getParentLayerPaths(id string) ([]string, error) { + parentIds, err := getParentIDs(a.rootPath(), id) + if err != nil { + return nil, err + } + layers := make([]string, len(parentIds)) + + // Get the diff paths for all the parent ids + for i, p := range parentIds { + layers[i] = path.Join(a.rootPath(), "diff", p) + } + return layers, nil +} + +func (a *Driver) mount(id string, target string, mountLabel string, layers []string) error { + a.Lock() + defer a.Unlock() + + // If the id is mounted or we get an error return + if mounted, err := a.mounted(target); err != nil || mounted { + return err + } + + rw := a.getDiffPath(id) + + if err := a.aufsMount(layers, rw, target, mountLabel); err != nil { + return fmt.Errorf("error creating aufs mount to %s: %v", target, err) + } + return nil +} + +func (a *Driver) unmount(mountPath string) error { + a.Lock() + defer a.Unlock() + + if mounted, err := a.mounted(mountPath); err != nil || !mounted { + return err + } + if err := Unmount(mountPath); err != nil { + return err + } + return nil +} + +func (a *Driver) mounted(mountpoint string) (bool, error) { + return graphdriver.Mounted(graphdriver.FsMagicAufs, mountpoint) +} + +// Cleanup aufs and unmount all mountpoints +func (a *Driver) Cleanup() error { + var dirs []string + if err := filepath.Walk(a.mntPath(), func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + if !info.IsDir() { + return nil + } + dirs = append(dirs, path) + return nil + }); err != nil { + return err + } + + for _, m := range dirs { + if err := a.unmount(m); err != nil { + logrus.Debugf("aufs error unmounting %s: %s", m, err) + } + } + return mountpk.Unmount(a.root) +} + +func (a *Driver) aufsMount(ro []string, rw, target, mountLabel string) (err error) { + defer func() { + if err != nil { + Unmount(target) + } + }() + + // Mount options are clipped to page size(4096 bytes). If there are more + // layers then these are remounted individually using append. 
+ + offset := 54 + if useDirperm() { + offset += len("dirperm1") + } + b := make([]byte, syscall.Getpagesize()-len(mountLabel)-offset) // room for xino & mountLabel + bp := copy(b, fmt.Sprintf("br:%s=rw", rw)) + + index := 0 + for ; index < len(ro); index++ { + layer := fmt.Sprintf(":%s=ro+wh", ro[index]) + if bp+len(layer) > len(b) { + break + } + bp += copy(b[bp:], layer) + } + + opts := "dio,xino=/dev/shm/aufs.xino" + if useDirperm() { + opts += ",dirperm1" + } + data := label.FormatMountLabel(fmt.Sprintf("%s,%s", string(b[:bp]), opts), mountLabel) + if err = mount("none", target, "aufs", 0, data); err != nil { + return + } + + for ; index < len(ro); index++ { + layer := fmt.Sprintf(":%s=ro+wh", ro[index]) + data := label.FormatMountLabel(fmt.Sprintf("append%s", layer), mountLabel) + if err = mount("none", target, "aufs", syscall.MS_REMOUNT, data); err != nil { + return + } + } + + return +} + +// useDirperm checks dirperm1 mount option can be used with the current +// version of aufs. +func useDirperm() bool { + enableDirpermLock.Do(func() { + base, err := ioutil.TempDir("", "docker-aufs-base") + if err != nil { + logrus.Errorf("error checking dirperm1: %v", err) + return + } + defer os.RemoveAll(base) + + union, err := ioutil.TempDir("", "docker-aufs-union") + if err != nil { + logrus.Errorf("error checking dirperm1: %v", err) + return + } + defer os.RemoveAll(union) + + opts := fmt.Sprintf("br:%s,dirperm1,xino=/dev/shm/aufs.xino", base) + if err := mount("none", union, "aufs", 0, opts); err != nil { + return + } + enableDirperm = true + if err := Unmount(union); err != nil { + logrus.Errorf("error checking dirperm1: failed to unmount %v", err) + } + }) + return enableDirperm +} diff --git a/components/engine/daemon/graphdriver/aufs/aufs_test.go b/components/engine/daemon/graphdriver/aufs/aufs_test.go new file mode 100644 index 0000000000..baf0fd89f2 --- /dev/null +++ b/components/engine/daemon/graphdriver/aufs/aufs_test.go @@ -0,0 +1,802 @@ +// +build linux + +package aufs + +import ( + "crypto/sha256" + "encoding/hex" + "fmt" + "io/ioutil" + "os" + "path" + "sync" + "testing" + + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/reexec" + "github.com/docker/docker/pkg/stringid" +) + +var ( + tmpOuter = path.Join(os.TempDir(), "aufs-tests") + tmp = path.Join(tmpOuter, "aufs") +) + +func init() { + reexec.Init() +} + +func testInit(dir string, t testing.TB) graphdriver.Driver { + d, err := Init(dir, nil, nil, nil) + if err != nil { + if err == graphdriver.ErrNotSupported { + t.Skip(err) + } else { + t.Fatal(err) + } + } + return d +} + +func newDriver(t testing.TB) *Driver { + if err := os.MkdirAll(tmp, 0755); err != nil { + t.Fatal(err) + } + + d := testInit(tmp, t) + return d.(*Driver) +} + +func TestNewDriver(t *testing.T) { + if err := os.MkdirAll(tmp, 0755); err != nil { + t.Fatal(err) + } + + d := testInit(tmp, t) + defer os.RemoveAll(tmp) + if d == nil { + t.Fatal("Driver should not be nil") + } +} + +func TestAufsString(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + + if d.String() != "aufs" { + t.Fatalf("Expected aufs got %s", d.String()) + } +} + +func TestCreateDirStructure(t *testing.T) { + newDriver(t) + defer os.RemoveAll(tmp) + + paths := []string{ + "mnt", + "layers", + "diff", + } + + for _, p := range paths { + if _, err := os.Stat(path.Join(tmp, p)); err != nil { + t.Fatal(err) + } + } +} + +// We should be able to create two drivers with the same dir structure +func 
TestNewDriverFromExistingDir(t *testing.T) {
+ if err := os.MkdirAll(tmp, 0755); err != nil {
+ t.Fatal(err)
+ }
+
+ testInit(tmp, t)
+ testInit(tmp, t)
+ os.RemoveAll(tmp)
+}
+
+func TestCreateNewDir(t *testing.T) {
+ d := newDriver(t)
+ defer os.RemoveAll(tmp)
+
+ if err := d.Create("1", "", nil); err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestCreateNewDirStructure(t *testing.T) {
+ d := newDriver(t)
+ defer os.RemoveAll(tmp)
+
+ if err := d.Create("1", "", nil); err != nil {
+ t.Fatal(err)
+ }
+
+ paths := []string{
+ "mnt",
+ "diff",
+ "layers",
+ }
+
+ for _, p := range paths {
+ if _, err := os.Stat(path.Join(tmp, p, "1")); err != nil {
+ t.Fatal(err)
+ }
+ }
+}
+
+func TestRemoveImage(t *testing.T) {
+ d := newDriver(t)
+ defer os.RemoveAll(tmp)
+
+ if err := d.Create("1", "", nil); err != nil {
+ t.Fatal(err)
+ }
+
+ if err := d.Remove("1"); err != nil {
+ t.Fatal(err)
+ }
+
+ paths := []string{
+ "mnt",
+ "diff",
+ "layers",
+ }
+
+ for _, p := range paths {
+ if _, err := os.Stat(path.Join(tmp, p, "1")); err == nil {
+ t.Fatalf("Error should not be nil because dirs with id 1 should be deleted: %s", p)
+ }
+ }
+}
+
+func TestGetWithoutParent(t *testing.T) {
+ d := newDriver(t)
+ defer os.RemoveAll(tmp)
+
+ if err := d.Create("1", "", nil); err != nil {
+ t.Fatal(err)
+ }
+
+ diffPath, err := d.Get("1", "")
+ if err != nil {
+ t.Fatal(err)
+ }
+ expected := path.Join(tmp, "diff", "1")
+ if diffPath != expected {
+ t.Fatalf("Expected path %s got %s", expected, diffPath)
+ }
+}
+
+func TestCleanupWithNoDirs(t *testing.T) {
+ d := newDriver(t)
+ defer os.RemoveAll(tmp)
+
+ if err := d.Cleanup(); err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestCleanupWithDir(t *testing.T) {
+ d := newDriver(t)
+ defer os.RemoveAll(tmp)
+
+ if err := d.Create("1", "", nil); err != nil {
+ t.Fatal(err)
+ }
+
+ if err := d.Cleanup(); err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestMountedFalseResponse(t *testing.T) {
+ d := newDriver(t)
+ defer os.RemoveAll(tmp)
+
+ if err := d.Create("1", "", nil); err != nil {
+ t.Fatal(err)
+ }
+
+ response, err := d.mounted(d.getDiffPath("1"))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if response != false {
+ t.Fatal("mounted should return false for dir id 1")
+ }
+}
+
+func TestMountedTrueResponse(t *testing.T) {
+ d := newDriver(t)
+ defer os.RemoveAll(tmp)
+ defer d.Cleanup()
+
+ if err := d.Create("1", "", nil); err != nil {
+ t.Fatal(err)
+ }
+ if err := d.Create("2", "1", nil); err != nil {
+ t.Fatal(err)
+ }
+
+ _, err := d.Get("2", "")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ response, err := d.mounted(d.pathCache["2"])
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if response != true {
+ t.Fatal("mounted should return true for dir id 2")
+ }
+}
+
+func TestMountWithParent(t *testing.T) {
+ d := newDriver(t)
+ defer os.RemoveAll(tmp)
+
+ if err := d.Create("1", "", nil); err != nil {
+ t.Fatal(err)
+ }
+ if err := d.Create("2", "1", nil); err != nil {
+ t.Fatal(err)
+ }
+
+ defer func() {
+ if err := d.Cleanup(); err != nil {
+ t.Fatal(err)
+ }
+ }()
+
+ mntPath, err := d.Get("2", "")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if mntPath == "" {
+ t.Fatal("mntPath should not be empty string")
+ }
+
+ expected := path.Join(tmp, "mnt", "2")
+ if mntPath != expected {
+ t.Fatalf("Expected %s got %s", expected, mntPath)
+ }
+}
+
+func TestRemoveMountedDir(t *testing.T) {
+ d := newDriver(t)
+ defer os.RemoveAll(tmp)
+
+ if err := d.Create("1", "", nil); err != nil {
+ t.Fatal(err)
+ }
+ if err := d.Create("2", "1", nil); err != nil {
+ t.Fatal(err)
+ }
+
+ defer
func() { + if err := d.Cleanup(); err != nil { + t.Fatal(err) + } + }() + + mntPath, err := d.Get("2", "") + if err != nil { + t.Fatal(err) + } + if mntPath == "" { + t.Fatal("mntPath should not be empty string") + } + + mounted, err := d.mounted(d.pathCache["2"]) + if err != nil { + t.Fatal(err) + } + + if !mounted { + t.Fatal("Dir id 2 should be mounted") + } + + if err := d.Remove("2"); err != nil { + t.Fatal(err) + } +} + +func TestCreateWithInvalidParent(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + + if err := d.Create("1", "docker", nil); err == nil { + t.Fatal("Error should not be nil with parent does not exist") + } +} + +func TestGetDiff(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + + if err := d.CreateReadWrite("1", "", nil); err != nil { + t.Fatal(err) + } + + diffPath, err := d.Get("1", "") + if err != nil { + t.Fatal(err) + } + + // Add a file to the diff path with a fixed size + size := int64(1024) + + f, err := os.Create(path.Join(diffPath, "test_file")) + if err != nil { + t.Fatal(err) + } + if err := f.Truncate(size); err != nil { + t.Fatal(err) + } + f.Close() + + a, err := d.Diff("1", "") + if err != nil { + t.Fatal(err) + } + if a == nil { + t.Fatal("Archive should not be nil") + } +} + +func TestChanges(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + + if err := d.Create("1", "", nil); err != nil { + t.Fatal(err) + } + + if err := d.CreateReadWrite("2", "1", nil); err != nil { + t.Fatal(err) + } + + defer func() { + if err := d.Cleanup(); err != nil { + t.Fatal(err) + } + }() + + mntPoint, err := d.Get("2", "") + if err != nil { + t.Fatal(err) + } + + // Create a file to save in the mountpoint + f, err := os.Create(path.Join(mntPoint, "test.txt")) + if err != nil { + t.Fatal(err) + } + + if _, err := f.WriteString("testline"); err != nil { + t.Fatal(err) + } + if err := f.Close(); err != nil { + t.Fatal(err) + } + + changes, err := d.Changes("2", "") + if err != nil { + t.Fatal(err) + } + if len(changes) != 1 { + t.Fatalf("Dir 2 should have one change from parent got %d", len(changes)) + } + change := changes[0] + + expectedPath := "/test.txt" + if change.Path != expectedPath { + t.Fatalf("Expected path %s got %s", expectedPath, change.Path) + } + + if change.Kind != archive.ChangeAdd { + t.Fatalf("Change kind should be ChangeAdd got %s", change.Kind) + } + + if err := d.CreateReadWrite("3", "2", nil); err != nil { + t.Fatal(err) + } + mntPoint, err = d.Get("3", "") + if err != nil { + t.Fatal(err) + } + + // Create a file to save in the mountpoint + f, err = os.Create(path.Join(mntPoint, "test2.txt")) + if err != nil { + t.Fatal(err) + } + + if _, err := f.WriteString("testline"); err != nil { + t.Fatal(err) + } + if err := f.Close(); err != nil { + t.Fatal(err) + } + + changes, err = d.Changes("3", "2") + if err != nil { + t.Fatal(err) + } + + if len(changes) != 1 { + t.Fatalf("Dir 2 should have one change from parent got %d", len(changes)) + } + change = changes[0] + + expectedPath = "/test2.txt" + if change.Path != expectedPath { + t.Fatalf("Expected path %s got %s", expectedPath, change.Path) + } + + if change.Kind != archive.ChangeAdd { + t.Fatalf("Change kind should be ChangeAdd got %s", change.Kind) + } +} + +func TestDiffSize(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + + if err := d.CreateReadWrite("1", "", nil); err != nil { + t.Fatal(err) + } + + diffPath, err := d.Get("1", "") + if err != nil { + t.Fatal(err) + } + + // Add a file to the diff path with a fixed size + size := 
int64(1024) + + f, err := os.Create(path.Join(diffPath, "test_file")) + if err != nil { + t.Fatal(err) + } + if err := f.Truncate(size); err != nil { + t.Fatal(err) + } + s, err := f.Stat() + if err != nil { + t.Fatal(err) + } + size = s.Size() + if err := f.Close(); err != nil { + t.Fatal(err) + } + + diffSize, err := d.DiffSize("1", "") + if err != nil { + t.Fatal(err) + } + if diffSize != size { + t.Fatalf("Expected size to be %d got %d", size, diffSize) + } +} + +func TestChildDiffSize(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + defer d.Cleanup() + + if err := d.CreateReadWrite("1", "", nil); err != nil { + t.Fatal(err) + } + + diffPath, err := d.Get("1", "") + if err != nil { + t.Fatal(err) + } + + // Add a file to the diff path with a fixed size + size := int64(1024) + + f, err := os.Create(path.Join(diffPath, "test_file")) + if err != nil { + t.Fatal(err) + } + if err := f.Truncate(size); err != nil { + t.Fatal(err) + } + s, err := f.Stat() + if err != nil { + t.Fatal(err) + } + size = s.Size() + if err := f.Close(); err != nil { + t.Fatal(err) + } + + diffSize, err := d.DiffSize("1", "") + if err != nil { + t.Fatal(err) + } + if diffSize != size { + t.Fatalf("Expected size to be %d got %d", size, diffSize) + } + + if err := d.Create("2", "1", nil); err != nil { + t.Fatal(err) + } + + diffSize, err = d.DiffSize("2", "1") + if err != nil { + t.Fatal(err) + } + // The diff size for the child should be zero + if diffSize != 0 { + t.Fatalf("Expected size to be %d got %d", 0, diffSize) + } +} + +func TestExists(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + defer d.Cleanup() + + if err := d.Create("1", "", nil); err != nil { + t.Fatal(err) + } + + if d.Exists("none") { + t.Fatal("id none should not exist in the driver") + } + + if !d.Exists("1") { + t.Fatal("id 1 should exist in the driver") + } +} + +func TestStatus(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + defer d.Cleanup() + + if err := d.Create("1", "", nil); err != nil { + t.Fatal(err) + } + + status := d.Status() + if status == nil || len(status) == 0 { + t.Fatal("Status should not be nil or empty") + } + rootDir := status[0] + dirs := status[2] + if rootDir[0] != "Root Dir" { + t.Fatalf("Expected Root Dir got %s", rootDir[0]) + } + if rootDir[1] != d.rootPath() { + t.Fatalf("Expected %s got %s", d.rootPath(), rootDir[1]) + } + if dirs[0] != "Dirs" { + t.Fatalf("Expected Dirs got %s", dirs[0]) + } + if dirs[1] != "1" { + t.Fatalf("Expected 1 got %s", dirs[1]) + } +} + +func TestApplyDiff(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + defer d.Cleanup() + + if err := d.CreateReadWrite("1", "", nil); err != nil { + t.Fatal(err) + } + + diffPath, err := d.Get("1", "") + if err != nil { + t.Fatal(err) + } + + // Add a file to the diff path with a fixed size + size := int64(1024) + + f, err := os.Create(path.Join(diffPath, "test_file")) + if err != nil { + t.Fatal(err) + } + if err := f.Truncate(size); err != nil { + t.Fatal(err) + } + f.Close() + + diff, err := d.Diff("1", "") + if err != nil { + t.Fatal(err) + } + + if err := d.Create("2", "", nil); err != nil { + t.Fatal(err) + } + if err := d.Create("3", "2", nil); err != nil { + t.Fatal(err) + } + + if err := d.applyDiff("3", diff); err != nil { + t.Fatal(err) + } + + // Ensure that the file is in the mount point for id 3 + + mountPoint, err := d.Get("3", "") + if err != nil { + t.Fatal(err) + } + if _, err := os.Stat(path.Join(mountPoint, "test_file")); err != nil { + t.Fatal(err) + } +} + +func hash(c 
string) string { + h := sha256.New() + fmt.Fprint(h, c) + return hex.EncodeToString(h.Sum(nil)) +} + +func testMountMoreThan42Layers(t *testing.T, mountPath string) { + if err := os.MkdirAll(mountPath, 0755); err != nil { + t.Fatal(err) + } + + defer os.RemoveAll(mountPath) + d := testInit(mountPath, t).(*Driver) + defer d.Cleanup() + var last string + var expected int + + for i := 1; i < 127; i++ { + expected++ + var ( + parent = fmt.Sprintf("%d", i-1) + current = fmt.Sprintf("%d", i) + ) + + if parent == "0" { + parent = "" + } else { + parent = hash(parent) + } + current = hash(current) + + if err := d.CreateReadWrite(current, parent, nil); err != nil { + t.Logf("Current layer %d", i) + t.Error(err) + } + point, err := d.Get(current, "") + if err != nil { + t.Logf("Current layer %d", i) + t.Error(err) + } + f, err := os.Create(path.Join(point, current)) + if err != nil { + t.Logf("Current layer %d", i) + t.Error(err) + } + f.Close() + + if i%10 == 0 { + if err := os.Remove(path.Join(point, parent)); err != nil { + t.Logf("Current layer %d", i) + t.Error(err) + } + expected-- + } + last = current + } + + // Perform the actual mount for the top most image + point, err := d.Get(last, "") + if err != nil { + t.Error(err) + } + files, err := ioutil.ReadDir(point) + if err != nil { + t.Error(err) + } + if len(files) != expected { + t.Errorf("Expected %d got %d", expected, len(files)) + } +} + +func TestMountMoreThan42Layers(t *testing.T) { + os.RemoveAll(tmpOuter) + testMountMoreThan42Layers(t, tmp) +} + +func TestMountMoreThan42LayersMatchingPathLength(t *testing.T) { + defer os.RemoveAll(tmpOuter) + zeroes := "0" + for { + // This finds a mount path so that when combined into aufs mount options + // 4096 byte boundary would be in between the paths or in permission + // section. 
For '/tmp' it will use '/tmp/aufs-tests/00000000/aufs' + mountPath := path.Join(tmpOuter, zeroes, "aufs") + pathLength := 77 + len(mountPath) + + if mod := 4095 % pathLength; mod == 0 || mod > pathLength-2 { + t.Logf("Using path: %s", mountPath) + testMountMoreThan42Layers(t, mountPath) + return + } + zeroes += "0" + } +} + +func BenchmarkConcurrentAccess(b *testing.B) { + b.StopTimer() + b.ResetTimer() + + d := newDriver(b) + defer os.RemoveAll(tmp) + defer d.Cleanup() + + numConcurrent := 256 + // create a bunch of ids + var ids []string + for i := 0; i < numConcurrent; i++ { + ids = append(ids, stringid.GenerateNonCryptoID()) + } + + if err := d.Create(ids[0], "", nil); err != nil { + b.Fatal(err) + } + + if err := d.Create(ids[1], ids[0], nil); err != nil { + b.Fatal(err) + } + + parent := ids[1] + ids = append(ids[2:]) + + chErr := make(chan error, numConcurrent) + var outerGroup sync.WaitGroup + outerGroup.Add(len(ids)) + b.StartTimer() + + // here's the actual bench + for _, id := range ids { + go func(id string) { + defer outerGroup.Done() + if err := d.Create(id, parent, nil); err != nil { + b.Logf("Create %s failed", id) + chErr <- err + return + } + var innerGroup sync.WaitGroup + for i := 0; i < b.N; i++ { + innerGroup.Add(1) + go func() { + d.Get(id, "") + d.Put(id) + innerGroup.Done() + }() + } + innerGroup.Wait() + d.Remove(id) + }(id) + } + + outerGroup.Wait() + b.StopTimer() + close(chErr) + for err := range chErr { + if err != nil { + b.Log(err) + b.Fail() + } + } +} diff --git a/components/engine/daemon/graphdriver/aufs/dirs.go b/components/engine/daemon/graphdriver/aufs/dirs.go new file mode 100644 index 0000000000..d2325fc46c --- /dev/null +++ b/components/engine/daemon/graphdriver/aufs/dirs.go @@ -0,0 +1,64 @@ +// +build linux + +package aufs + +import ( + "bufio" + "io/ioutil" + "os" + "path" +) + +// Return all the directories +func loadIds(root string) ([]string, error) { + dirs, err := ioutil.ReadDir(root) + if err != nil { + return nil, err + } + out := []string{} + for _, d := range dirs { + if !d.IsDir() { + out = append(out, d.Name()) + } + } + return out, nil +} + +// Read the layers file for the current id and return all the +// layers represented by new lines in the file +// +// If there are no lines in the file then the id has no parent +// and an empty slice is returned. +func getParentIDs(root, id string) ([]string, error) { + f, err := os.Open(path.Join(root, "layers", id)) + if err != nil { + return nil, err + } + defer f.Close() + + out := []string{} + s := bufio.NewScanner(f) + + for s.Scan() { + if t := s.Text(); t != "" { + out = append(out, s.Text()) + } + } + return out, s.Err() +} + +func (a *Driver) getMountpoint(id string) string { + return path.Join(a.mntPath(), id) +} + +func (a *Driver) mntPath() string { + return path.Join(a.rootPath(), "mnt") +} + +func (a *Driver) getDiffPath(id string) string { + return path.Join(a.diffPath(), id) +} + +func (a *Driver) diffPath() string { + return path.Join(a.rootPath(), "diff") +} diff --git a/components/engine/daemon/graphdriver/aufs/mount.go b/components/engine/daemon/graphdriver/aufs/mount.go new file mode 100644 index 0000000000..da1e892f44 --- /dev/null +++ b/components/engine/daemon/graphdriver/aufs/mount.go @@ -0,0 +1,21 @@ +// +build linux + +package aufs + +import ( + "os/exec" + "syscall" + + "github.com/Sirupsen/logrus" +) + +// Unmount the target specified. 
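The path helpers in `dirs.go` above pin down the aufs driver's on-disk layout. For orientation, a layer `<id>` under the driver root (by default `/var/lib/docker/aufs`, though the root is whatever was passed to `Init`) is touched in three places:

```
<root>/layers/<id>   text file listing ancestor IDs, one per line
<root>/diff/<id>/    this layer's own files; the rw branch when mounted
<root>/mnt/<id>/     aufs mountpoint stacking diff/<id> over parent diffs
```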
+func Unmount(target string) error { + if err := exec.Command("auplink", target, "flush").Run(); err != nil { + logrus.Warnf("Couldn't run auplink before unmount %s: %s", target, err) + } + if err := syscall.Unmount(target, 0); err != nil { + return err + } + return nil +} diff --git a/components/engine/daemon/graphdriver/aufs/mount_linux.go b/components/engine/daemon/graphdriver/aufs/mount_linux.go new file mode 100644 index 0000000000..8062bae420 --- /dev/null +++ b/components/engine/daemon/graphdriver/aufs/mount_linux.go @@ -0,0 +1,7 @@ +package aufs + +import "syscall" + +func mount(source string, target string, fstype string, flags uintptr, data string) error { + return syscall.Mount(source, target, fstype, flags, data) +} diff --git a/components/engine/daemon/graphdriver/aufs/mount_unsupported.go b/components/engine/daemon/graphdriver/aufs/mount_unsupported.go new file mode 100644 index 0000000000..d030b06637 --- /dev/null +++ b/components/engine/daemon/graphdriver/aufs/mount_unsupported.go @@ -0,0 +1,12 @@ +// +build !linux + +package aufs + +import "errors" + +// MsRemount declared to specify a non-linux system mount. +const MsRemount = 0 + +func mount(source string, target string, fstype string, flags uintptr, data string) (err error) { + return errors.New("mount is not implemented on this platform") +} diff --git a/components/engine/daemon/graphdriver/btrfs/btrfs.go b/components/engine/daemon/graphdriver/btrfs/btrfs.go new file mode 100644 index 0000000000..2ff41a0b8b --- /dev/null +++ b/components/engine/daemon/graphdriver/btrfs/btrfs.go @@ -0,0 +1,575 @@ +// +build linux + +package btrfs + +/* +#include +#include +#include +#include + +static void set_name_btrfs_ioctl_vol_args_v2(struct btrfs_ioctl_vol_args_v2* btrfs_struct, const char* value) { + snprintf(btrfs_struct->name, BTRFS_SUBVOL_NAME_MAX, "%s", value); +} +*/ +import "C" + +import ( + "fmt" + "os" + "path" + "path/filepath" + "strings" + "syscall" + "unsafe" + + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/mount" + "github.com/docker/docker/pkg/parsers" + "github.com/docker/docker/pkg/system" + "github.com/docker/go-units" + "github.com/opencontainers/selinux/go-selinux/label" +) + +func init() { + graphdriver.Register("btrfs", Init) +} + +type btrfsOptions struct { + minSpace uint64 + size uint64 +} + +// Init returns a new BTRFS driver. +// An error is returned if BTRFS is not supported. 
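The `mount_linux.go`/`mount_unsupported.go` pair above is the conventional Go build-tag split: one thin per-platform wrapper behind a single `mount` signature, so callers never carry platform conditionals. A sketch of the same pattern applied to a hypothetical `flush` helper (file names and function are illustrative, not part of this change):

```go
// flush_linux.go
// +build linux

package aufs

import "os/exec"

// flush asks auplink to flush pseudo-links before an unmount.
func flush(target string) error {
	return exec.Command("auplink", target, "flush").Run()
}
```

```go
// flush_unsupported.go
// +build !linux

package aufs

import "errors"

// flush is a stub so non-linux builds still compile.
func flush(target string) error {
	return errors.New("flush is not implemented on this platform")
}
```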
+func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { + + fsMagic, err := graphdriver.GetFSMagic(home) + if err != nil { + return nil, err + } + + if fsMagic != graphdriver.FsMagicBtrfs { + return nil, graphdriver.ErrPrerequisites + } + + rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps) + if err != nil { + return nil, err + } + if err := idtools.MkdirAllAs(home, 0700, rootUID, rootGID); err != nil { + return nil, err + } + + if err := mount.MakePrivate(home); err != nil { + return nil, err + } + + opt, userDiskQuota, err := parseOptions(options) + if err != nil { + return nil, err + } + + driver := &Driver{ + home: home, + uidMaps: uidMaps, + gidMaps: gidMaps, + options: opt, + } + + if userDiskQuota { + if err := driver.subvolEnableQuota(); err != nil { + return nil, err + } + } + + return graphdriver.NewNaiveDiffDriver(driver, uidMaps, gidMaps), nil +} + +func parseOptions(opt []string) (btrfsOptions, bool, error) { + var options btrfsOptions + userDiskQuota := false + for _, option := range opt { + key, val, err := parsers.ParseKeyValueOpt(option) + if err != nil { + return options, userDiskQuota, err + } + key = strings.ToLower(key) + switch key { + case "btrfs.min_space": + minSpace, err := units.RAMInBytes(val) + if err != nil { + return options, userDiskQuota, err + } + userDiskQuota = true + options.minSpace = uint64(minSpace) + default: + return options, userDiskQuota, fmt.Errorf("Unknown option %s", key) + } + } + return options, userDiskQuota, nil +} + +// Driver contains information about the filesystem mounted. +type Driver struct { + //root of the file system + home string + uidMaps []idtools.IDMap + gidMaps []idtools.IDMap + options btrfsOptions + quotaEnabled bool +} + +// String prints the name of the driver (btrfs). +func (d *Driver) String() string { + return "btrfs" +} + +// Status returns current driver information in a two dimensional string array. +// Output contains "Build Version" and "Library Version" of the btrfs libraries used. +// Version information can be used to check compatibility with your kernel. +func (d *Driver) Status() [][2]string { + status := [][2]string{} + if bv := btrfsBuildVersion(); bv != "-" { + status = append(status, [2]string{"Build Version", bv}) + } + if lv := btrfsLibVersion(); lv != -1 { + status = append(status, [2]string{"Library Version", fmt.Sprintf("%d", lv)}) + } + return status +} + +// GetMetadata returns empty metadata for this driver. +func (d *Driver) GetMetadata(id string) (map[string]string, error) { + return nil, nil +} + +// Cleanup unmounts the home directory. 
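`parseOptions` above accepts a single key today, `btrfs.min_space`, parsed with `units.RAMInBytes` so human-readable suffixes work. An in-package sketch of what a typical daemon flag yields (values illustrative):

```go
// Corresponds to: dockerd -s btrfs --storage-opt btrfs.min_space=10G
opt, userDiskQuota, err := parseOptions([]string{"btrfs.min_space=10G"})
if err != nil {
	return nil, err // Init propagates unknown keys or malformed sizes
}
// opt.minSpace is now 10 * 1024 * 1024 * 1024 and userDiskQuota is
// true, so Init enables btrfs quota support before wrapping the
// driver in graphdriver.NewNaiveDiffDriver.
```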
+func (d *Driver) Cleanup() error { + if err := d.subvolDisableQuota(); err != nil { + return err + } + + return mount.Unmount(d.home) +} + +func free(p *C.char) { + C.free(unsafe.Pointer(p)) +} + +func openDir(path string) (*C.DIR, error) { + Cpath := C.CString(path) + defer free(Cpath) + + dir := C.opendir(Cpath) + if dir == nil { + return nil, fmt.Errorf("Can't open dir") + } + return dir, nil +} + +func closeDir(dir *C.DIR) { + if dir != nil { + C.closedir(dir) + } +} + +func getDirFd(dir *C.DIR) uintptr { + return uintptr(C.dirfd(dir)) +} + +func subvolCreate(path, name string) error { + dir, err := openDir(path) + if err != nil { + return err + } + defer closeDir(dir) + + var args C.struct_btrfs_ioctl_vol_args + for i, c := range []byte(name) { + args.name[i] = C.char(c) + } + + _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_SUBVOL_CREATE, + uintptr(unsafe.Pointer(&args))) + if errno != 0 { + return fmt.Errorf("Failed to create btrfs subvolume: %v", errno.Error()) + } + return nil +} + +func subvolSnapshot(src, dest, name string) error { + srcDir, err := openDir(src) + if err != nil { + return err + } + defer closeDir(srcDir) + + destDir, err := openDir(dest) + if err != nil { + return err + } + defer closeDir(destDir) + + var args C.struct_btrfs_ioctl_vol_args_v2 + args.fd = C.__s64(getDirFd(srcDir)) + + var cs = C.CString(name) + C.set_name_btrfs_ioctl_vol_args_v2(&args, cs) + C.free(unsafe.Pointer(cs)) + + _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(destDir), C.BTRFS_IOC_SNAP_CREATE_V2, + uintptr(unsafe.Pointer(&args))) + if errno != 0 { + return fmt.Errorf("Failed to create btrfs snapshot: %v", errno.Error()) + } + return nil +} + +func isSubvolume(p string) (bool, error) { + var bufStat syscall.Stat_t + if err := syscall.Lstat(p, &bufStat); err != nil { + return false, err + } + + // return true if it is a btrfs subvolume + return bufStat.Ino == C.BTRFS_FIRST_FREE_OBJECTID, nil +} + +func subvolDelete(dirpath, name string) error { + dir, err := openDir(dirpath) + if err != nil { + return err + } + defer closeDir(dir) + fullPath := path.Join(dirpath, name) + + var args C.struct_btrfs_ioctl_vol_args + + // walk the btrfs subvolumes + walkSubvolumes := func(p string, f os.FileInfo, err error) error { + if err != nil { + if os.IsNotExist(err) && p != fullPath { + // missing most likely because the path was a subvolume that got removed in the previous iteration + // since it's gone anyway, we don't care + return nil + } + return fmt.Errorf("error walking subvolumes: %v", err) + } + // we want to check children only so skip itself + // it will be removed after the filepath walk anyways + if f.IsDir() && p != fullPath { + sv, err := isSubvolume(p) + if err != nil { + return fmt.Errorf("Failed to test if %s is a btrfs subvolume: %v", p, err) + } + if sv { + if err := subvolDelete(path.Dir(p), f.Name()); err != nil { + return fmt.Errorf("Failed to destroy btrfs child subvolume (%s) of parent (%s): %v", p, dirpath, err) + } + } + } + return nil + } + if err := filepath.Walk(path.Join(dirpath, name), walkSubvolumes); err != nil { + return fmt.Errorf("Recursively walking subvolumes for %s failed: %v", dirpath, err) + } + + // all subvolumes have been removed + // now remove the one originally passed in + for i, c := range []byte(name) { + args.name[i] = C.char(c) + } + _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_SNAP_DESTROY, + uintptr(unsafe.Pointer(&args))) + if errno != 0 { + return fmt.Errorf("Failed to 
destroy btrfs snapshot %s for %s: %v", dirpath, name, errno.Error()) + } + return nil +} + +func (d *Driver) subvolEnableQuota() error { + if d.quotaEnabled { + return nil + } + // In case quotaEnabled is not set, check qgroup and update quotaEnabled as needed + if _, err := subvolLookupQgroup(d.home); err == nil { + d.quotaEnabled = true + return nil + } + + dir, err := openDir(d.home) + if err != nil { + return err + } + defer closeDir(dir) + + var args C.struct_btrfs_ioctl_quota_ctl_args + args.cmd = C.BTRFS_QUOTA_CTL_ENABLE + _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QUOTA_CTL, + uintptr(unsafe.Pointer(&args))) + if errno != 0 { + return fmt.Errorf("Failed to enable btrfs quota for %s: %v", dir, errno.Error()) + } + + d.quotaEnabled = true + + return nil +} + +func (d *Driver) subvolDisableQuota() error { + if !d.quotaEnabled { + // In case quotaEnabled is not set, check qgroup and update quotaEnabled as needed + if _, err := subvolLookupQgroup(d.home); err != nil { + // quota is still not enabled + return nil + } + d.quotaEnabled = true + } + + dir, err := openDir(d.home) + if err != nil { + return err + } + defer closeDir(dir) + + var args C.struct_btrfs_ioctl_quota_ctl_args + args.cmd = C.BTRFS_QUOTA_CTL_DISABLE + _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QUOTA_CTL, + uintptr(unsafe.Pointer(&args))) + if errno != 0 { + return fmt.Errorf("Failed to disable btrfs quota for %s: %v", dir, errno.Error()) + } + + d.quotaEnabled = false + + return nil +} + +func (d *Driver) subvolRescanQuota() error { + if !d.quotaEnabled { + // In case quotaEnabled is not set, check qgroup and update quotaEnabled as needed + if _, err := subvolLookupQgroup(d.home); err != nil { + // quota is still not enabled + return nil + } + d.quotaEnabled = true + } + + dir, err := openDir(d.home) + if err != nil { + return err + } + defer closeDir(dir) + + var args C.struct_btrfs_ioctl_quota_rescan_args + _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QUOTA_RESCAN_WAIT, + uintptr(unsafe.Pointer(&args))) + if errno != 0 { + return fmt.Errorf("Failed to rescan btrfs quota for %s: %v", dir, errno.Error()) + } + + return nil +} + +func subvolLimitQgroup(path string, size uint64) error { + dir, err := openDir(path) + if err != nil { + return err + } + defer closeDir(dir) + + var args C.struct_btrfs_ioctl_qgroup_limit_args + args.lim.max_referenced = C.__u64(size) + args.lim.flags = C.BTRFS_QGROUP_LIMIT_MAX_RFER + _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QGROUP_LIMIT, + uintptr(unsafe.Pointer(&args))) + if errno != 0 { + return fmt.Errorf("Failed to limit qgroup for %s: %v", dir, errno.Error()) + } + + return nil +} + +func subvolLookupQgroup(path string) (uint64, error) { + dir, err := openDir(path) + if err != nil { + return 0, err + } + defer closeDir(dir) + + var args C.struct_btrfs_ioctl_ino_lookup_args + args.objectid = C.BTRFS_FIRST_FREE_OBJECTID + + _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_INO_LOOKUP, + uintptr(unsafe.Pointer(&args))) + if errno != 0 { + return 0, fmt.Errorf("Failed to lookup qgroup for %s: %v", dir, errno.Error()) + } + if args.treeid == 0 { + return 0, fmt.Errorf("Invalid qgroup id for %s: 0", dir) + } + + return uint64(args.treeid), nil +} + +func (d *Driver) subvolumesDir() string { + return path.Join(d.home, "subvolumes") +} + +func (d *Driver) subvolumesDirID(id string) string { + return path.Join(d.subvolumesDir(), id) +} 
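Every helper above follows the same cgo shape: `openDir` the target, issue a `BTRFS_IOC_*` ioctl against its fd via `syscall.Syscall`, and turn a non-zero errno into a Go error. Chained together, the quota path exercised by `Create` below looks roughly like this (in-package sketch; the id and size are illustrative):

```go
// Cap a subvolume's referenced bytes at 20 GiB, enabling quota first.
if err := d.subvolEnableQuota(); err != nil { // BTRFS_QUOTA_CTL_ENABLE
	return err
}
// BTRFS_IOC_QGROUP_LIMIT with lim.max_referenced = 20 GiB
if err := subvolLimitQgroup(d.subvolumesDirID("some-id"), 20<<30); err != nil {
	return err
}
```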
+ +// CreateReadWrite creates a layer that is writable for use as a container +// file system. +func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error { + return d.Create(id, parent, opts) +} + +// Create the filesystem with given id. +func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error { + subvolumes := path.Join(d.home, "subvolumes") + rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) + if err != nil { + return err + } + if err := idtools.MkdirAllAs(subvolumes, 0700, rootUID, rootGID); err != nil { + return err + } + if parent == "" { + if err := subvolCreate(subvolumes, id); err != nil { + return err + } + } else { + parentDir := d.subvolumesDirID(parent) + st, err := os.Stat(parentDir) + if err != nil { + return err + } + if !st.IsDir() { + return fmt.Errorf("%s: not a directory", parentDir) + } + if err := subvolSnapshot(parentDir, subvolumes, id); err != nil { + return err + } + } + + var storageOpt map[string]string + if opts != nil { + storageOpt = opts.StorageOpt + } + + if _, ok := storageOpt["size"]; ok { + driver := &Driver{} + if err := d.parseStorageOpt(storageOpt, driver); err != nil { + return err + } + if err := d.setStorageSize(path.Join(subvolumes, id), driver); err != nil { + return err + } + } + + // if we have a remapped root (user namespaces enabled), change the created snapshot + // dir ownership to match + if rootUID != 0 || rootGID != 0 { + if err := os.Chown(path.Join(subvolumes, id), rootUID, rootGID); err != nil { + return err + } + } + + mountLabel := "" + if opts != nil { + mountLabel = opts.MountLabel + } + + return label.Relabel(path.Join(subvolumes, id), mountLabel, false) +} + +// Parse btrfs storage options +func (d *Driver) parseStorageOpt(storageOpt map[string]string, driver *Driver) error { + // Read size to change the subvolume disk quota per container + for key, val := range storageOpt { + key := strings.ToLower(key) + switch key { + case "size": + size, err := units.RAMInBytes(val) + if err != nil { + return err + } + driver.options.size = uint64(size) + default: + return fmt.Errorf("Unknown option %s", key) + } + } + + return nil +} + +// Set btrfs storage size +func (d *Driver) setStorageSize(dir string, driver *Driver) error { + if driver.options.size <= 0 { + return fmt.Errorf("btrfs: invalid storage size: %s", units.HumanSize(float64(driver.options.size))) + } + if d.options.minSpace > 0 && driver.options.size < d.options.minSpace { + return fmt.Errorf("btrfs: storage size cannot be less than %s", units.HumanSize(float64(d.options.minSpace))) + } + + if err := d.subvolEnableQuota(); err != nil { + return err + } + + if err := subvolLimitQgroup(dir, driver.options.size); err != nil { + return err + } + + return nil +} + +// Remove the filesystem with given id. +func (d *Driver) Remove(id string) error { + dir := d.subvolumesDirID(id) + if _, err := os.Stat(dir); err != nil { + return err + } + if err := subvolDelete(d.subvolumesDir(), id); err != nil { + return err + } + if err := system.EnsureRemoveAll(dir); err != nil { + return err + } + if err := d.subvolRescanQuota(); err != nil { + return err + } + return nil +} + +// Get the requested filesystem id. 
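The `size` entry in `opts.StorageOpt` is how a per-container quota reaches `setStorageSize`; at the command line this typically arrives as `docker run --storage-opt size=...`. An in-package sketch of the call the daemon effectively makes (ids illustrative):

```go
// Snapshot "parent-id" into "container-id", capped at 20G.
err := d.Create("container-id", "parent-id", &graphdriver.CreateOpts{
	StorageOpt: map[string]string{"size": "20G"},
})
// Create snapshots the parent subvolume, then parseStorageOpt and
// setStorageSize enforce the cap via subvolLimitQgroup.
```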
+func (d *Driver) Get(id, mountLabel string) (string, error) { + dir := d.subvolumesDirID(id) + st, err := os.Stat(dir) + if err != nil { + return "", err + } + + if !st.IsDir() { + return "", fmt.Errorf("%s: not a directory", dir) + } + + return dir, nil +} + +// Put is not implemented for BTRFS as there is no cleanup required for the id. +func (d *Driver) Put(id string) error { + // Get() creates no runtime resources (like e.g. mounts) + // so this doesn't need to do anything. + return nil +} + +// Exists checks if the id exists in the filesystem. +func (d *Driver) Exists(id string) bool { + dir := d.subvolumesDirID(id) + _, err := os.Stat(dir) + return err == nil +} diff --git a/components/engine/daemon/graphdriver/btrfs/btrfs_test.go b/components/engine/daemon/graphdriver/btrfs/btrfs_test.go new file mode 100644 index 0000000000..0038dbcdcd --- /dev/null +++ b/components/engine/daemon/graphdriver/btrfs/btrfs_test.go @@ -0,0 +1,63 @@ +// +build linux + +package btrfs + +import ( + "os" + "path" + "testing" + + "github.com/docker/docker/daemon/graphdriver/graphtest" +) + +// This avoids creating a new driver for each test if all tests are run +// Make sure to put new tests between TestBtrfsSetup and TestBtrfsTeardown +func TestBtrfsSetup(t *testing.T) { + graphtest.GetDriver(t, "btrfs") +} + +func TestBtrfsCreateEmpty(t *testing.T) { + graphtest.DriverTestCreateEmpty(t, "btrfs") +} + +func TestBtrfsCreateBase(t *testing.T) { + graphtest.DriverTestCreateBase(t, "btrfs") +} + +func TestBtrfsCreateSnap(t *testing.T) { + graphtest.DriverTestCreateSnap(t, "btrfs") +} + +func TestBtrfsSubvolDelete(t *testing.T) { + d := graphtest.GetDriver(t, "btrfs") + if err := d.CreateReadWrite("test", "", nil); err != nil { + t.Fatal(err) + } + defer graphtest.PutDriver(t) + + dir, err := d.Get("test", "") + if err != nil { + t.Fatal(err) + } + defer d.Put("test") + + if err := subvolCreate(dir, "subvoltest"); err != nil { + t.Fatal(err) + } + + if _, err := os.Stat(path.Join(dir, "subvoltest")); err != nil { + t.Fatal(err) + } + + if err := d.Remove("test"); err != nil { + t.Fatal(err) + } + + if _, err := os.Stat(path.Join(dir, "subvoltest")); !os.IsNotExist(err) { + t.Fatalf("expected not exist error on nested subvol, got: %v", err) + } +} + +func TestBtrfsTeardown(t *testing.T) { + graphtest.PutDriver(t) +} diff --git a/components/engine/daemon/graphdriver/btrfs/dummy_unsupported.go b/components/engine/daemon/graphdriver/btrfs/dummy_unsupported.go new file mode 100644 index 0000000000..f07088887a --- /dev/null +++ b/components/engine/daemon/graphdriver/btrfs/dummy_unsupported.go @@ -0,0 +1,3 @@ +// +build !linux !cgo + +package btrfs diff --git a/components/engine/daemon/graphdriver/btrfs/version.go b/components/engine/daemon/graphdriver/btrfs/version.go new file mode 100644 index 0000000000..73d90cdd71 --- /dev/null +++ b/components/engine/daemon/graphdriver/btrfs/version.go @@ -0,0 +1,26 @@ +// +build linux,!btrfs_noversion + +package btrfs + +/* +#include + +// around version 3.16, they did not define lib version yet +#ifndef BTRFS_LIB_VERSION +#define BTRFS_LIB_VERSION -1 +#endif + +// upstream had removed it, but now it will be coming back +#ifndef BTRFS_BUILD_VERSION +#define BTRFS_BUILD_VERSION "-" +#endif +*/ +import "C" + +func btrfsBuildVersion() string { + return string(C.BTRFS_BUILD_VERSION) +} + +func btrfsLibVersion() int { + return int(C.BTRFS_LIB_VERSION) +} diff --git a/components/engine/daemon/graphdriver/btrfs/version_none.go 
b/components/engine/daemon/graphdriver/btrfs/version_none.go new file mode 100644 index 0000000000..f802fbc629 --- /dev/null +++ b/components/engine/daemon/graphdriver/btrfs/version_none.go @@ -0,0 +1,14 @@ +// +build linux,btrfs_noversion + +package btrfs + +// TODO(vbatts) remove this work-around once supported linux distros are on +// btrfs utilities of >= 3.16.1 + +func btrfsBuildVersion() string { + return "-" +} + +func btrfsLibVersion() int { + return -1 +} diff --git a/components/engine/daemon/graphdriver/btrfs/version_test.go b/components/engine/daemon/graphdriver/btrfs/version_test.go new file mode 100644 index 0000000000..d78d577179 --- /dev/null +++ b/components/engine/daemon/graphdriver/btrfs/version_test.go @@ -0,0 +1,13 @@ +// +build linux,!btrfs_noversion + +package btrfs + +import ( + "testing" +) + +func TestLibVersion(t *testing.T) { + if btrfsLibVersion() <= 0 { + t.Error("expected output from btrfs lib version > 0") + } +} diff --git a/components/engine/daemon/graphdriver/counter.go b/components/engine/daemon/graphdriver/counter.go new file mode 100644 index 0000000000..72551a38d4 --- /dev/null +++ b/components/engine/daemon/graphdriver/counter.go @@ -0,0 +1,59 @@ +package graphdriver + +import "sync" + +type minfo struct { + check bool + count int +} + +// RefCounter is a generic counter for use by graphdriver Get/Put calls +type RefCounter struct { + counts map[string]*minfo + mu sync.Mutex + checker Checker +} + +// NewRefCounter returns a new RefCounter +func NewRefCounter(c Checker) *RefCounter { + return &RefCounter{ + checker: c, + counts: make(map[string]*minfo), + } +} + +// Increment increases the ref count for the given id and returns the current count +func (c *RefCounter) Increment(path string) int { + return c.incdec(path, func(minfo *minfo) { + minfo.count++ + }) +} + +// Decrement decreases the ref count for the given id and returns the current count +func (c *RefCounter) Decrement(path string) int { + return c.incdec(path, func(minfo *minfo) { + minfo.count-- + }) +} + +func (c *RefCounter) incdec(path string, infoOp func(minfo *minfo)) int { + c.mu.Lock() + m := c.counts[path] + if m == nil { + m = &minfo{} + c.counts[path] = m + } + // if we are checking this path for the first time check to make sure + // if it was already mounted on the system and make sure we have a correct ref + // count if it is mounted as it is in use. + if !m.check { + m.check = true + if c.checker.IsMounted(path) { + m.count++ + } + } + infoOp(m) + count := m.count + c.mu.Unlock() + return count +} diff --git a/components/engine/daemon/graphdriver/devmapper/README.md b/components/engine/daemon/graphdriver/devmapper/README.md new file mode 100644 index 0000000000..6594fa65f0 --- /dev/null +++ b/components/engine/daemon/graphdriver/devmapper/README.md @@ -0,0 +1,98 @@ +# devicemapper - a storage backend based on Device Mapper + +## Theory of operation + +The device mapper graphdriver uses the device mapper thin provisioning +module (dm-thinp) to implement CoW snapshots. The preferred model is +to have a thin pool reserved outside of Docker and passed to the +daemon via the `--storage-opt dm.thinpooldev` option. Alternatively, +the device mapper graphdriver can setup a block device to handle this +for you via the `--storage-opt dm.directlvm_device` option. + +As a fallback if no thin pool is provided, loopback files will be +created. Loopback is very slow, but can be used without any +pre-configuration of storage. 
It is strongly recommended that you do
+not use loopback in production. Ensure your Docker daemon has a
+`--storage-opt dm.thinpooldev` argument provided.
+
+In loopback, a thin pool is created at `/var/lib/docker/devicemapper`
+(devicemapper graph location) based on two block devices, one for
+data and one for metadata. By default these block devices are created
+automatically by using loopback mounts of automatically created sparse
+files.
+
+The default loopback files used are
+`/var/lib/docker/devicemapper/devicemapper/data` and
+`/var/lib/docker/devicemapper/devicemapper/metadata`. Additional metadata
+required to map from docker entities to the corresponding devicemapper
+volumes is stored in the `/var/lib/docker/devicemapper/devicemapper/json`
+file (encoded as JSON).
+
+In order to support multiple devicemapper graphs on a system, the thin
+pool will be named something like: `docker-0:33-19478248-pool`, where
+the `0:33` part is the major/minor device number and `19478248` is the
+inode number of the `/var/lib/docker/devicemapper` directory.
+
+On the thin pool, docker automatically creates a base thin device,
+called something like `docker-0:33-19478248-base`, of a fixed
+size. This is automatically formatted with an empty filesystem on
+creation. This device is the base of all docker images and
+containers. All base images are snapshots of this device and those
+images are then in turn used as snapshots for other images and
+eventually containers.
+
+## Information on `docker info`
+
+As of docker-1.4.1, `docker info` when using the `devicemapper` storage driver
+will display something like:
+
+ $ sudo docker info
+ [...]
+ Storage Driver: devicemapper
+ Pool Name: docker-253:1-17538953-pool
+ Pool Blocksize: 65.54 kB
+ Base Device Size: 107.4 GB
+ Data file: /dev/loop4
+ Metadata file: /dev/loop4
+ Data Space Used: 2.536 GB
+ Data Space Total: 107.4 GB
+ Data Space Available: 104.8 GB
+ Metadata Space Used: 7.93 MB
+ Metadata Space Total: 2.147 GB
+ Metadata Space Available: 2.14 GB
+ Udev Sync Supported: true
+ Data loop file: /home/docker/devicemapper/devicemapper/data
+ Metadata loop file: /home/docker/devicemapper/devicemapper/metadata
+ Library Version: 1.02.82-git (2013-10-04)
+ [...]
+
+### Status items
+
+Each item in the indented section under `Storage Driver: devicemapper` is
+status information about the driver.
+ * `Pool Name` the name of the devicemapper pool for this driver.
+ * `Pool Blocksize` tells the blocksize the thin pool was initialized with. This only changes on creation.
+ * `Base Device Size` tells the maximum size of a container or image
+ * `Data file` blockdevice file used for the devicemapper data
+ * `Metadata file` blockdevice file used for the devicemapper metadata
+ * `Data Space Used` tells how much of the `Data file` is currently used
+ * `Data Space Total` tells the maximum size of the `Data file`
+ * `Data Space Available` tells how much free space there is in the `Data file`. If you are using a loop device this will report the actual space available to the loop device on the underlying filesystem.
+ * `Metadata Space Used` tells how much of the `Metadata file` is currently used
+ * `Metadata Space Total` tells the maximum size of the `Metadata file`
+ * `Metadata Space Available` tells how much free space there is in the `Metadata file`. If you are using a loop device this will report the actual space available to the loop device on the underlying filesystem.
+ * `Udev Sync Supported` tells whether devicemapper is able to sync with Udev. Should be `true`.
+ * `Data loop file` file attached to `Data file`, if loopback device is used
+ * `Metadata loop file` file attached to `Metadata file`, if loopback device is used
+ * `Library Version` from the libdevmapper used
+
+## About the devicemapper options
+
+The devicemapper backend supports some options that you can specify
+when starting the docker daemon using the `--storage-opt` flags.
+This uses the `dm` prefix and would be used something like `dockerd --storage-opt dm.foo=bar`.
+
+These options are currently documented both in [the man
+page](../../../man/docker.1.md) and in [the online
+documentation](https://docs.docker.com/engine/reference/commandline/dockerd/#/storage-driver-options).
+If you add an option, update both the `man` page and the documentation. diff --git a/components/engine/daemon/graphdriver/devmapper/device_setup.go b/components/engine/daemon/graphdriver/devmapper/device_setup.go new file mode 100644 index 0000000000..31eeae79f1 --- /dev/null +++ b/components/engine/daemon/graphdriver/devmapper/device_setup.go @@ -0,0 +1,247 @@
+package devmapper
+
+import (
+ "bufio"
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "reflect"
+ "strings"
+
+ "github.com/Sirupsen/logrus"
+ "github.com/pkg/errors"
+)
+
+type directLVMConfig struct {
+ Device string
+ ThinpPercent uint64
+ ThinpMetaPercent uint64
+ AutoExtendPercent uint64
+ AutoExtendThreshold uint64
+}
+
+var (
+ errThinpPercentMissing = errors.New("must set both `dm.thinp_percent` and `dm.thinp_metapercent` if either is specified")
+ errThinpPercentTooBig = errors.New("combined `dm.thinp_percent` and `dm.thinp_metapercent` must not be greater than 100")
+ errMissingSetupDevice = errors.New("must provide device path in `dm.setup_device` in order to configure direct-lvm")
+)
+
+func validateLVMConfig(cfg directLVMConfig) error {
+ if reflect.DeepEqual(cfg, directLVMConfig{}) {
+ return nil
+ }
+ if cfg.Device == "" {
+ return errMissingSetupDevice
+ }
+ if (cfg.ThinpPercent > 0 && cfg.ThinpMetaPercent == 0) || cfg.ThinpMetaPercent > 0 && cfg.ThinpPercent == 0 {
+ return errThinpPercentMissing
+ }
+
+ if cfg.ThinpPercent+cfg.ThinpMetaPercent > 100 {
+ return errThinpPercentTooBig
+ }
+ return nil
+}
+
+func checkDevAvailable(dev string) error {
+ lvmScan, err := exec.LookPath("lvmdiskscan")
+ if err != nil {
+ logrus.Debug("could not find lvmdiskscan")
+ return nil
+ }
+
+ out, err := exec.Command(lvmScan).CombinedOutput()
+ if err != nil {
+ logrus.WithError(err).Error(string(out))
+ return nil
+ }
+
+ if !bytes.Contains(out, []byte(dev)) {
+ return errors.Errorf("%s is not available for use with devicemapper", dev)
+ }
+ return nil
+}
+
+func checkDevInVG(dev string) error {
+ pvDisplay, err := exec.LookPath("pvdisplay")
+ if err != nil {
+ logrus.Debug("could not find pvdisplay")
+ return nil
+ }
+
+ out, err := exec.Command(pvDisplay, dev).CombinedOutput()
+ if err != nil {
+ logrus.WithError(err).Error(string(out))
+ return nil
+ }
+
+ scanner := bufio.NewScanner(bytes.NewReader(bytes.TrimSpace(out)))
+ for scanner.Scan() {
+ fields := strings.SplitAfter(strings.TrimSpace(scanner.Text()), "VG Name")
+ if len(fields) > 1 {
+ // got a "VG Name" line
+ vg := strings.TrimSpace(fields[1])
+ if len(vg) > 0 {
+ return errors.Errorf("%s is already part of a volume group %q: must remove this device from any volume group or provide a different device", dev, vg)
+ }
+ logrus.Error(fields)
+ break
+ }
+ }
+ return nil
+}
+
+func checkDevHasFS(dev string) error {
+ blkid, err := exec.LookPath("blkid")
+ if err != nil {
+ logrus.Debug("could not find blkid")
+ return nil
+ }
+
+ out, err := exec.Command(blkid, dev).CombinedOutput()
+ if err != nil {
+ logrus.WithError(err).Error(string(out))
+ return nil
+ }
+
+ fields := bytes.Fields(out)
+ for _, f := range fields {
+ kv := bytes.Split(f, []byte{'='})
+ if bytes.Equal(kv[0], []byte("TYPE")) {
+ v := bytes.Trim(kv[1], "\"")
+ if len(v) > 0 {
+ return errors.Errorf("%s has a filesystem already, use dm.directlvm_device_force=true if you want to wipe the device", dev)
+ }
+ return nil
+ }
+ }
+ return nil
+}
+
+func verifyBlockDevice(dev string, force bool) error {
+ if err := checkDevAvailable(dev); err != nil {
+ return err
+ }
+ if err := checkDevInVG(dev); err != nil {
+ return err
+ }
+
+ if force {
+ return nil
+ }
+
+ if err := checkDevHasFS(dev); err != nil {
+ return err
+ }
+ return nil
+}
+
+func readLVMConfig(root string) (directLVMConfig, error) {
+ var cfg directLVMConfig
+
+ p := filepath.Join(root, "setup-config.json")
+ b, err := ioutil.ReadFile(p)
+ if err != nil {
+ if os.IsNotExist(err) {
+ return cfg, nil
+ }
+ return cfg, errors.Wrap(err, "error reading existing setup config")
+ }
+
+ // check if this is just an empty file, no need to produce a json error later if so
+ if len(b) == 0 {
+ return cfg, nil
+ }
+
+ err = json.Unmarshal(b, &cfg)
+ return cfg, errors.Wrap(err, "error unmarshaling previous device setup config")
+}
+
+func writeLVMConfig(root string, cfg directLVMConfig) error {
+ p := filepath.Join(root, "setup-config.json")
+ b, err := json.Marshal(cfg)
+ if err != nil {
+ return errors.Wrap(err, "error marshalling direct lvm config")
+ }
+ err = ioutil.WriteFile(p, b, 0600)
+ return errors.Wrap(err, "error writing direct lvm config to file")
+}
+
+func setupDirectLVM(cfg directLVMConfig) error {
+ pvCreate, err := exec.LookPath("pvcreate")
+ if err != nil {
+ return errors.Wrap(err, "error looking up command `pvcreate` while setting up direct lvm")
+ }
+
+ vgCreate, err := exec.LookPath("vgcreate")
+ if err != nil {
+ return errors.Wrap(err, "error looking up command `vgcreate` while setting up direct lvm")
+ }
+
+ lvCreate, err := exec.LookPath("lvcreate")
+ if err != nil {
+ return errors.Wrap(err, "error looking up command `lvcreate` while setting up direct lvm")
+ }
+
+ lvConvert, err := exec.LookPath("lvconvert")
+ if err != nil {
+ return errors.Wrap(err, "error looking up command `lvconvert` while setting up direct lvm")
+ }
+
+ lvChange, err := exec.LookPath("lvchange")
+ if err != nil {
+ return errors.Wrap(err, "error looking up command `lvchange` while setting up direct lvm")
+ }
+
+ if cfg.AutoExtendPercent == 0 {
+ cfg.AutoExtendPercent = 20
+ }
+
+ if cfg.AutoExtendThreshold == 0 {
+ cfg.AutoExtendThreshold = 80
+ }
+
+ if cfg.ThinpPercent == 0 {
+ cfg.ThinpPercent = 95
+ }
+ if cfg.ThinpMetaPercent == 0 {
+ cfg.ThinpMetaPercent = 1
+ }
+
+ out, err := exec.Command(pvCreate, "-f", cfg.Device).CombinedOutput()
+ if err != nil {
+ return errors.Wrap(err, string(out))
+ }
+
+ out, err = exec.Command(vgCreate, "docker", cfg.Device).CombinedOutput()
+ if err != nil {
+ return errors.Wrap(err, string(out))
+ }
+
+ out, err = exec.Command(lvCreate, "--wipesignatures", "y", "-n", "thinpool", "docker", "--extents", fmt.Sprintf("%d%%VG", cfg.ThinpPercent)).CombinedOutput()
+ if err != nil {
+ return errors.Wrap(err, string(out))
+ }
+ out, err = exec.Command(lvCreate, "--wipesignatures", "y", "-n", "thinpoolmeta", "docker", "--extents", fmt.Sprintf("%d%%VG",
cfg.ThinpMetaPercent)).CombinedOutput()
+ if err != nil {
+ return errors.Wrap(err, string(out))
+ }
+
+ out, err = exec.Command(lvConvert, "-y", "--zero", "n", "-c", "512K", "--thinpool", "docker/thinpool", "--poolmetadata", "docker/thinpoolmeta").CombinedOutput()
+ if err != nil {
+ return errors.Wrap(err, string(out))
+ }
+
+ profile := fmt.Sprintf("activation{\nthin_pool_autoextend_threshold=%d\nthin_pool_autoextend_percent=%d\n}", cfg.AutoExtendThreshold, cfg.AutoExtendPercent)
+ err = ioutil.WriteFile("/etc/lvm/profile/docker-thinpool.profile", []byte(profile), 0600)
+ if err != nil {
+ return errors.Wrap(err, "error writing docker thinp autoextend profile")
+ }
+
+ out, err = exec.Command(lvChange, "--metadataprofile", "docker-thinpool", "docker/thinpool").CombinedOutput()
+ return errors.Wrap(err, string(out))
+} diff --git a/components/engine/daemon/graphdriver/devmapper/deviceset.go b/components/engine/daemon/graphdriver/devmapper/deviceset.go new file mode 100644 index 0000000000..876c14ea38 --- /dev/null +++ b/components/engine/daemon/graphdriver/devmapper/deviceset.go @@ -0,0 +1,2798 @@
+// +build linux
+
+package devmapper
+
+import (
+ "bufio"
+ "encoding/json"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "path"
+ "path/filepath"
+ "reflect"
+ "strconv"
+ "strings"
+ "sync"
+ "syscall"
+ "time"
+
+ "github.com/Sirupsen/logrus"
+
+ "github.com/docker/docker/daemon/graphdriver"
+ "github.com/docker/docker/dockerversion"
+ "github.com/docker/docker/pkg/devicemapper"
+ "github.com/docker/docker/pkg/idtools"
+ "github.com/docker/docker/pkg/loopback"
+ "github.com/docker/docker/pkg/mount"
+ "github.com/docker/docker/pkg/parsers"
+ units "github.com/docker/go-units"
+ "github.com/pkg/errors"
+
+ "github.com/opencontainers/selinux/go-selinux/label"
+)
+
+var (
+ defaultDataLoopbackSize int64 = 100 * 1024 * 1024 * 1024
+ defaultMetaDataLoopbackSize int64 = 2 * 1024 * 1024 * 1024
+ defaultBaseFsSize uint64 = 10 * 1024 * 1024 * 1024
+ defaultThinpBlockSize uint32 = 128 // 64K = 128 512b sectors
+ defaultUdevSyncOverride = false
+ maxDeviceID = 0xffffff // 24 bit, pool limit
+ deviceIDMapSz = (maxDeviceID + 1) / 8
+ // We retry device removal so many times that even error messages
+ // would fill up the console during normal operation, so we only log
+ // Fatal messages by default.
+ logLevel = devicemapper.LogLevelFatal
+ driverDeferredRemovalSupport = false
+ enableDeferredRemoval = false
+ enableDeferredDeletion = false
+ userBaseSize = false
+ defaultMinFreeSpacePercent uint32 = 10
+ lvmSetupConfigForce bool
+)
+
+const deviceSetMetaFile string = "deviceset-metadata"
+const transactionMetaFile string = "transaction-metadata"
+
+type transaction struct {
+ OpenTransactionID uint64 `json:"open_transaction_id"`
+ DeviceIDHash string `json:"device_hash"`
+ DeviceID int `json:"device_id"`
+}
+
+type devInfo struct {
+ Hash string `json:"-"`
+ DeviceID int `json:"device_id"`
+ Size uint64 `json:"size"`
+ TransactionID uint64 `json:"transaction_id"`
+ Initialized bool `json:"initialized"`
+ Deleted bool `json:"deleted"`
+ devices *DeviceSet
+
+ // The global DeviceSet lock guarantees that we serialize all
+ // the calls to libdevmapper (which is not threadsafe), but we
+ // sometimes release that lock while sleeping. In that case
+ // this per-device lock is still held, protecting against
+ // other accesses to the device that we're doing the wait on.
+ //
+ // WARNING: In order to avoid AB-BA deadlocks when releasing
+ // the global lock while holding the per-device locks, all
+ // device locks must be acquired *before* the global DeviceSet lock,
+ // and multiple device locks should be acquired parent before child.
+ lock sync.Mutex
+}
+
+type metaData struct {
+ Devices map[string]*devInfo `json:"Devices"`
+}
+
+// DeviceSet holds information about the list of devices
+type DeviceSet struct {
+ metaData `json:"-"`
+ sync.Mutex `json:"-"` // Protects all fields of DeviceSet and serializes calls into libdevmapper
+ root string
+ devicePrefix string
+ TransactionID uint64 `json:"-"`
+ NextDeviceID int `json:"next_device_id"`
+ deviceIDMap []byte
+
+ // Options
+ dataLoopbackSize int64
+ metaDataLoopbackSize int64
+ baseFsSize uint64
+ filesystem string
+ mountOptions string
+ mkfsArgs []string
+ dataDevice string // block or loop dev
+ dataLoopFile string // loopback file, if used
+ metadataDevice string // block or loop dev
+ metadataLoopFile string // loopback file, if used
+ doBlkDiscard bool
+ thinpBlockSize uint32
+ thinPoolDevice string
+ transaction `json:"-"`
+ overrideUdevSyncCheck bool
+ deferredRemove bool // use deferred removal
+ deferredDelete bool // use deferred deletion
+ BaseDeviceUUID string // save UUID of base device
+ BaseDeviceFilesystem string // save filesystem of base device
+ nrDeletedDevices uint // number of deleted devices
+ deletionWorkerTicker *time.Ticker
+ uidMaps []idtools.IDMap
+ gidMaps []idtools.IDMap
+ minFreeSpacePercent uint32 // min free space percentage in thinpool
+ xfsNospaceRetries string // max retries when xfs receives ENOSPC
+ lvmSetupConfig directLVMConfig
+}
+
+// DiskUsage contains information about disk usage and is used when reporting Status of a device.
+type DiskUsage struct {
+ // Used bytes on the disk.
+ Used uint64
+ // Total bytes on the disk.
+ Total uint64
+ // Available bytes on the disk.
+ Available uint64
+}
+
+// Status contains information about the device set.
+type Status struct {
+ // PoolName is the name of the data pool.
+ PoolName string
+ // DataFile is the actual block device for data.
+ DataFile string
+ // DataLoopback is the loopback file, if used.
+ DataLoopback string
+ // MetadataFile is the actual block device for metadata.
+ MetadataFile string
+ // MetadataLoopback is the loopback file, if used.
+ MetadataLoopback string
+ // Data is the disk used for data.
+ Data DiskUsage
+ // Metadata is the disk used for metadata.
+ Metadata DiskUsage
+ // BaseDeviceSize is the base size of containers and images.
+ BaseDeviceSize uint64
+ // BaseDeviceFS is the backing filesystem.
+ BaseDeviceFS string
+ // SectorSize is the size of a sector.
+ SectorSize uint64
+ // UdevSyncSupported is true if udev sync is supported.
+ UdevSyncSupported bool
+ // DeferredRemoveEnabled is true if deferred removal is enabled.
+ DeferredRemoveEnabled bool
+ // True if deferred deletion is enabled. This is different from
+ // deferred removal. "removal" means that device mapper device is
+ // deactivated. Thin device is still in thin pool and can be activated
+ // again. But "deletion" means that thin device will be deleted from
+ // thin pool and it can't be activated again.
+ DeferredDeleteEnabled bool
+ DeferredDeletedDeviceCount uint
+ MinFreeSpace uint64
+}
+
+// Structure used to export image/container metadata in docker inspect.
+type deviceMetadata struct {
+	deviceID   int
+	deviceSize uint64 // size in bytes
+	deviceName string // Device name as used during activation
+}
+
+// DevStatus returns information about a mounted device, including its id, size and sector information.
+type DevStatus struct {
+	// DeviceID is the id of the device.
+	DeviceID int
+	// Size is the size of the filesystem.
+	Size uint64
+	// TransactionID is a unique integer per device set used to identify an operation on the file system; this number is incremental.
+	TransactionID uint64
+	// SizeInSectors indicates the size of the sectors allocated.
+	SizeInSectors uint64
+	// MappedSectors indicates number of mapped sectors.
+	MappedSectors uint64
+	// HighestMappedSector is the pointer to the highest mapped sector.
+	HighestMappedSector uint64
+}
+
+func getDevName(name string) string {
+	return "/dev/mapper/" + name
+}
+
+func (info *devInfo) Name() string {
+	hash := info.Hash
+	if hash == "" {
+		hash = "base"
+	}
+	return fmt.Sprintf("%s-%s", info.devices.devicePrefix, hash)
+}
+
+func (info *devInfo) DevName() string {
+	return getDevName(info.Name())
+}
+
+func (devices *DeviceSet) loopbackDir() string {
+	return path.Join(devices.root, "devicemapper")
+}
+
+func (devices *DeviceSet) metadataDir() string {
+	return path.Join(devices.root, "metadata")
+}
+
+func (devices *DeviceSet) metadataFile(info *devInfo) string {
+	file := info.Hash
+	if file == "" {
+		file = "base"
+	}
+	return path.Join(devices.metadataDir(), file)
+}
+
+func (devices *DeviceSet) transactionMetaFile() string {
+	return path.Join(devices.metadataDir(), transactionMetaFile)
+}
+
+func (devices *DeviceSet) deviceSetMetaFile() string {
+	return path.Join(devices.metadataDir(), deviceSetMetaFile)
+}
+
+func (devices *DeviceSet) oldMetadataFile() string {
+	return path.Join(devices.loopbackDir(), "json")
+}
+
+func (devices *DeviceSet) getPoolName() string {
+	if devices.thinPoolDevice == "" {
+		return devices.devicePrefix + "-pool"
+	}
+	return devices.thinPoolDevice
+}
+
+func (devices *DeviceSet) getPoolDevName() string {
+	return getDevName(devices.getPoolName())
+}
+
+func (devices *DeviceSet) hasImage(name string) bool {
+	dirname := devices.loopbackDir()
+	filename := path.Join(dirname, name)
+
+	_, err := os.Stat(filename)
+	return err == nil
+}
+
+// ensureImage creates a sparse file of <size> bytes at the path
+// <root>/devicemapper/<name>.
+// If the file already exists and new size is larger than its current size, it grows to the new size.
+// Either way it returns the full path.
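+//
+// A hypothetical call, assuming the driver root is /var/lib/docker/devicemapper:
+//
+//	filename, err := devices.ensureImage("data", 100*1024*1024*1024)
+//	// filename == "/var/lib/docker/devicemapper/devicemapper/data"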
+func (devices *DeviceSet) ensureImage(name string, size int64) (string, error) { + dirname := devices.loopbackDir() + filename := path.Join(dirname, name) + + uid, gid, err := idtools.GetRootUIDGID(devices.uidMaps, devices.gidMaps) + if err != nil { + return "", err + } + if err := idtools.MkdirAllAs(dirname, 0700, uid, gid); err != nil && !os.IsExist(err) { + return "", err + } + + if fi, err := os.Stat(filename); err != nil { + if !os.IsNotExist(err) { + return "", err + } + logrus.Debugf("devmapper: Creating loopback file %s for device-manage use", filename) + file, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE, 0600) + if err != nil { + return "", err + } + defer file.Close() + + if err := file.Truncate(size); err != nil { + return "", err + } + } else { + if fi.Size() < size { + file, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE, 0600) + if err != nil { + return "", err + } + defer file.Close() + if err := file.Truncate(size); err != nil { + return "", fmt.Errorf("devmapper: Unable to grow loopback file %s: %v", filename, err) + } + } else if fi.Size() > size { + logrus.Warnf("devmapper: Can't shrink loopback file %s", filename) + } + } + return filename, nil +} + +func (devices *DeviceSet) allocateTransactionID() uint64 { + devices.OpenTransactionID = devices.TransactionID + 1 + return devices.OpenTransactionID +} + +func (devices *DeviceSet) updatePoolTransactionID() error { + if err := devicemapper.SetTransactionID(devices.getPoolDevName(), devices.TransactionID, devices.OpenTransactionID); err != nil { + return fmt.Errorf("devmapper: Error setting devmapper transaction ID: %s", err) + } + devices.TransactionID = devices.OpenTransactionID + return nil +} + +func (devices *DeviceSet) removeMetadata(info *devInfo) error { + if err := os.RemoveAll(devices.metadataFile(info)); err != nil { + return fmt.Errorf("devmapper: Error removing metadata file %s: %s", devices.metadataFile(info), err) + } + return nil +} + +// Given json data and file path, write it to disk +func (devices *DeviceSet) writeMetaFile(jsonData []byte, filePath string) error { + tmpFile, err := ioutil.TempFile(devices.metadataDir(), ".tmp") + if err != nil { + return fmt.Errorf("devmapper: Error creating metadata file: %s", err) + } + + n, err := tmpFile.Write(jsonData) + if err != nil { + return fmt.Errorf("devmapper: Error writing metadata to %s: %s", tmpFile.Name(), err) + } + if n < len(jsonData) { + return io.ErrShortWrite + } + if err := tmpFile.Sync(); err != nil { + return fmt.Errorf("devmapper: Error syncing metadata file %s: %s", tmpFile.Name(), err) + } + if err := tmpFile.Close(); err != nil { + return fmt.Errorf("devmapper: Error closing metadata file %s: %s", tmpFile.Name(), err) + } + if err := os.Rename(tmpFile.Name(), filePath); err != nil { + return fmt.Errorf("devmapper: Error committing metadata file %s: %s", tmpFile.Name(), err) + } + + return nil +} + +func (devices *DeviceSet) saveMetadata(info *devInfo) error { + jsonData, err := json.Marshal(info) + if err != nil { + return fmt.Errorf("devmapper: Error encoding metadata to json: %s", err) + } + if err := devices.writeMetaFile(jsonData, devices.metadataFile(info)); err != nil { + return err + } + return nil +} + +func (devices *DeviceSet) markDeviceIDUsed(deviceID int) { + var mask byte + i := deviceID % 8 + mask = 1 << uint(i) + devices.deviceIDMap[deviceID/8] = devices.deviceIDMap[deviceID/8] | mask +} + +func (devices *DeviceSet) markDeviceIDFree(deviceID int) { + var mask byte + i := deviceID % 8 + mask = ^(1 << uint(i)) + 
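+	// Worked example (hypothetical deviceID = 11): i = 11 % 8 = 3, so
+	// mask = ^(1 << 3) = 0b11110111, and the AND below clears only bit 3
+	// of byte deviceIDMap[11/8] while leaving the other bits intact.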
devices.deviceIDMap[deviceID/8] = devices.deviceIDMap[deviceID/8] & mask +} + +func (devices *DeviceSet) isDeviceIDFree(deviceID int) bool { + var mask byte + i := deviceID % 8 + mask = (1 << uint(i)) + return (devices.deviceIDMap[deviceID/8] & mask) == 0 +} + +// Should be called with devices.Lock() held. +func (devices *DeviceSet) lookupDevice(hash string) (*devInfo, error) { + info := devices.Devices[hash] + if info == nil { + info = devices.loadMetadata(hash) + if info == nil { + return nil, fmt.Errorf("devmapper: Unknown device %s", hash) + } + + devices.Devices[hash] = info + } + return info, nil +} + +func (devices *DeviceSet) lookupDeviceWithLock(hash string) (*devInfo, error) { + devices.Lock() + defer devices.Unlock() + info, err := devices.lookupDevice(hash) + return info, err +} + +// This function relies on that device hash map has been loaded in advance. +// Should be called with devices.Lock() held. +func (devices *DeviceSet) constructDeviceIDMap() { + logrus.Debug("devmapper: constructDeviceIDMap()") + defer logrus.Debug("devmapper: constructDeviceIDMap() END") + + for _, info := range devices.Devices { + devices.markDeviceIDUsed(info.DeviceID) + logrus.Debugf("devmapper: Added deviceId=%d to DeviceIdMap", info.DeviceID) + } +} + +func (devices *DeviceSet) deviceFileWalkFunction(path string, finfo os.FileInfo) error { + + // Skip some of the meta files which are not device files. + if strings.HasSuffix(finfo.Name(), ".migrated") { + logrus.Debugf("devmapper: Skipping file %s", path) + return nil + } + + if strings.HasPrefix(finfo.Name(), ".") { + logrus.Debugf("devmapper: Skipping file %s", path) + return nil + } + + if finfo.Name() == deviceSetMetaFile { + logrus.Debugf("devmapper: Skipping file %s", path) + return nil + } + + if finfo.Name() == transactionMetaFile { + logrus.Debugf("devmapper: Skipping file %s", path) + return nil + } + + logrus.Debugf("devmapper: Loading data for file %s", path) + + hash := finfo.Name() + if hash == "base" { + hash = "" + } + + // Include deleted devices also as cleanup delete device logic + // will go through it and see if there are any deleted devices. + if _, err := devices.lookupDevice(hash); err != nil { + return fmt.Errorf("devmapper: Error looking up device %s:%v", hash, err) + } + + return nil +} + +func (devices *DeviceSet) loadDeviceFilesOnStart() error { + logrus.Debug("devmapper: loadDeviceFilesOnStart()") + defer logrus.Debug("devmapper: loadDeviceFilesOnStart() END") + + var scan = func(path string, info os.FileInfo, err error) error { + if err != nil { + logrus.Debugf("devmapper: Can't walk the file %s", path) + return nil + } + + // Skip any directories + if info.IsDir() { + return nil + } + + return devices.deviceFileWalkFunction(path, info) + } + + return filepath.Walk(devices.metadataDir(), scan) +} + +// Should be called with devices.Lock() held. +func (devices *DeviceSet) unregisterDevice(hash string) error { + logrus.Debugf("devmapper: unregisterDevice(%v)", hash) + info := &devInfo{ + Hash: hash, + } + + delete(devices.Devices, hash) + + if err := devices.removeMetadata(info); err != nil { + logrus.Debugf("devmapper: Error removing metadata: %s", err) + return err + } + + return nil +} + +// Should be called with devices.Lock() held. 
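+// A minimal caller sketch (createRegisterDevice below follows this pattern):
+//
+//	devices.Lock()
+//	defer devices.Unlock()
+//	info, err := devices.registerDevice(id, hash, size, devices.OpenTransactionID)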
+func (devices *DeviceSet) registerDevice(id int, hash string, size uint64, transactionID uint64) (*devInfo, error) { + logrus.Debugf("devmapper: registerDevice(%v, %v)", id, hash) + info := &devInfo{ + Hash: hash, + DeviceID: id, + Size: size, + TransactionID: transactionID, + Initialized: false, + devices: devices, + } + + devices.Devices[hash] = info + + if err := devices.saveMetadata(info); err != nil { + // Try to remove unused device + delete(devices.Devices, hash) + return nil, err + } + + return info, nil +} + +func (devices *DeviceSet) activateDeviceIfNeeded(info *devInfo, ignoreDeleted bool) error { + logrus.Debugf("devmapper: activateDeviceIfNeeded(%v)", info.Hash) + + if info.Deleted && !ignoreDeleted { + return fmt.Errorf("devmapper: Can't activate device %v as it is marked for deletion", info.Hash) + } + + // Make sure deferred removal on device is canceled, if one was + // scheduled. + if err := devices.cancelDeferredRemovalIfNeeded(info); err != nil { + return fmt.Errorf("devmapper: Device Deferred Removal Cancellation Failed: %s", err) + } + + if devinfo, _ := devicemapper.GetInfo(info.Name()); devinfo != nil && devinfo.Exists != 0 { + return nil + } + + return devicemapper.ActivateDevice(devices.getPoolDevName(), info.Name(), info.DeviceID, info.Size) +} + +// Return true only if kernel supports xfs and mkfs.xfs is available +func xfsSupported() bool { + // Make sure mkfs.xfs is available + if _, err := exec.LookPath("mkfs.xfs"); err != nil { + return false + } + + // Check if kernel supports xfs filesystem or not. + exec.Command("modprobe", "xfs").Run() + + f, err := os.Open("/proc/filesystems") + if err != nil { + logrus.Warnf("devmapper: Could not check if xfs is supported: %v", err) + return false + } + defer f.Close() + + s := bufio.NewScanner(f) + for s.Scan() { + if strings.HasSuffix(s.Text(), "\txfs") { + return true + } + } + + if err := s.Err(); err != nil { + logrus.Warnf("devmapper: Could not check if xfs is supported: %v", err) + } + return false +} + +func determineDefaultFS() string { + if xfsSupported() { + return "xfs" + } + + logrus.Warn("devmapper: XFS is not supported in your system. Either the kernel doesn't support it or mkfs.xfs is not in your PATH. Defaulting to ext4 filesystem") + return "ext4" +} + +func (devices *DeviceSet) createFilesystem(info *devInfo) (err error) { + devname := info.DevName() + + args := []string{} + args = append(args, devices.mkfsArgs...) 
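+	// With no user-supplied dm.mkfsarg options, args ends up holding just the
+	// device name, so the xfs branch below runs, e.g. (hypothetical device):
+	//
+	//	mkfs.xfs /dev/mapper/docker-8:1-261234-base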
+ + args = append(args, devname) + + if devices.filesystem == "" { + devices.filesystem = determineDefaultFS() + } + if err := devices.saveBaseDeviceFilesystem(devices.filesystem); err != nil { + return err + } + + logrus.Infof("devmapper: Creating filesystem %s on device %s", devices.filesystem, info.Name()) + defer func() { + if err != nil { + logrus.Infof("devmapper: Error while creating filesystem %s on device %s: %v", devices.filesystem, info.Name(), err) + } else { + logrus.Infof("devmapper: Successfully created filesystem %s on device %s", devices.filesystem, info.Name()) + } + }() + + switch devices.filesystem { + case "xfs": + err = exec.Command("mkfs.xfs", args...).Run() + case "ext4": + err = exec.Command("mkfs.ext4", append([]string{"-E", "nodiscard,lazy_itable_init=0,lazy_journal_init=0"}, args...)...).Run() + if err != nil { + err = exec.Command("mkfs.ext4", append([]string{"-E", "nodiscard,lazy_itable_init=0"}, args...)...).Run() + } + if err != nil { + return err + } + err = exec.Command("tune2fs", append([]string{"-c", "-1", "-i", "0"}, devname)...).Run() + default: + err = fmt.Errorf("devmapper: Unsupported filesystem type %s", devices.filesystem) + } + return +} + +func (devices *DeviceSet) migrateOldMetaData() error { + // Migrate old metadata file + jsonData, err := ioutil.ReadFile(devices.oldMetadataFile()) + if err != nil && !os.IsNotExist(err) { + return err + } + + if jsonData != nil { + m := metaData{Devices: make(map[string]*devInfo)} + + if err := json.Unmarshal(jsonData, &m); err != nil { + return err + } + + for hash, info := range m.Devices { + info.Hash = hash + devices.saveMetadata(info) + } + if err := os.Rename(devices.oldMetadataFile(), devices.oldMetadataFile()+".migrated"); err != nil { + return err + } + + } + + return nil +} + +// Cleanup deleted devices. It assumes that all the devices have been +// loaded in the hash table. +func (devices *DeviceSet) cleanupDeletedDevices() error { + devices.Lock() + + // If there are no deleted devices, there is nothing to do. + if devices.nrDeletedDevices == 0 { + devices.Unlock() + return nil + } + + var deletedDevices []*devInfo + + for _, info := range devices.Devices { + if !info.Deleted { + continue + } + logrus.Debugf("devmapper: Found deleted device %s.", info.Hash) + deletedDevices = append(deletedDevices, info) + } + + // Delete the deleted devices. DeleteDevice() first takes the info lock + // and then devices.Lock(). So drop it to avoid deadlock. + devices.Unlock() + + for _, info := range deletedDevices { + // This will again try deferred deletion. + if err := devices.DeleteDevice(info.Hash, false); err != nil { + logrus.Warnf("devmapper: Deletion of device %s, device_id=%v failed:%v", info.Hash, info.DeviceID, err) + } + } + + return nil +} + +func (devices *DeviceSet) countDeletedDevices() { + for _, info := range devices.Devices { + if !info.Deleted { + continue + } + devices.nrDeletedDevices++ + } +} + +func (devices *DeviceSet) startDeviceDeletionWorker() { + // Deferred deletion is not enabled. Don't do anything. 
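+	// A sketch of the same ticker pattern in isolation (the real period is
+	// set wherever deletionWorkerTicker is created; 30s is assumed here):
+	//
+	//	t := time.NewTicker(30 * time.Second)
+	//	for range t.C {
+	//		devices.cleanupDeletedDevices()
+	//	}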
+ if !devices.deferredDelete { + return + } + + logrus.Debug("devmapper: Worker to cleanup deleted devices started") + for range devices.deletionWorkerTicker.C { + devices.cleanupDeletedDevices() + } +} + +func (devices *DeviceSet) initMetaData() error { + devices.Lock() + defer devices.Unlock() + + if err := devices.migrateOldMetaData(); err != nil { + return err + } + + _, transactionID, _, _, _, _, err := devices.poolStatus() + if err != nil { + return err + } + + devices.TransactionID = transactionID + + if err := devices.loadDeviceFilesOnStart(); err != nil { + return fmt.Errorf("devmapper: Failed to load device files:%v", err) + } + + devices.constructDeviceIDMap() + devices.countDeletedDevices() + + if err := devices.processPendingTransaction(); err != nil { + return err + } + + // Start a goroutine to cleanup Deleted Devices + go devices.startDeviceDeletionWorker() + return nil +} + +func (devices *DeviceSet) incNextDeviceID() { + // IDs are 24bit, so wrap around + devices.NextDeviceID = (devices.NextDeviceID + 1) & maxDeviceID +} + +func (devices *DeviceSet) getNextFreeDeviceID() (int, error) { + devices.incNextDeviceID() + for i := 0; i <= maxDeviceID; i++ { + if devices.isDeviceIDFree(devices.NextDeviceID) { + devices.markDeviceIDUsed(devices.NextDeviceID) + return devices.NextDeviceID, nil + } + devices.incNextDeviceID() + } + + return 0, fmt.Errorf("devmapper: Unable to find a free device ID") +} + +func (devices *DeviceSet) poolHasFreeSpace() error { + if devices.minFreeSpacePercent == 0 { + return nil + } + + _, _, dataUsed, dataTotal, metadataUsed, metadataTotal, err := devices.poolStatus() + if err != nil { + return err + } + + minFreeData := (dataTotal * uint64(devices.minFreeSpacePercent)) / 100 + if minFreeData < 1 { + minFreeData = 1 + } + dataFree := dataTotal - dataUsed + if dataFree < minFreeData { + return fmt.Errorf("devmapper: Thin Pool has %v free data blocks which is less than minimum required %v free data blocks. Create more free space in thin pool or use dm.min_free_space option to change behavior", (dataTotal - dataUsed), minFreeData) + } + + minFreeMetadata := (metadataTotal * uint64(devices.minFreeSpacePercent)) / 100 + if minFreeMetadata < 1 { + minFreeMetadata = 1 + } + + metadataFree := metadataTotal - metadataUsed + if metadataFree < minFreeMetadata { + return fmt.Errorf("devmapper: Thin Pool has %v free metadata blocks which is less than minimum required %v free metadata blocks. Create more free metadata space in thin pool or use dm.min_free_space option to change behavior", (metadataTotal - metadataUsed), minFreeMetadata) + } + + return nil +} + +func (devices *DeviceSet) createRegisterDevice(hash string) (*devInfo, error) { + devices.Lock() + defer devices.Unlock() + + deviceID, err := devices.getNextFreeDeviceID() + if err != nil { + return nil, err + } + + if err := devices.openTransaction(hash, deviceID); err != nil { + logrus.Debugf("devmapper: Error opening transaction hash = %s deviceID = %d", hash, deviceID) + devices.markDeviceIDFree(deviceID) + return nil, err + } + + for { + if err := devicemapper.CreateDevice(devices.getPoolDevName(), deviceID); err != nil { + if devicemapper.DeviceIDExists(err) { + // Device ID already exists. This should not + // happen. Now we have a mechanism to find + // a free device ID. So something is not right. + // Give a warning and continue. 
+ logrus.Errorf("devmapper: Device ID %d exists in pool but it is supposed to be unused", deviceID) + deviceID, err = devices.getNextFreeDeviceID() + if err != nil { + return nil, err + } + // Save new device id into transaction + devices.refreshTransaction(deviceID) + continue + } + logrus.Debugf("devmapper: Error creating device: %s", err) + devices.markDeviceIDFree(deviceID) + return nil, err + } + break + } + + logrus.Debugf("devmapper: Registering device (id %v) with FS size %v", deviceID, devices.baseFsSize) + info, err := devices.registerDevice(deviceID, hash, devices.baseFsSize, devices.OpenTransactionID) + if err != nil { + _ = devicemapper.DeleteDevice(devices.getPoolDevName(), deviceID) + devices.markDeviceIDFree(deviceID) + return nil, err + } + + if err := devices.closeTransaction(); err != nil { + devices.unregisterDevice(hash) + devicemapper.DeleteDevice(devices.getPoolDevName(), deviceID) + devices.markDeviceIDFree(deviceID) + return nil, err + } + return info, nil +} + +func (devices *DeviceSet) takeSnapshot(hash string, baseInfo *devInfo, size uint64) error { + var ( + devinfo *devicemapper.Info + err error + ) + + if err = devices.poolHasFreeSpace(); err != nil { + return err + } + + if devices.deferredRemove { + devinfo, err = devicemapper.GetInfoWithDeferred(baseInfo.Name()) + if err != nil { + return err + } + if devinfo != nil && devinfo.DeferredRemove != 0 { + err = devices.cancelDeferredRemoval(baseInfo) + if err != nil { + // If Error is ErrEnxio. Device is probably already gone. Continue. + if err != devicemapper.ErrEnxio { + return err + } + devinfo = nil + } else { + defer devices.deactivateDevice(baseInfo) + } + } + } else { + devinfo, err = devicemapper.GetInfo(baseInfo.Name()) + if err != nil { + return err + } + } + + doSuspend := devinfo != nil && devinfo.Exists != 0 + + if doSuspend { + if err = devicemapper.SuspendDevice(baseInfo.Name()); err != nil { + return err + } + defer devicemapper.ResumeDevice(baseInfo.Name()) + } + + if err = devices.createRegisterSnapDevice(hash, baseInfo, size); err != nil { + return err + } + + return nil +} + +func (devices *DeviceSet) createRegisterSnapDevice(hash string, baseInfo *devInfo, size uint64) error { + deviceID, err := devices.getNextFreeDeviceID() + if err != nil { + return err + } + + if err := devices.openTransaction(hash, deviceID); err != nil { + logrus.Debugf("devmapper: Error opening transaction hash = %s deviceID = %d", hash, deviceID) + devices.markDeviceIDFree(deviceID) + return err + } + + for { + if err := devicemapper.CreateSnapDeviceRaw(devices.getPoolDevName(), deviceID, baseInfo.DeviceID); err != nil { + if devicemapper.DeviceIDExists(err) { + // Device ID already exists. This should not + // happen. Now we have a mechanism to find + // a free device ID. So something is not right. + // Give a warning and continue. 
+ logrus.Errorf("devmapper: Device ID %d exists in pool but it is supposed to be unused", deviceID) + deviceID, err = devices.getNextFreeDeviceID() + if err != nil { + return err + } + // Save new device id into transaction + devices.refreshTransaction(deviceID) + continue + } + logrus.Debugf("devmapper: Error creating snap device: %s", err) + devices.markDeviceIDFree(deviceID) + return err + } + break + } + + if _, err := devices.registerDevice(deviceID, hash, size, devices.OpenTransactionID); err != nil { + devicemapper.DeleteDevice(devices.getPoolDevName(), deviceID) + devices.markDeviceIDFree(deviceID) + logrus.Debugf("devmapper: Error registering device: %s", err) + return err + } + + if err := devices.closeTransaction(); err != nil { + devices.unregisterDevice(hash) + devicemapper.DeleteDevice(devices.getPoolDevName(), deviceID) + devices.markDeviceIDFree(deviceID) + return err + } + return nil +} + +func (devices *DeviceSet) loadMetadata(hash string) *devInfo { + info := &devInfo{Hash: hash, devices: devices} + + jsonData, err := ioutil.ReadFile(devices.metadataFile(info)) + if err != nil { + logrus.Debugf("devmapper: Failed to read %s with err: %v", devices.metadataFile(info), err) + return nil + } + + if err := json.Unmarshal(jsonData, &info); err != nil { + logrus.Debugf("devmapper: Failed to unmarshal devInfo from %s with err: %v", devices.metadataFile(info), err) + return nil + } + + if info.DeviceID > maxDeviceID { + logrus.Errorf("devmapper: Ignoring Invalid DeviceId=%d", info.DeviceID) + return nil + } + + return info +} + +func getDeviceUUID(device string) (string, error) { + out, err := exec.Command("blkid", "-s", "UUID", "-o", "value", device).Output() + if err != nil { + return "", fmt.Errorf("devmapper: Failed to find uuid for device %s:%v", device, err) + } + + uuid := strings.TrimSuffix(string(out), "\n") + uuid = strings.TrimSpace(uuid) + logrus.Debugf("devmapper: UUID for device: %s is:%s", device, uuid) + return uuid, nil +} + +func (devices *DeviceSet) getBaseDeviceSize() uint64 { + info, _ := devices.lookupDevice("") + if info == nil { + return 0 + } + return info.Size +} + +func (devices *DeviceSet) getBaseDeviceFS() string { + return devices.BaseDeviceFilesystem +} + +func (devices *DeviceSet) verifyBaseDeviceUUIDFS(baseInfo *devInfo) error { + devices.Lock() + defer devices.Unlock() + + if err := devices.activateDeviceIfNeeded(baseInfo, false); err != nil { + return err + } + defer devices.deactivateDevice(baseInfo) + + uuid, err := getDeviceUUID(baseInfo.DevName()) + if err != nil { + return err + } + + if devices.BaseDeviceUUID != uuid { + return fmt.Errorf("devmapper: Current Base Device UUID:%s does not match with stored UUID:%s. Possibly using a different thin pool than last invocation", uuid, devices.BaseDeviceUUID) + } + + if devices.BaseDeviceFilesystem == "" { + fsType, err := ProbeFsType(baseInfo.DevName()) + if err != nil { + return err + } + if err := devices.saveBaseDeviceFilesystem(fsType); err != nil { + return err + } + } + + // If user specified a filesystem using dm.fs option and current + // file system of base image is not same, warn user that dm.fs + // will be ignored. + if devices.BaseDeviceFilesystem != devices.filesystem { + logrus.Warnf("devmapper: Base device already exists and has filesystem %s on it. 
User specified filesystem %s will be ignored.", devices.BaseDeviceFilesystem, devices.filesystem) + devices.filesystem = devices.BaseDeviceFilesystem + } + return nil +} + +func (devices *DeviceSet) saveBaseDeviceFilesystem(fs string) error { + devices.BaseDeviceFilesystem = fs + return devices.saveDeviceSetMetaData() +} + +func (devices *DeviceSet) saveBaseDeviceUUID(baseInfo *devInfo) error { + devices.Lock() + defer devices.Unlock() + + if err := devices.activateDeviceIfNeeded(baseInfo, false); err != nil { + return err + } + defer devices.deactivateDevice(baseInfo) + + uuid, err := getDeviceUUID(baseInfo.DevName()) + if err != nil { + return err + } + + devices.BaseDeviceUUID = uuid + return devices.saveDeviceSetMetaData() +} + +func (devices *DeviceSet) createBaseImage() error { + logrus.Debug("devmapper: Initializing base device-mapper thin volume") + + // Create initial device + info, err := devices.createRegisterDevice("") + if err != nil { + return err + } + + logrus.Debug("devmapper: Creating filesystem on base device-mapper thin volume") + + if err := devices.activateDeviceIfNeeded(info, false); err != nil { + return err + } + + if err := devices.createFilesystem(info); err != nil { + return err + } + + info.Initialized = true + if err := devices.saveMetadata(info); err != nil { + info.Initialized = false + return err + } + + if err := devices.saveBaseDeviceUUID(info); err != nil { + return fmt.Errorf("devmapper: Could not query and save base device UUID:%v", err) + } + + return nil +} + +// Returns if thin pool device exists or not. If device exists, also makes +// sure it is a thin pool device and not some other type of device. +func (devices *DeviceSet) thinPoolExists(thinPoolDevice string) (bool, error) { + logrus.Debugf("devmapper: Checking for existence of the pool %s", thinPoolDevice) + + info, err := devicemapper.GetInfo(thinPoolDevice) + if err != nil { + return false, fmt.Errorf("devmapper: GetInfo() on device %s failed: %v", thinPoolDevice, err) + } + + // Device does not exist. + if info.Exists == 0 { + return false, nil + } + + _, _, deviceType, _, err := devicemapper.GetStatus(thinPoolDevice) + if err != nil { + return false, fmt.Errorf("devmapper: GetStatus() on device %s failed: %v", thinPoolDevice, err) + } + + if deviceType != "thin-pool" { + return false, fmt.Errorf("devmapper: Device %s is not a thin pool", thinPoolDevice) + } + + return true, nil +} + +func (devices *DeviceSet) checkThinPool() error { + _, transactionID, dataUsed, _, _, _, err := devices.poolStatus() + if err != nil { + return err + } + if dataUsed != 0 { + return fmt.Errorf("devmapper: Unable to take ownership of thin-pool (%s) that already has used data blocks", + devices.thinPoolDevice) + } + if transactionID != 0 { + return fmt.Errorf("devmapper: Unable to take ownership of thin-pool (%s) with non-zero transaction ID", + devices.thinPoolDevice) + } + return nil +} + +// Base image is initialized properly. Either save UUID for first time (for +// upgrade case or verify UUID. +func (devices *DeviceSet) setupVerifyBaseImageUUIDFS(baseInfo *devInfo) error { + // If BaseDeviceUUID is nil (upgrade case), save it and return success. 
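+	// ("nil" here means the empty string: BaseDeviceUUID == "" on the first
+	// start after upgrading from a version that did not record the UUID.)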
+ if devices.BaseDeviceUUID == "" { + if err := devices.saveBaseDeviceUUID(baseInfo); err != nil { + return fmt.Errorf("devmapper: Could not query and save base device UUID:%v", err) + } + return nil + } + + if err := devices.verifyBaseDeviceUUIDFS(baseInfo); err != nil { + return fmt.Errorf("devmapper: Base Device UUID and Filesystem verification failed: %v", err) + } + + return nil +} + +func (devices *DeviceSet) checkGrowBaseDeviceFS(info *devInfo) error { + + if !userBaseSize { + return nil + } + + if devices.baseFsSize < devices.getBaseDeviceSize() { + return fmt.Errorf("devmapper: Base device size cannot be smaller than %s", units.HumanSize(float64(devices.getBaseDeviceSize()))) + } + + if devices.baseFsSize == devices.getBaseDeviceSize() { + return nil + } + + info.lock.Lock() + defer info.lock.Unlock() + + devices.Lock() + defer devices.Unlock() + + info.Size = devices.baseFsSize + + if err := devices.saveMetadata(info); err != nil { + // Try to remove unused device + delete(devices.Devices, info.Hash) + return err + } + + return devices.growFS(info) +} + +func (devices *DeviceSet) growFS(info *devInfo) error { + if err := devices.activateDeviceIfNeeded(info, false); err != nil { + return fmt.Errorf("Error activating devmapper device: %s", err) + } + + defer devices.deactivateDevice(info) + + fsMountPoint := "/run/docker/mnt" + if _, err := os.Stat(fsMountPoint); os.IsNotExist(err) { + if err := os.MkdirAll(fsMountPoint, 0700); err != nil { + return err + } + defer os.RemoveAll(fsMountPoint) + } + + options := "" + if devices.BaseDeviceFilesystem == "xfs" { + // XFS needs nouuid or it can't mount filesystems with the same fs + options = joinMountOptions(options, "nouuid") + } + options = joinMountOptions(options, devices.mountOptions) + + if err := mount.Mount(info.DevName(), fsMountPoint, devices.BaseDeviceFilesystem, options); err != nil { + return fmt.Errorf("Error mounting '%s' on '%s': %s", info.DevName(), fsMountPoint, err) + } + + defer syscall.Unmount(fsMountPoint, syscall.MNT_DETACH) + + switch devices.BaseDeviceFilesystem { + case "ext4": + if out, err := exec.Command("resize2fs", info.DevName()).CombinedOutput(); err != nil { + return fmt.Errorf("Failed to grow rootfs:%v:%s", err, string(out)) + } + case "xfs": + if out, err := exec.Command("xfs_growfs", info.DevName()).CombinedOutput(); err != nil { + return fmt.Errorf("Failed to grow rootfs:%v:%s", err, string(out)) + } + default: + return fmt.Errorf("Unsupported filesystem type %s", devices.BaseDeviceFilesystem) + } + return nil +} + +func (devices *DeviceSet) setupBaseImage() error { + oldInfo, _ := devices.lookupDeviceWithLock("") + + // base image already exists. If it is initialized properly, do UUID + // verification and return. Otherwise remove image and set it up + // fresh. + + if oldInfo != nil { + if oldInfo.Initialized && !oldInfo.Deleted { + if err := devices.setupVerifyBaseImageUUIDFS(oldInfo); err != nil { + return err + } + + if err := devices.checkGrowBaseDeviceFS(oldInfo); err != nil { + return err + } + + return nil + } + + logrus.Debug("devmapper: Removing uninitialized base image") + // If previous base device is in deferred delete state, + // that needs to be cleaned up first. So don't try + // deferred deletion. + if err := devices.DeleteDevice("", true); err != nil { + return err + } + } + + // If we are setting up base image for the first time, make sure + // thin pool is empty. 
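+	// (checkThinPool refuses a user-supplied pool that already has used data
+	// blocks or a non-zero transaction ID, e.g. a pool left behind by another
+	// daemon instance.)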
+ if devices.thinPoolDevice != "" && oldInfo == nil { + if err := devices.checkThinPool(); err != nil { + return err + } + } + + // Create new base image device + if err := devices.createBaseImage(); err != nil { + return err + } + + return nil +} + +func setCloseOnExec(name string) { + if fileInfos, _ := ioutil.ReadDir("/proc/self/fd"); fileInfos != nil { + for _, i := range fileInfos { + link, _ := os.Readlink(filepath.Join("/proc/self/fd", i.Name())) + if link == name { + fd, err := strconv.Atoi(i.Name()) + if err == nil { + syscall.CloseOnExec(fd) + } + } + } + } +} + +// DMLog implements logging using DevMapperLogger interface. +func (devices *DeviceSet) DMLog(level int, file string, line int, dmError int, message string) { + // By default libdm sends us all the messages including debug ones. + // We need to filter out messages here and figure out which one + // should be printed. + if level > logLevel { + return + } + + // FIXME(vbatts) push this back into ./pkg/devicemapper/ + if level <= devicemapper.LogLevelErr { + logrus.Errorf("libdevmapper(%d): %s:%d (%d) %s", level, file, line, dmError, message) + } else if level <= devicemapper.LogLevelInfo { + logrus.Infof("libdevmapper(%d): %s:%d (%d) %s", level, file, line, dmError, message) + } else { + // FIXME(vbatts) push this back into ./pkg/devicemapper/ + logrus.Debugf("libdevmapper(%d): %s:%d (%d) %s", level, file, line, dmError, message) + } +} + +func major(device uint64) uint64 { + return (device >> 8) & 0xfff +} + +func minor(device uint64) uint64 { + return (device & 0xff) | ((device >> 12) & 0xfff00) +} + +// ResizePool increases the size of the pool. +func (devices *DeviceSet) ResizePool(size int64) error { + dirname := devices.loopbackDir() + datafilename := path.Join(dirname, "data") + if len(devices.dataDevice) > 0 { + datafilename = devices.dataDevice + } + metadatafilename := path.Join(dirname, "metadata") + if len(devices.metadataDevice) > 0 { + metadatafilename = devices.metadataDevice + } + + datafile, err := os.OpenFile(datafilename, os.O_RDWR, 0) + if datafile == nil { + return err + } + defer datafile.Close() + + fi, err := datafile.Stat() + if fi == nil { + return err + } + + if fi.Size() > size { + return fmt.Errorf("devmapper: Can't shrink file") + } + + dataloopback := loopback.FindLoopDeviceFor(datafile) + if dataloopback == nil { + return fmt.Errorf("devmapper: Unable to find loopback mount for: %s", datafilename) + } + defer dataloopback.Close() + + metadatafile, err := os.OpenFile(metadatafilename, os.O_RDWR, 0) + if metadatafile == nil { + return err + } + defer metadatafile.Close() + + metadataloopback := loopback.FindLoopDeviceFor(metadatafile) + if metadataloopback == nil { + return fmt.Errorf("devmapper: Unable to find loopback mount for: %s", metadatafilename) + } + defer metadataloopback.Close() + + // Grow loopback file + if err := datafile.Truncate(size); err != nil { + return fmt.Errorf("devmapper: Unable to grow loopback file: %s", err) + } + + // Reload size for loopback device + if err := loopback.SetCapacity(dataloopback); err != nil { + return fmt.Errorf("Unable to update loopback capacity: %s", err) + } + + // Suspend the pool + if err := devicemapper.SuspendDevice(devices.getPoolName()); err != nil { + return fmt.Errorf("devmapper: Unable to suspend pool: %s", err) + } + + // Reload with the new block sizes + if err := devicemapper.ReloadPool(devices.getPoolName(), dataloopback, metadataloopback, devices.thinpBlockSize); err != nil { + return fmt.Errorf("devmapper: Unable to reload pool: 
%s", err) + } + + // Resume the pool + if err := devicemapper.ResumeDevice(devices.getPoolName()); err != nil { + return fmt.Errorf("devmapper: Unable to resume pool: %s", err) + } + + return nil +} + +func (devices *DeviceSet) loadTransactionMetaData() error { + jsonData, err := ioutil.ReadFile(devices.transactionMetaFile()) + if err != nil { + // There is no active transaction. This will be the case + // during upgrade. + if os.IsNotExist(err) { + devices.OpenTransactionID = devices.TransactionID + return nil + } + return err + } + + json.Unmarshal(jsonData, &devices.transaction) + return nil +} + +func (devices *DeviceSet) saveTransactionMetaData() error { + jsonData, err := json.Marshal(&devices.transaction) + if err != nil { + return fmt.Errorf("devmapper: Error encoding metadata to json: %s", err) + } + + return devices.writeMetaFile(jsonData, devices.transactionMetaFile()) +} + +func (devices *DeviceSet) removeTransactionMetaData() error { + return os.RemoveAll(devices.transactionMetaFile()) +} + +func (devices *DeviceSet) rollbackTransaction() error { + logrus.Debugf("devmapper: Rolling back open transaction: TransactionID=%d hash=%s device_id=%d", devices.OpenTransactionID, devices.DeviceIDHash, devices.DeviceID) + + // A device id might have already been deleted before transaction + // closed. In that case this call will fail. Just leave a message + // in case of failure. + if err := devicemapper.DeleteDevice(devices.getPoolDevName(), devices.DeviceID); err != nil { + logrus.Errorf("devmapper: Unable to delete device: %s", err) + } + + dinfo := &devInfo{Hash: devices.DeviceIDHash} + if err := devices.removeMetadata(dinfo); err != nil { + logrus.Errorf("devmapper: Unable to remove metadata: %s", err) + } else { + devices.markDeviceIDFree(devices.DeviceID) + } + + if err := devices.removeTransactionMetaData(); err != nil { + logrus.Errorf("devmapper: Unable to remove transaction meta file %s: %s", devices.transactionMetaFile(), err) + } + + return nil +} + +func (devices *DeviceSet) processPendingTransaction() error { + if err := devices.loadTransactionMetaData(); err != nil { + return err + } + + // If there was open transaction but pool transaction ID is same + // as open transaction ID, nothing to roll back. + if devices.TransactionID == devices.OpenTransactionID { + return nil + } + + // If open transaction ID is less than pool transaction ID, something + // is wrong. Bail out. + if devices.OpenTransactionID < devices.TransactionID { + logrus.Errorf("devmapper: Open Transaction id %d is less than pool transaction id %d", devices.OpenTransactionID, devices.TransactionID) + return nil + } + + // Pool transaction ID is not same as open transaction. There is + // a transaction which was not completed. + if err := devices.rollbackTransaction(); err != nil { + return fmt.Errorf("devmapper: Rolling back open transaction failed: %s", err) + } + + devices.OpenTransactionID = devices.TransactionID + return nil +} + +func (devices *DeviceSet) loadDeviceSetMetaData() error { + jsonData, err := ioutil.ReadFile(devices.deviceSetMetaFile()) + if err != nil { + // For backward compatibility return success if file does + // not exist. 
+ if os.IsNotExist(err) { + return nil + } + return err + } + + return json.Unmarshal(jsonData, devices) +} + +func (devices *DeviceSet) saveDeviceSetMetaData() error { + jsonData, err := json.Marshal(devices) + if err != nil { + return fmt.Errorf("devmapper: Error encoding metadata to json: %s", err) + } + + return devices.writeMetaFile(jsonData, devices.deviceSetMetaFile()) +} + +func (devices *DeviceSet) openTransaction(hash string, DeviceID int) error { + devices.allocateTransactionID() + devices.DeviceIDHash = hash + devices.DeviceID = DeviceID + if err := devices.saveTransactionMetaData(); err != nil { + return fmt.Errorf("devmapper: Error saving transaction metadata: %s", err) + } + return nil +} + +func (devices *DeviceSet) refreshTransaction(DeviceID int) error { + devices.DeviceID = DeviceID + if err := devices.saveTransactionMetaData(); err != nil { + return fmt.Errorf("devmapper: Error saving transaction metadata: %s", err) + } + return nil +} + +func (devices *DeviceSet) closeTransaction() error { + if err := devices.updatePoolTransactionID(); err != nil { + logrus.Debug("devmapper: Failed to close Transaction") + return err + } + return nil +} + +func determineDriverCapabilities(version string) error { + /* + * Driver version 4.27.0 and greater support deferred activation + * feature. + */ + + logrus.Debugf("devicemapper: driver version is %s", version) + + versionSplit := strings.Split(version, ".") + major, err := strconv.Atoi(versionSplit[0]) + if err != nil { + return graphdriver.ErrNotSupported + } + + if major > 4 { + driverDeferredRemovalSupport = true + return nil + } + + if major < 4 { + return nil + } + + minor, err := strconv.Atoi(versionSplit[1]) + if err != nil { + return graphdriver.ErrNotSupported + } + + /* + * If major is 4 and minor is 27, then there is no need to + * check for patch level as it can not be less than 0. + */ + if minor >= 27 { + driverDeferredRemovalSupport = true + return nil + } + + return nil +} + +// Determine the major and minor number of loopback device +func getDeviceMajorMinor(file *os.File) (uint64, uint64, error) { + stat, err := file.Stat() + if err != nil { + return 0, 0, err + } + + dev := stat.Sys().(*syscall.Stat_t).Rdev + majorNum := major(dev) + minorNum := minor(dev) + + logrus.Debugf("devmapper: Major:Minor for device: %s is:%v:%v", file.Name(), majorNum, minorNum) + return majorNum, minorNum, nil +} + +// Given a file which is backing file of a loop back device, find the +// loopback device name and its major/minor number. 
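+// For example (hypothetical): for a data file attached to /dev/loop0 this
+// returns ("/dev/loop0", 7, 0), 7 being the major number of the Linux loop
+// driver.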
+func getLoopFileDeviceMajMin(filename string) (string, uint64, uint64, error) { + file, err := os.Open(filename) + if err != nil { + logrus.Debugf("devmapper: Failed to open file %s", filename) + return "", 0, 0, err + } + + defer file.Close() + loopbackDevice := loopback.FindLoopDeviceFor(file) + if loopbackDevice == nil { + return "", 0, 0, fmt.Errorf("devmapper: Unable to find loopback mount for: %s", filename) + } + defer loopbackDevice.Close() + + Major, Minor, err := getDeviceMajorMinor(loopbackDevice) + if err != nil { + return "", 0, 0, err + } + return loopbackDevice.Name(), Major, Minor, nil +} + +// Get the major/minor numbers of thin pool data and metadata devices +func (devices *DeviceSet) getThinPoolDataMetaMajMin() (uint64, uint64, uint64, uint64, error) { + var params, poolDataMajMin, poolMetadataMajMin string + + _, _, _, params, err := devicemapper.GetTable(devices.getPoolName()) + if err != nil { + return 0, 0, 0, 0, err + } + + if _, err = fmt.Sscanf(params, "%s %s", &poolMetadataMajMin, &poolDataMajMin); err != nil { + return 0, 0, 0, 0, err + } + + logrus.Debugf("devmapper: poolDataMajMin=%s poolMetaMajMin=%s\n", poolDataMajMin, poolMetadataMajMin) + + poolDataMajMinorSplit := strings.Split(poolDataMajMin, ":") + poolDataMajor, err := strconv.ParseUint(poolDataMajMinorSplit[0], 10, 32) + if err != nil { + return 0, 0, 0, 0, err + } + + poolDataMinor, err := strconv.ParseUint(poolDataMajMinorSplit[1], 10, 32) + if err != nil { + return 0, 0, 0, 0, err + } + + poolMetadataMajMinorSplit := strings.Split(poolMetadataMajMin, ":") + poolMetadataMajor, err := strconv.ParseUint(poolMetadataMajMinorSplit[0], 10, 32) + if err != nil { + return 0, 0, 0, 0, err + } + + poolMetadataMinor, err := strconv.ParseUint(poolMetadataMajMinorSplit[1], 10, 32) + if err != nil { + return 0, 0, 0, 0, err + } + + return poolDataMajor, poolDataMinor, poolMetadataMajor, poolMetadataMinor, nil +} + +func (devices *DeviceSet) loadThinPoolLoopBackInfo() error { + poolDataMajor, poolDataMinor, poolMetadataMajor, poolMetadataMinor, err := devices.getThinPoolDataMetaMajMin() + if err != nil { + return err + } + + dirname := devices.loopbackDir() + + // data device has not been passed in. So there should be a data file + // which is being mounted as loop device. + if devices.dataDevice == "" { + datafilename := path.Join(dirname, "data") + dataLoopDevice, dataMajor, dataMinor, err := getLoopFileDeviceMajMin(datafilename) + if err != nil { + return err + } + + // Compare the two + if poolDataMajor == dataMajor && poolDataMinor == dataMinor { + devices.dataDevice = dataLoopDevice + devices.dataLoopFile = datafilename + } + + } + + // metadata device has not been passed in. So there should be a + // metadata file which is being mounted as loop device. + if devices.metadataDevice == "" { + metadatafilename := path.Join(dirname, "metadata") + metadataLoopDevice, metadataMajor, metadataMinor, err := getLoopFileDeviceMajMin(metadatafilename) + if err != nil { + return err + } + if poolMetadataMajor == metadataMajor && poolMetadataMinor == metadataMinor { + devices.metadataDevice = metadataLoopDevice + devices.metadataLoopFile = metadatafilename + } + } + + return nil +} + +func (devices *DeviceSet) enableDeferredRemovalDeletion() error { + + // If user asked for deferred removal then check both libdm library + // and kernel driver support deferred removal otherwise error out. 
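+	// (Both switches map to daemon storage options:
+	// --storage-opt dm.use_deferred_removal=true and
+	// --storage-opt dm.use_deferred_deletion=true.)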
+ if enableDeferredRemoval { + if !driverDeferredRemovalSupport { + return fmt.Errorf("devmapper: Deferred removal can not be enabled as kernel does not support it") + } + if !devicemapper.LibraryDeferredRemovalSupport { + return fmt.Errorf("devmapper: Deferred removal can not be enabled as libdm does not support it") + } + logrus.Debug("devmapper: Deferred removal support enabled.") + devices.deferredRemove = true + } + + if enableDeferredDeletion { + if !devices.deferredRemove { + return fmt.Errorf("devmapper: Deferred deletion can not be enabled as deferred removal is not enabled. Enable deferred removal using --storage-opt dm.use_deferred_removal=true parameter") + } + logrus.Debug("devmapper: Deferred deletion support enabled.") + devices.deferredDelete = true + } + return nil +} + +func (devices *DeviceSet) initDevmapper(doInit bool) error { + // give ourselves to libdm as a log handler + devicemapper.LogInit(devices) + + version, err := devicemapper.GetDriverVersion() + if err != nil { + // Can't even get driver version, assume not supported + return graphdriver.ErrNotSupported + } + + if err := determineDriverCapabilities(version); err != nil { + return graphdriver.ErrNotSupported + } + + if err := devices.enableDeferredRemovalDeletion(); err != nil { + return err + } + + // https://github.com/docker/docker/issues/4036 + if supported := devicemapper.UdevSetSyncSupport(true); !supported { + if dockerversion.IAmStatic == "true" { + logrus.Error("devmapper: Udev sync is not supported. This will lead to data loss and unexpected behavior. Install a dynamic binary to use devicemapper or select a different storage driver. For more information, see https://docs.docker.com/engine/reference/commandline/dockerd/#storage-driver-options") + } else { + logrus.Error("devmapper: Udev sync is not supported. This will lead to data loss and unexpected behavior. Install a more recent version of libdevmapper or select a different storage driver. 
For more information, see https://docs.docker.com/engine/reference/commandline/dockerd/#storage-driver-options")
+		}
+
+		if !devices.overrideUdevSyncCheck {
+			return graphdriver.ErrNotSupported
+		}
+	}
+
+	// Create the root dir of the devmapper driver with ownership matching this
+	// daemon's remapped root uid/gid so containers can start properly.
+	uid, gid, err := idtools.GetRootUIDGID(devices.uidMaps, devices.gidMaps)
+	if err != nil {
+		return err
+	}
+	if err := idtools.MkdirAs(devices.root, 0700, uid, gid); err != nil && !os.IsExist(err) {
+		return err
+	}
+	if err := os.MkdirAll(devices.metadataDir(), 0700); err != nil && !os.IsExist(err) {
+		return err
+	}
+
+	prevSetupConfig, err := readLVMConfig(devices.root)
+	if err != nil {
+		return err
+	}
+
+	if !reflect.DeepEqual(devices.lvmSetupConfig, directLVMConfig{}) {
+		if devices.thinPoolDevice != "" {
+			return errors.New("cannot setup direct-lvm when `dm.thinpooldev` is also specified")
+		}
+
+		if !reflect.DeepEqual(prevSetupConfig, devices.lvmSetupConfig) {
+			if !reflect.DeepEqual(prevSetupConfig, directLVMConfig{}) {
+				return errors.New("changing direct-lvm config is not supported")
+			}
+			logrus.WithField("storage-driver", "devicemapper").WithField("direct-lvm-config", devices.lvmSetupConfig).Debugf("Setting up direct lvm mode")
+			if err := verifyBlockDevice(devices.lvmSetupConfig.Device, lvmSetupConfigForce); err != nil {
+				return err
+			}
+			if err := setupDirectLVM(devices.lvmSetupConfig); err != nil {
+				return err
+			}
+			if err := writeLVMConfig(devices.root, devices.lvmSetupConfig); err != nil {
+				return err
+			}
+		}
+		devices.thinPoolDevice = "docker-thinpool"
+		logrus.WithField("storage-driver", "devicemapper").Debugf("Setting dm.thinpooldev to %q", devices.thinPoolDevice)
+	}
+
+	// Set the device prefix from the device id and inode of the docker root dir
+	st, err := os.Stat(devices.root)
+	if err != nil {
+		return fmt.Errorf("devmapper: Error looking up dir %s: %s", devices.root, err)
+	}
+	sysSt := st.Sys().(*syscall.Stat_t)
+	// "reg-" stands for "regular file".
+	// In the future we might use "dev-" for "device file", etc.
+	// docker-<maj>,<min>[-<inode>] stands for:
+	//	- Managed by docker
+	//	- The target of this device is at major <maj> and minor <min>
+	//	- If <inode> is defined, use that file inside the device as a loopback image. Otherwise use the device itself.
+	devices.devicePrefix = fmt.Sprintf("docker-%d:%d-%d", major(sysSt.Dev), minor(sysSt.Dev), sysSt.Ino)
+	logrus.Debugf("devmapper: Generated prefix: %s", devices.devicePrefix)
+
+	// Check for the existence of the thin-pool device
+	poolExists, err := devices.thinPoolExists(devices.getPoolName())
+	if err != nil {
+		return err
+	}
+
+	// It seems libdevmapper opens this without O_CLOEXEC, and go exec will not close files
+	// that are not Close-on-exec,
+	// so we add this badhack to make sure it closes itself
+	setCloseOnExec("/dev/mapper/control")
+
+	// Make sure the sparse images exist in <root>/devicemapper/data and
+	// <root>/devicemapper/metadata
+
+	createdLoopback := false
+
+	// If the pool doesn't exist, create it
+	if !poolExists && devices.thinPoolDevice == "" {
+		logrus.Debug("devmapper: Pool doesn't exist.
Creating it.") + + var ( + dataFile *os.File + metadataFile *os.File + ) + + if devices.dataDevice == "" { + // Make sure the sparse images exist in /devicemapper/data + + hasData := devices.hasImage("data") + + if !doInit && !hasData { + return errors.New("loopback data file not found") + } + + if !hasData { + createdLoopback = true + } + + data, err := devices.ensureImage("data", devices.dataLoopbackSize) + if err != nil { + logrus.Debugf("devmapper: Error device ensureImage (data): %s", err) + return err + } + + dataFile, err = loopback.AttachLoopDevice(data) + if err != nil { + return err + } + devices.dataLoopFile = data + devices.dataDevice = dataFile.Name() + } else { + dataFile, err = os.OpenFile(devices.dataDevice, os.O_RDWR, 0600) + if err != nil { + return err + } + } + defer dataFile.Close() + + if devices.metadataDevice == "" { + // Make sure the sparse images exist in /devicemapper/metadata + + hasMetadata := devices.hasImage("metadata") + + if !doInit && !hasMetadata { + return errors.New("loopback metadata file not found") + } + + if !hasMetadata { + createdLoopback = true + } + + metadata, err := devices.ensureImage("metadata", devices.metaDataLoopbackSize) + if err != nil { + logrus.Debugf("devmapper: Error device ensureImage (metadata): %s", err) + return err + } + + metadataFile, err = loopback.AttachLoopDevice(metadata) + if err != nil { + return err + } + devices.metadataLoopFile = metadata + devices.metadataDevice = metadataFile.Name() + } else { + metadataFile, err = os.OpenFile(devices.metadataDevice, os.O_RDWR, 0600) + if err != nil { + return err + } + } + defer metadataFile.Close() + + if err := devicemapper.CreatePool(devices.getPoolName(), dataFile, metadataFile, devices.thinpBlockSize); err != nil { + return err + } + } + + // Pool already exists and caller did not pass us a pool. That means + // we probably created pool earlier and could not remove it as some + // containers were still using it. Detect some of the properties of + // pool, like is it using loop devices. + if poolExists && devices.thinPoolDevice == "" { + if err := devices.loadThinPoolLoopBackInfo(); err != nil { + logrus.Debugf("devmapper: Failed to load thin pool loopback device information:%v", err) + return err + } + } + + // If we didn't just create the data or metadata image, we need to + // load the transaction id and migrate old metadata + if !createdLoopback { + if err := devices.initMetaData(); err != nil { + return err + } + } + + if devices.thinPoolDevice == "" { + if devices.metadataLoopFile != "" || devices.dataLoopFile != "" { + logrus.Warn("devmapper: Usage of loopback devices is strongly discouraged for production use. Please use `--storage-opt dm.thinpooldev` or use `man docker` to refer to dm.thinpooldev section.") + } + } + + // Right now this loads only NextDeviceID. If there is more metadata + // down the line, we might have to move it earlier. + if err := devices.loadDeviceSetMetaData(); err != nil { + return err + } + + // Setup the base image + if doInit { + if err := devices.setupBaseImage(); err != nil { + logrus.Debugf("devmapper: Error device setupBaseImage: %s", err) + return err + } + } + + return nil +} + +// AddDevice adds a device and registers in the hash. 
+func (devices *DeviceSet) AddDevice(hash, baseHash string, storageOpt map[string]string) error { + logrus.Debugf("devmapper: AddDevice START(hash=%s basehash=%s)", hash, baseHash) + defer logrus.Debugf("devmapper: AddDevice END(hash=%s basehash=%s)", hash, baseHash) + + // If a deleted device exists, return error. + baseInfo, err := devices.lookupDeviceWithLock(baseHash) + if err != nil { + return err + } + + if baseInfo.Deleted { + return fmt.Errorf("devmapper: Base device %v has been marked for deferred deletion", baseInfo.Hash) + } + + baseInfo.lock.Lock() + defer baseInfo.lock.Unlock() + + devices.Lock() + defer devices.Unlock() + + // Also include deleted devices in case hash of new device is + // same as one of the deleted devices. + if info, _ := devices.lookupDevice(hash); info != nil { + return fmt.Errorf("devmapper: device %s already exists. Deleted=%v", hash, info.Deleted) + } + + size, err := devices.parseStorageOpt(storageOpt) + if err != nil { + return err + } + + if size == 0 { + size = baseInfo.Size + } + + if size < baseInfo.Size { + return fmt.Errorf("devmapper: Container size cannot be smaller than %s", units.HumanSize(float64(baseInfo.Size))) + } + + if err := devices.takeSnapshot(hash, baseInfo, size); err != nil { + return err + } + + // Grow the container rootfs. + if size > baseInfo.Size { + info, err := devices.lookupDevice(hash) + if err != nil { + return err + } + + if err := devices.growFS(info); err != nil { + return err + } + } + + return nil +} + +func (devices *DeviceSet) parseStorageOpt(storageOpt map[string]string) (uint64, error) { + + // Read size to change the block device size per container. + for key, val := range storageOpt { + key := strings.ToLower(key) + switch key { + case "size": + size, err := units.RAMInBytes(val) + if err != nil { + return 0, err + } + return uint64(size), nil + default: + return 0, fmt.Errorf("Unknown option %s", key) + } + } + + return 0, nil +} + +func (devices *DeviceSet) markForDeferredDeletion(info *devInfo) error { + // If device is already in deleted state, there is nothing to be done. + if info.Deleted { + return nil + } + + logrus.Debugf("devmapper: Marking device %s for deferred deletion.", info.Hash) + + info.Deleted = true + + // save device metadata to reflect deleted state. + if err := devices.saveMetadata(info); err != nil { + info.Deleted = false + return err + } + + devices.nrDeletedDevices++ + return nil +} + +// Should be called with devices.Lock() held. +func (devices *DeviceSet) deleteTransaction(info *devInfo, syncDelete bool) error { + if err := devices.openTransaction(info.Hash, info.DeviceID); err != nil { + logrus.Debugf("devmapper: Error opening transaction hash = %s deviceId = %d", "", info.DeviceID) + return err + } + + defer devices.closeTransaction() + + err := devicemapper.DeleteDevice(devices.getPoolDevName(), info.DeviceID) + if err != nil { + // If syncDelete is true, we want to return error. If deferred + // deletion is not enabled, we return an error. If error is + // something other then EBUSY, return an error. + if syncDelete || !devices.deferredDelete || err != devicemapper.ErrBusy { + logrus.Debugf("devmapper: Error deleting device: %s", err) + return err + } + } + + if err == nil { + if err := devices.unregisterDevice(info.Hash); err != nil { + return err + } + // If device was already in deferred delete state that means + // deletion was being tried again later. Reduce the deleted + // device count. 
+		if info.Deleted {
+			devices.nrDeletedDevices--
+		}
+		devices.markDeviceIDFree(info.DeviceID)
+	} else {
+		if err := devices.markForDeferredDeletion(info); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// Issue discard only if device open count is zero.
+func (devices *DeviceSet) issueDiscard(info *devInfo) error {
+	logrus.Debugf("devmapper: issueDiscard START(device: %s).", info.Hash)
+	defer logrus.Debugf("devmapper: issueDiscard END(device: %s).", info.Hash)
+	// This is a workaround for the kernel not discarding blocks on
+	// the thin pool when we remove a thinp device, so we do it
+	// manually.
+	// Even if the device is deferred deleted, activate it and issue
+	// discards.
+	if err := devices.activateDeviceIfNeeded(info, true); err != nil {
+		return err
+	}
+
+	devinfo, err := devicemapper.GetInfo(info.Name())
+	if err != nil {
+		return err
+	}
+
+	if devinfo.OpenCount != 0 {
+		logrus.Debugf("devmapper: Device: %s is in use. OpenCount=%d. Not issuing discards.", info.Hash, devinfo.OpenCount)
+		return nil
+	}
+
+	if err := devicemapper.BlockDeviceDiscard(info.DevName()); err != nil {
+		logrus.Debugf("devmapper: Error discarding block on device: %s (ignoring)", err)
+	}
+	return nil
+}
+
+// Should be called with devices.Lock() held.
+func (devices *DeviceSet) deleteDevice(info *devInfo, syncDelete bool) error {
+	if devices.doBlkDiscard {
+		devices.issueDiscard(info)
+	}
+
+	// Try to deactivate device in case it is active.
+	if err := devices.deactivateDevice(info); err != nil {
+		logrus.Debugf("devmapper: Error deactivating device: %s", err)
+		return err
+	}
+
+	if err := devices.deleteTransaction(info, syncDelete); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// DeleteDevice will return success if the device has been marked for deferred
+// removal. If one wants to override that and wants DeleteDevice() to fail if
+// the device was busy and could not be deleted, set syncDelete=true.
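+//
+// A sketch of the two modes (the hash is illustrative):
+//
+//	_ = devices.DeleteDevice("layer-hash", false) // a busy device may be deferred
+//	err := devices.DeleteDevice("layer-hash", true) // a busy device is an error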
+func (devices *DeviceSet) DeleteDevice(hash string, syncDelete bool) error {
+	logrus.Debugf("devmapper: DeleteDevice START(hash=%v syncDelete=%v)", hash, syncDelete)
+	defer logrus.Debugf("devmapper: DeleteDevice END(hash=%v syncDelete=%v)", hash, syncDelete)
+	info, err := devices.lookupDeviceWithLock(hash)
+	if err != nil {
+		return err
+	}
+
+	info.lock.Lock()
+	defer info.lock.Unlock()
+
+	devices.Lock()
+	defer devices.Unlock()
+
+	return devices.deleteDevice(info, syncDelete)
+}
+
+func (devices *DeviceSet) deactivatePool() error {
+	logrus.Debug("devmapper: deactivatePool() START")
+	defer logrus.Debug("devmapper: deactivatePool() END")
+	devname := devices.getPoolDevName()
+
+	devinfo, err := devicemapper.GetInfo(devname)
+	if err != nil {
+		return err
+	}
+
+	if devinfo.Exists == 0 {
+		return nil
+	}
+	if err := devicemapper.RemoveDevice(devname); err != nil {
+		return err
+	}
+
+	if d, err := devicemapper.GetDeps(devname); err == nil {
+		logrus.Warnf("devmapper: device %s still has %d active dependents", devname, d.Count)
+	}
+
+	return nil
+}
+
+func (devices *DeviceSet) deactivateDevice(info *devInfo) error {
+	logrus.Debugf("devmapper: deactivateDevice START(%s)", info.Hash)
+	defer logrus.Debugf("devmapper: deactivateDevice END(%s)", info.Hash)
+
+	devinfo, err := devicemapper.GetInfo(info.Name())
+	if err != nil {
+		return err
+	}
+
+	if devinfo.Exists == 0 {
+		return nil
+	}
+
+	if devices.deferredRemove {
+		if err := devicemapper.RemoveDeviceDeferred(info.Name()); err != nil {
+			return err
+		}
+	} else {
+		if err := devices.removeDevice(info.Name()); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// Issues the underlying dm remove operation.
+func (devices *DeviceSet) removeDevice(devname string) error {
+	var err error
+
+	logrus.Debugf("devmapper: removeDevice START(%s)", devname)
+	defer logrus.Debugf("devmapper: removeDevice END(%s)", devname)
+
+	for i := 0; i < 200; i++ {
+		err = devicemapper.RemoveDevice(devname)
+		if err == nil {
+			break
+		}
+		if err != devicemapper.ErrBusy {
+			return err
+		}
+
+		// If we see EBUSY it may be a transient error,
+		// sleep a bit and retry a few times.
+		devices.Unlock()
+		time.Sleep(100 * time.Millisecond)
+		devices.Lock()
+	}
+
+	return err
+}
+
+func (devices *DeviceSet) cancelDeferredRemovalIfNeeded(info *devInfo) error {
+	if !devices.deferredRemove {
+		return nil
+	}
+
+	logrus.Debugf("devmapper: cancelDeferredRemovalIfNeeded START(%s)", info.Name())
+	defer logrus.Debugf("devmapper: cancelDeferredRemovalIfNeeded END(%s)", info.Name())
+
+	devinfo, err := devicemapper.GetInfoWithDeferred(info.Name())
+	if err != nil {
+		return err
+	}
+
+	if devinfo != nil && devinfo.DeferredRemove == 0 {
+		return nil
+	}
+
+	// Cancel deferred remove
+	if err := devices.cancelDeferredRemoval(info); err != nil {
+		// If the error is ErrEnxio, the device is probably already gone. Continue.
+		if err != devicemapper.ErrEnxio {
+			return err
+		}
+	}
+	return nil
+}
+
+func (devices *DeviceSet) cancelDeferredRemoval(info *devInfo) error {
+	logrus.Debugf("devmapper: cancelDeferredRemoval START(%s)", info.Name())
+	defer logrus.Debugf("devmapper: cancelDeferredRemoval END(%s)", info.Name())
+
+	var err error
+
+	// Cancel deferred remove
+	for i := 0; i < 100; i++ {
+		err = devicemapper.CancelDeferredRemove(info.Name())
+		if err != nil {
+			if err == devicemapper.ErrBusy {
+				// If we see EBUSY it may be a transient error,
+				// sleep a bit and retry a few times.
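+				// Dropping devices.Lock() around the sleep keeps other
+				// DeviceSet operations from stalling for the whole retry window.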
+				devices.Unlock()
+				time.Sleep(100 * time.Millisecond)
+				devices.Lock()
+				continue
+			}
+		}
+		break
+	}
+	return err
+}
+
+// Shutdown shuts down the device by unmounting the root.
+func (devices *DeviceSet) Shutdown(home string) error {
+	logrus.Debugf("devmapper: [deviceset %s] Shutdown()", devices.devicePrefix)
+	logrus.Debugf("devmapper: Shutting down DeviceSet: %s", devices.root)
+	defer logrus.Debugf("devmapper: [deviceset %s] Shutdown() END", devices.devicePrefix)
+
+	// Stop deletion worker. This should stop delivering new events to
+	// the ticker channel. That means no new instance of cleanupDeletedDevice()
+	// will run after this call. If one instance is already running at
+	// the time of the call, it must be holding devices.Lock() and
+	// we will block on this lock till the cleanup function exits.
+	devices.deletionWorkerTicker.Stop()
+
+	devices.Lock()
+	// Save DeviceSet metadata first. Docker kills all threads if they
+	// don't finish within a certain time. It is possible that the Shutdown()
+	// routine does not finish in time as we loop trying to deactivate
+	// some devices while they are busy. In that case the shutdown() routine
+	// will be killed and we will not get a chance to save deviceset
+	// metadata. Hence save this early, before trying to deactivate devices.
+	devices.saveDeviceSetMetaData()
+
+	// Ignore the error since this is just a best effort to avoid trying to
+	// unmount something that isn't mounted.
+	mounts, _ := mount.GetMounts()
+	mounted := make(map[string]bool, len(mounts))
+	for _, mnt := range mounts {
+		mounted[mnt.Mountpoint] = true
+	}
+
+	if err := filepath.Walk(path.Join(home, "mnt"), func(p string, info os.FileInfo, err error) error {
+		if err != nil {
+			return err
+		}
+		if !info.IsDir() {
+			return nil
+		}
+
+		if mounted[p] {
+			// We use MNT_DETACH here in case it is still busy in some running
+			// container. This means it'll go away from the global scope directly,
+			// and the device will be released when that container dies.
+			if err := syscall.Unmount(p, syscall.MNT_DETACH); err != nil {
+				logrus.Debugf("devmapper: Shutdown unmounting %s, error: %s", p, err)
+			}
+		}
+
+		if devInfo, err := devices.lookupDevice(path.Base(p)); err != nil {
+			logrus.Debugf("devmapper: Shutdown lookup device %s, error: %s", path.Base(p), err)
+		} else {
+			if err := devices.deactivateDevice(devInfo); err != nil {
+				logrus.Debugf("devmapper: Shutdown deactivate %s , error: %s", devInfo.Hash, err)
+			}
+		}
+
+		return nil
+	}); err != nil && !os.IsNotExist(err) {
+		devices.Unlock()
+		return err
+	}
+
+	devices.Unlock()
+
+	info, _ := devices.lookupDeviceWithLock("")
+	if info != nil {
+		info.lock.Lock()
+		devices.Lock()
+		if err := devices.deactivateDevice(info); err != nil {
+			logrus.Debugf("devmapper: Shutdown deactivate base , error: %s", err)
+		}
+		devices.Unlock()
+		info.lock.Unlock()
+	}
+
+	devices.Lock()
+	if devices.thinPoolDevice == "" {
+		if err := devices.deactivatePool(); err != nil {
+			logrus.Debugf("devmapper: Shutdown deactivate pool , error: %s", err)
+		}
+	}
+	devices.Unlock()
+
+	return nil
+}
+
+// Recent XFS changes allow changing behavior of filesystem in case of errors.
+// When the thin pool gets full and XFS gets an ENOSPC error, it currently
+// retries the IO infinitely, which can sometimes block the container process
+// so that it can't be killed. With a 0 value, XFS will not retry upon error
+// and will instead shut down the filesystem.
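+//
+// The retry count is exposed through sysfs; a sketch of the knob that
+// xfsSetNospaceRetries below writes to (the dm-3 name is illustrative):
+//
+//	echo 0 > /sys/fs/xfs/dm-3/error/metadata/ENOSPC/max_retries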
+ +func (devices *DeviceSet) xfsSetNospaceRetries(info *devInfo) error { + dmDevicePath, err := os.Readlink(info.DevName()) + if err != nil { + return fmt.Errorf("devmapper: readlink failed for device %v:%v", info.DevName(), err) + } + + dmDeviceName := path.Base(dmDevicePath) + filePath := "/sys/fs/xfs/" + dmDeviceName + "/error/metadata/ENOSPC/max_retries" + maxRetriesFile, err := os.OpenFile(filePath, os.O_WRONLY, 0) + if err != nil { + return fmt.Errorf("devmapper: user specified daemon option dm.xfs_nospace_max_retries but it does not seem to be supported on this system :%v", err) + } + defer maxRetriesFile.Close() + + // Set max retries to 0 + _, err = maxRetriesFile.WriteString(devices.xfsNospaceRetries) + if err != nil { + return fmt.Errorf("devmapper: Failed to write string %v to file %v:%v", devices.xfsNospaceRetries, filePath, err) + } + return nil +} + +// MountDevice mounts the device if not already mounted. +func (devices *DeviceSet) MountDevice(hash, path, mountLabel string) error { + info, err := devices.lookupDeviceWithLock(hash) + if err != nil { + return err + } + + if info.Deleted { + return fmt.Errorf("devmapper: Can't mount device %v as it has been marked for deferred deletion", info.Hash) + } + + info.lock.Lock() + defer info.lock.Unlock() + + devices.Lock() + defer devices.Unlock() + + if err := devices.activateDeviceIfNeeded(info, false); err != nil { + return fmt.Errorf("devmapper: Error activating devmapper device for '%s': %s", hash, err) + } + + fstype, err := ProbeFsType(info.DevName()) + if err != nil { + return err + } + + options := "" + + if fstype == "xfs" { + // XFS needs nouuid or it can't mount filesystems with the same fs + options = joinMountOptions(options, "nouuid") + } + + options = joinMountOptions(options, devices.mountOptions) + options = joinMountOptions(options, label.FormatMountLabel("", mountLabel)) + + if err := mount.Mount(info.DevName(), path, fstype, options); err != nil { + return fmt.Errorf("devmapper: Error mounting '%s' on '%s': %s", info.DevName(), path, err) + } + + if fstype == "xfs" && devices.xfsNospaceRetries != "" { + if err := devices.xfsSetNospaceRetries(info); err != nil { + syscall.Unmount(path, syscall.MNT_DETACH) + devices.deactivateDevice(info) + return err + } + } + + return nil +} + +// UnmountDevice unmounts the device and removes it from hash. +func (devices *DeviceSet) UnmountDevice(hash, mountPath string) error { + logrus.Debugf("devmapper: UnmountDevice START(hash=%s)", hash) + defer logrus.Debugf("devmapper: UnmountDevice END(hash=%s)", hash) + + info, err := devices.lookupDeviceWithLock(hash) + if err != nil { + return err + } + + info.lock.Lock() + defer info.lock.Unlock() + + devices.Lock() + defer devices.Unlock() + + logrus.Debugf("devmapper: Unmount(%s)", mountPath) + if err := syscall.Unmount(mountPath, syscall.MNT_DETACH); err != nil { + return err + } + logrus.Debug("devmapper: Unmount done") + + return devices.deactivateDevice(info) +} + +// HasDevice returns true if the device metadata exists. +func (devices *DeviceSet) HasDevice(hash string) bool { + info, _ := devices.lookupDeviceWithLock(hash) + return info != nil +} + +// List returns a list of device ids. 
+func (devices *DeviceSet) List() []string { + devices.Lock() + defer devices.Unlock() + + ids := make([]string, len(devices.Devices)) + i := 0 + for k := range devices.Devices { + ids[i] = k + i++ + } + return ids +} + +func (devices *DeviceSet) deviceStatus(devName string) (sizeInSectors, mappedSectors, highestMappedSector uint64, err error) { + var params string + _, sizeInSectors, _, params, err = devicemapper.GetStatus(devName) + if err != nil { + return + } + if _, err = fmt.Sscanf(params, "%d %d", &mappedSectors, &highestMappedSector); err == nil { + return + } + return +} + +// GetDeviceStatus provides size, mapped sectors +func (devices *DeviceSet) GetDeviceStatus(hash string) (*DevStatus, error) { + info, err := devices.lookupDeviceWithLock(hash) + if err != nil { + return nil, err + } + + info.lock.Lock() + defer info.lock.Unlock() + + devices.Lock() + defer devices.Unlock() + + status := &DevStatus{ + DeviceID: info.DeviceID, + Size: info.Size, + TransactionID: info.TransactionID, + } + + if err := devices.activateDeviceIfNeeded(info, false); err != nil { + return nil, fmt.Errorf("devmapper: Error activating devmapper device for '%s': %s", hash, err) + } + + sizeInSectors, mappedSectors, highestMappedSector, err := devices.deviceStatus(info.DevName()) + + if err != nil { + return nil, err + } + + status.SizeInSectors = sizeInSectors + status.MappedSectors = mappedSectors + status.HighestMappedSector = highestMappedSector + + return status, nil +} + +func (devices *DeviceSet) poolStatus() (totalSizeInSectors, transactionID, dataUsed, dataTotal, metadataUsed, metadataTotal uint64, err error) { + var params string + if _, totalSizeInSectors, _, params, err = devicemapper.GetStatus(devices.getPoolName()); err == nil { + _, err = fmt.Sscanf(params, "%d %d/%d %d/%d", &transactionID, &metadataUsed, &metadataTotal, &dataUsed, &dataTotal) + } + return +} + +// DataDevicePath returns the path to the data storage for this deviceset, +// regardless of loopback or block device +func (devices *DeviceSet) DataDevicePath() string { + return devices.dataDevice +} + +// MetadataDevicePath returns the path to the metadata storage for this deviceset, +// regardless of loopback or block device +func (devices *DeviceSet) MetadataDevicePath() string { + return devices.metadataDevice +} + +func (devices *DeviceSet) getUnderlyingAvailableSpace(loopFile string) (uint64, error) { + buf := new(syscall.Statfs_t) + if err := syscall.Statfs(loopFile, buf); err != nil { + logrus.Warnf("devmapper: Couldn't stat loopfile filesystem %v: %v", loopFile, err) + return 0, err + } + return buf.Bfree * uint64(buf.Bsize), nil +} + +func (devices *DeviceSet) isRealFile(loopFile string) (bool, error) { + if loopFile != "" { + fi, err := os.Stat(loopFile) + if err != nil { + logrus.Warnf("devmapper: Couldn't stat loopfile %v: %v", loopFile, err) + return false, err + } + return fi.Mode().IsRegular(), nil + } + return false, nil +} + +// Status returns the current status of this deviceset +func (devices *DeviceSet) Status() *Status { + devices.Lock() + defer devices.Unlock() + + status := &Status{} + + status.PoolName = devices.getPoolName() + status.DataFile = devices.DataDevicePath() + status.DataLoopback = devices.dataLoopFile + status.MetadataFile = devices.MetadataDevicePath() + status.MetadataLoopback = devices.metadataLoopFile + status.UdevSyncSupported = devicemapper.UdevSyncSupported() + status.DeferredRemoveEnabled = devices.deferredRemove + status.DeferredDeleteEnabled = devices.deferredDelete + 
	status.DeferredDeletedDeviceCount = devices.nrDeletedDevices
+	status.BaseDeviceSize = devices.getBaseDeviceSize()
+	status.BaseDeviceFS = devices.getBaseDeviceFS()
+
+	totalSizeInSectors, _, dataUsed, dataTotal, metadataUsed, metadataTotal, err := devices.poolStatus()
+	if err == nil {
+		// Convert from blocks to bytes
+		blockSizeInSectors := totalSizeInSectors / dataTotal
+
+		status.Data.Used = dataUsed * blockSizeInSectors * 512
+		status.Data.Total = dataTotal * blockSizeInSectors * 512
+		status.Data.Available = status.Data.Total - status.Data.Used
+
+		// metadata blocks are always 4k
+		status.Metadata.Used = metadataUsed * 4096
+		status.Metadata.Total = metadataTotal * 4096
+		status.Metadata.Available = status.Metadata.Total - status.Metadata.Used
+
+		status.SectorSize = blockSizeInSectors * 512
+
+		if check, _ := devices.isRealFile(devices.dataLoopFile); check {
+			actualSpace, err := devices.getUnderlyingAvailableSpace(devices.dataLoopFile)
+			if err == nil && actualSpace < status.Data.Available {
+				status.Data.Available = actualSpace
+			}
+		}
+
+		if check, _ := devices.isRealFile(devices.metadataLoopFile); check {
+			actualSpace, err := devices.getUnderlyingAvailableSpace(devices.metadataLoopFile)
+			if err == nil && actualSpace < status.Metadata.Available {
+				status.Metadata.Available = actualSpace
+			}
+		}
+
+		minFreeData := (dataTotal * uint64(devices.minFreeSpacePercent)) / 100
+		status.MinFreeSpace = minFreeData * blockSizeInSectors * 512
+	}
+
+	return status
+}
+
+// exportDeviceMetadata returns the device metadata (ID, size, and name) for
+// the device with the given hash.
+func (devices *DeviceSet) exportDeviceMetadata(hash string) (*deviceMetadata, error) {
+	info, err := devices.lookupDeviceWithLock(hash)
+	if err != nil {
+		return nil, err
+	}
+
+	info.lock.Lock()
+	defer info.lock.Unlock()
+
+	metadata := &deviceMetadata{info.DeviceID, info.Size, info.Name()}
+	return metadata, nil
+}
+
+// NewDeviceSet creates the device set based on the options provided.
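+//
+// Options are "key=value" strings as parsed in the switch below; a sketch
+// (root path and values are illustrative):
+//
+//	opts := []string{"dm.basesize=20G", "dm.fs=xfs", "dm.thinpooldev=docker-thinpool"}
+//	devices, err := NewDeviceSet("/var/lib/docker/devicemapper", true, opts, nil, nil)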
+func NewDeviceSet(root string, doInit bool, options []string, uidMaps, gidMaps []idtools.IDMap) (*DeviceSet, error) { + devicemapper.SetDevDir("/dev") + + devices := &DeviceSet{ + root: root, + metaData: metaData{Devices: make(map[string]*devInfo)}, + dataLoopbackSize: defaultDataLoopbackSize, + metaDataLoopbackSize: defaultMetaDataLoopbackSize, + baseFsSize: defaultBaseFsSize, + overrideUdevSyncCheck: defaultUdevSyncOverride, + doBlkDiscard: true, + thinpBlockSize: defaultThinpBlockSize, + deviceIDMap: make([]byte, deviceIDMapSz), + deletionWorkerTicker: time.NewTicker(time.Second * 30), + uidMaps: uidMaps, + gidMaps: gidMaps, + minFreeSpacePercent: defaultMinFreeSpacePercent, + } + + foundBlkDiscard := false + var lvmSetupConfig directLVMConfig + for _, option := range options { + key, val, err := parsers.ParseKeyValueOpt(option) + if err != nil { + return nil, err + } + key = strings.ToLower(key) + switch key { + case "dm.basesize": + size, err := units.RAMInBytes(val) + if err != nil { + return nil, err + } + userBaseSize = true + devices.baseFsSize = uint64(size) + case "dm.loopdatasize": + size, err := units.RAMInBytes(val) + if err != nil { + return nil, err + } + devices.dataLoopbackSize = size + case "dm.loopmetadatasize": + size, err := units.RAMInBytes(val) + if err != nil { + return nil, err + } + devices.metaDataLoopbackSize = size + case "dm.fs": + if val != "ext4" && val != "xfs" { + return nil, fmt.Errorf("devmapper: Unsupported filesystem %s\n", val) + } + devices.filesystem = val + case "dm.mkfsarg": + devices.mkfsArgs = append(devices.mkfsArgs, val) + case "dm.mountopt": + devices.mountOptions = joinMountOptions(devices.mountOptions, val) + case "dm.metadatadev": + devices.metadataDevice = val + case "dm.datadev": + devices.dataDevice = val + case "dm.thinpooldev": + devices.thinPoolDevice = strings.TrimPrefix(val, "/dev/mapper/") + case "dm.blkdiscard": + foundBlkDiscard = true + devices.doBlkDiscard, err = strconv.ParseBool(val) + if err != nil { + return nil, err + } + case "dm.blocksize": + size, err := units.RAMInBytes(val) + if err != nil { + return nil, err + } + // convert to 512b sectors + devices.thinpBlockSize = uint32(size) >> 9 + case "dm.override_udev_sync_check": + devices.overrideUdevSyncCheck, err = strconv.ParseBool(val) + if err != nil { + return nil, err + } + + case "dm.use_deferred_removal": + enableDeferredRemoval, err = strconv.ParseBool(val) + if err != nil { + return nil, err + } + + case "dm.use_deferred_deletion": + enableDeferredDeletion, err = strconv.ParseBool(val) + if err != nil { + return nil, err + } + + case "dm.min_free_space": + if !strings.HasSuffix(val, "%") { + return nil, fmt.Errorf("devmapper: Option dm.min_free_space requires %% suffix") + } + + valstring := strings.TrimSuffix(val, "%") + minFreeSpacePercent, err := strconv.ParseUint(valstring, 10, 32) + if err != nil { + return nil, err + } + + if minFreeSpacePercent >= 100 { + return nil, fmt.Errorf("devmapper: Invalid value %v for option dm.min_free_space", val) + } + + devices.minFreeSpacePercent = uint32(minFreeSpacePercent) + case "dm.xfs_nospace_max_retries": + _, err := strconv.ParseUint(val, 10, 64) + if err != nil { + return nil, err + } + devices.xfsNospaceRetries = val + case "dm.directlvm_device": + lvmSetupConfig.Device = val + case "dm.directlvm_device_force": + lvmSetupConfigForce, err = strconv.ParseBool(val) + if err != nil { + return nil, err + } + case "dm.thinp_percent": + per, err := strconv.ParseUint(strings.TrimSuffix(val, "%"), 10, 32) + if err != nil 
{ + return nil, errors.Wrapf(err, "could not parse `dm.thinp_percent=%s`", val) + } + if per >= 100 { + return nil, errors.New("dm.thinp_percent must be greater than 0 and less than 100") + } + lvmSetupConfig.ThinpPercent = per + case "dm.thinp_metapercent": + per, err := strconv.ParseUint(strings.TrimSuffix(val, "%"), 10, 32) + if err != nil { + return nil, errors.Wrapf(err, "could not parse `dm.thinp_metapercent=%s`", val) + } + if per >= 100 { + return nil, errors.New("dm.thinp_metapercent must be greater than 0 and less than 100") + } + lvmSetupConfig.ThinpMetaPercent = per + case "dm.thinp_autoextend_percent": + per, err := strconv.ParseUint(strings.TrimSuffix(val, "%"), 10, 32) + if err != nil { + return nil, errors.Wrapf(err, "could not parse `dm.thinp_autoextend_percent=%s`", val) + } + if per > 100 { + return nil, errors.New("dm.thinp_autoextend_percent must be greater than 0 and less than 100") + } + lvmSetupConfig.AutoExtendPercent = per + case "dm.thinp_autoextend_threshold": + per, err := strconv.ParseUint(strings.TrimSuffix(val, "%"), 10, 32) + if err != nil { + return nil, errors.Wrapf(err, "could not parse `dm.thinp_autoextend_threshold=%s`", val) + } + if per > 100 { + return nil, errors.New("dm.thinp_autoextend_threshold must be greater than 0 and less than 100") + } + lvmSetupConfig.AutoExtendThreshold = per + default: + return nil, fmt.Errorf("devmapper: Unknown option %s\n", key) + } + } + + if err := validateLVMConfig(lvmSetupConfig); err != nil { + return nil, err + } + + devices.lvmSetupConfig = lvmSetupConfig + + // By default, don't do blk discard hack on raw devices, its rarely useful and is expensive + if !foundBlkDiscard && (devices.dataDevice != "" || devices.thinPoolDevice != "") { + devices.doBlkDiscard = false + } + + if err := devices.initDevmapper(doInit); err != nil { + return nil, err + } + + return devices, nil +} diff --git a/components/engine/daemon/graphdriver/devmapper/devmapper_doc.go b/components/engine/daemon/graphdriver/devmapper/devmapper_doc.go new file mode 100644 index 0000000000..9ab3e4f864 --- /dev/null +++ b/components/engine/daemon/graphdriver/devmapper/devmapper_doc.go @@ -0,0 +1,106 @@ +package devmapper + +// Definition of struct dm_task and sub structures (from lvm2) +// +// struct dm_ioctl { +// /* +// * The version number is made up of three parts: +// * major - no backward or forward compatibility, +// * minor - only backwards compatible, +// * patch - both backwards and forwards compatible. +// * +// * All clients of the ioctl interface should fill in the +// * version number of the interface that they were +// * compiled with. +// * +// * All recognized ioctl commands (ie. those that don't +// * return -ENOTTY) fill out this field, even if the +// * command failed. +// */ +// uint32_t version[3]; /* in/out */ +// uint32_t data_size; /* total size of data passed in +// * including this struct */ + +// uint32_t data_start; /* offset to start of data +// * relative to start of this struct */ + +// uint32_t target_count; /* in/out */ +// int32_t open_count; /* out */ +// uint32_t flags; /* in/out */ + +// /* +// * event_nr holds either the event number (input and output) or the +// * udev cookie value (input only). +// * The DM_DEV_WAIT ioctl takes an event number as input. +// * The DM_SUSPEND, DM_DEV_REMOVE and DM_DEV_RENAME ioctls +// * use the field as a cookie to return in the DM_COOKIE +// * variable with the uevents they issue. +// * For output, the ioctls return the event number, not the cookie. 
+// */ +// uint32_t event_nr; /* in/out */ +// uint32_t padding; + +// uint64_t dev; /* in/out */ + +// char name[DM_NAME_LEN]; /* device name */ +// char uuid[DM_UUID_LEN]; /* unique identifier for +// * the block device */ +// char data[7]; /* padding or data */ +// }; + +// struct target { +// uint64_t start; +// uint64_t length; +// char *type; +// char *params; + +// struct target *next; +// }; + +// typedef enum { +// DM_ADD_NODE_ON_RESUME, /* add /dev/mapper node with dmsetup resume */ +// DM_ADD_NODE_ON_CREATE /* add /dev/mapper node with dmsetup create */ +// } dm_add_node_t; + +// struct dm_task { +// int type; +// char *dev_name; +// char *mangled_dev_name; + +// struct target *head, *tail; + +// int read_only; +// uint32_t event_nr; +// int major; +// int minor; +// int allow_default_major_fallback; +// uid_t uid; +// gid_t gid; +// mode_t mode; +// uint32_t read_ahead; +// uint32_t read_ahead_flags; +// union { +// struct dm_ioctl *v4; +// } dmi; +// char *newname; +// char *message; +// char *geometry; +// uint64_t sector; +// int no_flush; +// int no_open_count; +// int skip_lockfs; +// int query_inactive_table; +// int suppress_identical_reload; +// dm_add_node_t add_node; +// uint64_t existing_table_size; +// int cookie_set; +// int new_uuid; +// int secure_data; +// int retry_remove; +// int enable_checks; +// int expected_errno; + +// char *uuid; +// char *mangled_uuid; +// }; +// diff --git a/components/engine/daemon/graphdriver/devmapper/devmapper_test.go b/components/engine/daemon/graphdriver/devmapper/devmapper_test.go new file mode 100644 index 0000000000..c5be97ae3f --- /dev/null +++ b/components/engine/daemon/graphdriver/devmapper/devmapper_test.go @@ -0,0 +1,110 @@ +// +build linux + +package devmapper + +import ( + "fmt" + "testing" + "time" + + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/daemon/graphdriver/graphtest" +) + +func init() { + // Reduce the size of the base fs and loopback for the tests + defaultDataLoopbackSize = 300 * 1024 * 1024 + defaultMetaDataLoopbackSize = 200 * 1024 * 1024 + defaultBaseFsSize = 300 * 1024 * 1024 + defaultUdevSyncOverride = true + if err := graphtest.InitLoopbacks(); err != nil { + panic(err) + } +} + +// This avoids creating a new driver for each test if all tests are run +// Make sure to put new tests between TestDevmapperSetup and TestDevmapperTeardown +func TestDevmapperSetup(t *testing.T) { + graphtest.GetDriver(t, "devicemapper") +} + +func TestDevmapperCreateEmpty(t *testing.T) { + graphtest.DriverTestCreateEmpty(t, "devicemapper") +} + +func TestDevmapperCreateBase(t *testing.T) { + graphtest.DriverTestCreateBase(t, "devicemapper") +} + +func TestDevmapperCreateSnap(t *testing.T) { + graphtest.DriverTestCreateSnap(t, "devicemapper") +} + +func TestDevmapperTeardown(t *testing.T) { + graphtest.PutDriver(t) +} + +func TestDevmapperReduceLoopBackSize(t *testing.T) { + tenMB := int64(10 * 1024 * 1024) + testChangeLoopBackSize(t, -tenMB, defaultDataLoopbackSize, defaultMetaDataLoopbackSize) +} + +func TestDevmapperIncreaseLoopBackSize(t *testing.T) { + tenMB := int64(10 * 1024 * 1024) + testChangeLoopBackSize(t, tenMB, defaultDataLoopbackSize+tenMB, defaultMetaDataLoopbackSize+tenMB) +} + +func testChangeLoopBackSize(t *testing.T, delta, expectDataSize, expectMetaDataSize int64) { + driver := graphtest.GetDriver(t, "devicemapper").(*graphtest.Driver).Driver.(*graphdriver.NaiveDiffDriver).ProtoDriver.(*Driver) + defer graphtest.PutDriver(t) + // make sure data or metadata loopback size are the 
default size
+	if s := driver.DeviceSet.Status(); s.Data.Total != uint64(defaultDataLoopbackSize) || s.Metadata.Total != uint64(defaultMetaDataLoopbackSize) {
+		t.Fatal("data or metadata loop back size is incorrect")
+	}
+	if err := driver.Cleanup(); err != nil {
+		t.Fatal(err)
+	}
+	// Reload
+	d, err := Init(driver.home, []string{
+		fmt.Sprintf("dm.loopdatasize=%d", defaultDataLoopbackSize+delta),
+		fmt.Sprintf("dm.loopmetadatasize=%d", defaultMetaDataLoopbackSize+delta),
+	}, nil, nil)
+	if err != nil {
+		t.Fatalf("error creating devicemapper driver: %v", err)
+	}
+	driver = d.(*graphdriver.NaiveDiffDriver).ProtoDriver.(*Driver)
+	if s := driver.DeviceSet.Status(); s.Data.Total != uint64(expectDataSize) || s.Metadata.Total != uint64(expectMetaDataSize) {
+		t.Fatal("data or metadata loop back size is incorrect")
+	}
+	if err := driver.Cleanup(); err != nil {
+		t.Fatal(err)
+	}
+}
+
+// Make sure devices.Lock() has been released upon return from the cleanupDeletedDevices() function
+func TestDevmapperLockReleasedDeviceDeletion(t *testing.T) {
+	driver := graphtest.GetDriver(t, "devicemapper").(*graphtest.Driver).Driver.(*graphdriver.NaiveDiffDriver).ProtoDriver.(*Driver)
+	defer graphtest.PutDriver(t)
+
+	// Call cleanupDeletedDevices() and after the call take and release
+	// DeviceSet Lock. If lock has not been released, this will hang.
+	driver.DeviceSet.cleanupDeletedDevices()
+
+	doneChan := make(chan bool)
+
+	go func() {
+		driver.DeviceSet.Lock()
+		defer driver.DeviceSet.Unlock()
+		doneChan <- true
+	}()
+
+	select {
+	case <-time.After(time.Second * 5):
+		// Timer expired. That means lock was not released upon
+		// function return and we are deadlocked. Release lock
+		// here so that cleanup could succeed and fail the test.
+		driver.DeviceSet.Unlock()
+		t.Fatal("Could not acquire devices lock after call to cleanupDeletedDevices()")
+	case <-doneChan:
+	}
+}
diff --git a/components/engine/daemon/graphdriver/devmapper/driver.go b/components/engine/daemon/graphdriver/devmapper/driver.go
new file mode 100644
index 0000000000..243d88a8bb
--- /dev/null
+++ b/components/engine/daemon/graphdriver/devmapper/driver.go
@@ -0,0 +1,241 @@
+// +build linux
+
+package devmapper
+
+import (
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path"
+	"strconv"
+
+	"github.com/Sirupsen/logrus"
+
+	"github.com/docker/docker/daemon/graphdriver"
+	"github.com/docker/docker/pkg/devicemapper"
+	"github.com/docker/docker/pkg/idtools"
+	"github.com/docker/docker/pkg/locker"
+	"github.com/docker/docker/pkg/mount"
+	"github.com/docker/docker/pkg/system"
+	units "github.com/docker/go-units"
+)
+
+func init() {
+	graphdriver.Register("devicemapper", Init)
+}
+
+// Driver contains the device set mounted and the home directory
+type Driver struct {
+	*DeviceSet
+	home    string
+	uidMaps []idtools.IDMap
+	gidMaps []idtools.IDMap
+	ctr     *graphdriver.RefCounter
+	locker  *locker.Locker
+}
+
+// Init creates a driver with the given home and the set of options.
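+//
+// Init is registered under the "devicemapper" name in init() above and is
+// normally reached through the graphdriver registry; a direct sketch (home
+// path illustrative):
+//
+//	d, err := Init("/var/lib/docker/devicemapper", nil, nil, nil)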
+func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { + deviceSet, err := NewDeviceSet(home, true, options, uidMaps, gidMaps) + if err != nil { + return nil, err + } + + if err := mount.MakePrivate(home); err != nil { + return nil, err + } + + d := &Driver{ + DeviceSet: deviceSet, + home: home, + uidMaps: uidMaps, + gidMaps: gidMaps, + ctr: graphdriver.NewRefCounter(graphdriver.NewDefaultChecker()), + locker: locker.New(), + } + + return graphdriver.NewNaiveDiffDriver(d, uidMaps, gidMaps), nil +} + +func (d *Driver) String() string { + return "devicemapper" +} + +// Status returns the status about the driver in a printable format. +// Information returned contains Pool Name, Data File, Metadata file, disk usage by +// the data and metadata, etc. +func (d *Driver) Status() [][2]string { + s := d.DeviceSet.Status() + + status := [][2]string{ + {"Pool Name", s.PoolName}, + {"Pool Blocksize", fmt.Sprintf("%s", units.HumanSize(float64(s.SectorSize)))}, + {"Base Device Size", fmt.Sprintf("%s", units.HumanSize(float64(s.BaseDeviceSize)))}, + {"Backing Filesystem", s.BaseDeviceFS}, + {"Data file", s.DataFile}, + {"Metadata file", s.MetadataFile}, + {"Data Space Used", fmt.Sprintf("%s", units.HumanSize(float64(s.Data.Used)))}, + {"Data Space Total", fmt.Sprintf("%s", units.HumanSize(float64(s.Data.Total)))}, + {"Data Space Available", fmt.Sprintf("%s", units.HumanSize(float64(s.Data.Available)))}, + {"Metadata Space Used", fmt.Sprintf("%s", units.HumanSize(float64(s.Metadata.Used)))}, + {"Metadata Space Total", fmt.Sprintf("%s", units.HumanSize(float64(s.Metadata.Total)))}, + {"Metadata Space Available", fmt.Sprintf("%s", units.HumanSize(float64(s.Metadata.Available)))}, + {"Thin Pool Minimum Free Space", fmt.Sprintf("%s", units.HumanSize(float64(s.MinFreeSpace)))}, + {"Udev Sync Supported", fmt.Sprintf("%v", s.UdevSyncSupported)}, + {"Deferred Removal Enabled", fmt.Sprintf("%v", s.DeferredRemoveEnabled)}, + {"Deferred Deletion Enabled", fmt.Sprintf("%v", s.DeferredDeleteEnabled)}, + {"Deferred Deleted Device Count", fmt.Sprintf("%v", s.DeferredDeletedDeviceCount)}, + } + if len(s.DataLoopback) > 0 { + status = append(status, [2]string{"Data loop file", s.DataLoopback}) + } + if len(s.MetadataLoopback) > 0 { + status = append(status, [2]string{"Metadata loop file", s.MetadataLoopback}) + } + if vStr, err := devicemapper.GetLibraryVersion(); err == nil { + status = append(status, [2]string{"Library Version", vStr}) + } + return status +} + +// GetMetadata returns a map of information about the device. +func (d *Driver) GetMetadata(id string) (map[string]string, error) { + m, err := d.DeviceSet.exportDeviceMetadata(id) + + if err != nil { + return nil, err + } + + metadata := make(map[string]string) + metadata["DeviceId"] = strconv.Itoa(m.deviceID) + metadata["DeviceSize"] = strconv.FormatUint(m.deviceSize, 10) + metadata["DeviceName"] = m.deviceName + return metadata, nil +} + +// Cleanup unmounts a device. +func (d *Driver) Cleanup() error { + err := d.DeviceSet.Shutdown(d.home) + + if err2 := mount.Unmount(d.home); err == nil { + err = err2 + } + + return err +} + +// CreateReadWrite creates a layer that is writable for use as a container +// file system. +func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error { + return d.Create(id, parent, opts) +} + +// Create adds a device with a given id and the parent. 
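+//
+// A sketch (ids and size illustrative); StorageOpt is forwarded to
+// DeviceSet.AddDevice:
+//
+//	opts := &graphdriver.CreateOpts{StorageOpt: map[string]string{"size": "20G"}}
+//	err := d.Create("layer-id", "parent-id", opts)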
+func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error { + var storageOpt map[string]string + if opts != nil { + storageOpt = opts.StorageOpt + } + + if err := d.DeviceSet.AddDevice(id, parent, storageOpt); err != nil { + return err + } + + return nil +} + +// Remove removes a device with a given id, unmounts the filesystem. +func (d *Driver) Remove(id string) error { + d.locker.Lock(id) + defer d.locker.Unlock(id) + if !d.DeviceSet.HasDevice(id) { + // Consider removing a non-existing device a no-op + // This is useful to be able to progress on container removal + // if the underlying device has gone away due to earlier errors + return nil + } + + // This assumes the device has been properly Get/Put:ed and thus is unmounted + if err := d.DeviceSet.DeleteDevice(id, false); err != nil { + return fmt.Errorf("failed to remove device %s: %v", id, err) + } + + mp := path.Join(d.home, "mnt", id) + if err := system.EnsureRemoveAll(mp); err != nil { + return err + } + + return nil +} + +// Get mounts a device with given id into the root filesystem +func (d *Driver) Get(id, mountLabel string) (string, error) { + d.locker.Lock(id) + defer d.locker.Unlock(id) + mp := path.Join(d.home, "mnt", id) + rootFs := path.Join(mp, "rootfs") + if count := d.ctr.Increment(mp); count > 1 { + return rootFs, nil + } + + uid, gid, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) + if err != nil { + d.ctr.Decrement(mp) + return "", err + } + + // Create the target directories if they don't exist + if err := idtools.MkdirAllAs(path.Join(d.home, "mnt"), 0755, uid, gid); err != nil && !os.IsExist(err) { + d.ctr.Decrement(mp) + return "", err + } + if err := idtools.MkdirAs(mp, 0755, uid, gid); err != nil && !os.IsExist(err) { + d.ctr.Decrement(mp) + return "", err + } + + // Mount the device + if err := d.DeviceSet.MountDevice(id, mp, mountLabel); err != nil { + d.ctr.Decrement(mp) + return "", err + } + + if err := idtools.MkdirAllAs(rootFs, 0755, uid, gid); err != nil && !os.IsExist(err) { + d.ctr.Decrement(mp) + d.DeviceSet.UnmountDevice(id, mp) + return "", err + } + + idFile := path.Join(mp, "id") + if _, err := os.Stat(idFile); err != nil && os.IsNotExist(err) { + // Create an "id" file with the container/image id in it to help reconstruct this in case + // of later problems + if err := ioutil.WriteFile(idFile, []byte(id), 0600); err != nil { + d.ctr.Decrement(mp) + d.DeviceSet.UnmountDevice(id, mp) + return "", err + } + } + + return rootFs, nil +} + +// Put unmounts a device and removes it. +func (d *Driver) Put(id string) error { + d.locker.Lock(id) + defer d.locker.Unlock(id) + mp := path.Join(d.home, "mnt", id) + if count := d.ctr.Decrement(mp); count > 0 { + return nil + } + err := d.DeviceSet.UnmountDevice(id, mp) + if err != nil { + logrus.Errorf("devmapper: Error unmounting device %s: %s", id, err) + } + return err +} + +// Exists checks to see if the device exists. +func (d *Driver) Exists(id string) bool { + return d.DeviceSet.HasDevice(id) +} diff --git a/components/engine/daemon/graphdriver/devmapper/mount.go b/components/engine/daemon/graphdriver/devmapper/mount.go new file mode 100644 index 0000000000..cca1fe1b38 --- /dev/null +++ b/components/engine/daemon/graphdriver/devmapper/mount.go @@ -0,0 +1,89 @@ +// +build linux + +package devmapper + +import ( + "bytes" + "fmt" + "os" + "path/filepath" + "syscall" +) + +// FIXME: this is copy-pasted from the aufs driver. +// It should be moved into the core. + +// Mounted returns true if a mount point exists. 
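+//
+// It works by comparing the device numbers of the path and of its parent
+// directory; a sketch (path illustrative):
+//
+//	ok, err := Mounted("/var/lib/docker/devicemapper/mnt/layer-id")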
+func Mounted(mountpoint string) (bool, error) { + mntpoint, err := os.Stat(mountpoint) + if err != nil { + if os.IsNotExist(err) { + return false, nil + } + return false, err + } + parent, err := os.Stat(filepath.Join(mountpoint, "..")) + if err != nil { + return false, err + } + mntpointSt := mntpoint.Sys().(*syscall.Stat_t) + parentSt := parent.Sys().(*syscall.Stat_t) + return mntpointSt.Dev != parentSt.Dev, nil +} + +type probeData struct { + fsName string + magic string + offset uint64 +} + +// ProbeFsType returns the filesystem name for the given device id. +func ProbeFsType(device string) (string, error) { + probes := []probeData{ + {"btrfs", "_BHRfS_M", 0x10040}, + {"ext4", "\123\357", 0x438}, + {"xfs", "XFSB", 0}, + } + + maxLen := uint64(0) + for _, p := range probes { + l := p.offset + uint64(len(p.magic)) + if l > maxLen { + maxLen = l + } + } + + file, err := os.Open(device) + if err != nil { + return "", err + } + defer file.Close() + + buffer := make([]byte, maxLen) + l, err := file.Read(buffer) + if err != nil { + return "", err + } + + if uint64(l) != maxLen { + return "", fmt.Errorf("devmapper: unable to detect filesystem type of %s, short read", device) + } + + for _, p := range probes { + if bytes.Equal([]byte(p.magic), buffer[p.offset:p.offset+uint64(len(p.magic))]) { + return p.fsName, nil + } + } + + return "", fmt.Errorf("devmapper: Unknown filesystem type on %s", device) +} + +func joinMountOptions(a, b string) string { + if a == "" { + return b + } + if b == "" { + return a + } + return a + "," + b +} diff --git a/components/engine/daemon/graphdriver/driver.go b/components/engine/daemon/graphdriver/driver.go new file mode 100644 index 0000000000..88f190d9e1 --- /dev/null +++ b/components/engine/daemon/graphdriver/driver.go @@ -0,0 +1,287 @@ +package graphdriver + +import ( + "errors" + "fmt" + "io" + "os" + "path/filepath" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/vbatts/tar-split/tar/storage" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/plugingetter" +) + +// FsMagic unsigned id of the filesystem in use. +type FsMagic uint32 + +const ( + // FsMagicUnsupported is a predefined constant value other than a valid filesystem id. + FsMagicUnsupported = FsMagic(0x00000000) +) + +var ( + // All registered drivers + drivers map[string]InitFunc + + // ErrNotSupported returned when driver is not supported. + ErrNotSupported = errors.New("driver not supported") + // ErrPrerequisites returned when driver does not meet prerequisites. + ErrPrerequisites = errors.New("prerequisites for driver not satisfied (wrong filesystem?)") + // ErrIncompatibleFS returned when file system is not supported. + ErrIncompatibleFS = fmt.Errorf("backing file system is unsupported for this graph driver") +) + +//CreateOpts contains optional arguments for Create() and CreateReadWrite() +// methods. +type CreateOpts struct { + MountLabel string + StorageOpt map[string]string +} + +// InitFunc initializes the storage driver. +type InitFunc func(root string, options []string, uidMaps, gidMaps []idtools.IDMap) (Driver, error) + +// ProtoDriver defines the basic capabilities of a driver. +// This interface exists solely to be a minimum set of methods +// for client code which choose not to implement the entire Driver +// interface and use the NaiveDiffDriver wrapper constructor. +// +// Use of ProtoDriver directly by client code is not recommended. 
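+//
+// The typical pattern, used by the devmapper driver above, is to implement
+// only ProtoDriver and wrap it (sketch; myProtoDriver is hypothetical):
+//
+//	func Init(root string, options []string, uidMaps, gidMaps []idtools.IDMap) (Driver, error) {
+//		d := &myProtoDriver{}
+//		return NewNaiveDiffDriver(d, uidMaps, gidMaps), nil
+//	}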
+type ProtoDriver interface { + // String returns a string representation of this driver. + String() string + // CreateReadWrite creates a new, empty filesystem layer that is ready + // to be used as the storage for a container. Additional options can + // be passed in opts. parent may be "" and opts may be nil. + CreateReadWrite(id, parent string, opts *CreateOpts) error + // Create creates a new, empty, filesystem layer with the + // specified id and parent and options passed in opts. Parent + // may be "" and opts may be nil. + Create(id, parent string, opts *CreateOpts) error + // Remove attempts to remove the filesystem layer with this id. + Remove(id string) error + // Get returns the mountpoint for the layered filesystem referred + // to by this id. You can optionally specify a mountLabel or "". + // Returns the absolute path to the mounted layered filesystem. + Get(id, mountLabel string) (dir string, err error) + // Put releases the system resources for the specified id, + // e.g, unmounting layered filesystem. + Put(id string) error + // Exists returns whether a filesystem layer with the specified + // ID exists on this driver. + Exists(id string) bool + // Status returns a set of key-value pairs which give low + // level diagnostic status about this driver. + Status() [][2]string + // Returns a set of key-value pairs which give low level information + // about the image/container driver is managing. + GetMetadata(id string) (map[string]string, error) + // Cleanup performs necessary tasks to release resources + // held by the driver, e.g., unmounting all layered filesystems + // known to this driver. + Cleanup() error +} + +// DiffDriver is the interface to use to implement graph diffs +type DiffDriver interface { + // Diff produces an archive of the changes between the specified + // layer and its parent layer which may be "". + Diff(id, parent string) (io.ReadCloser, error) + // Changes produces a list of changes between the specified layer + // and its parent layer. If parent is "", then all changes will be ADD changes. + Changes(id, parent string) ([]archive.Change, error) + // ApplyDiff extracts the changeset from the given diff into the + // layer with the specified id and parent, returning the size of the + // new layer in bytes. + // The archive.Reader must be an uncompressed stream. + ApplyDiff(id, parent string, diff io.Reader) (size int64, err error) + // DiffSize calculates the changes between the specified id + // and its parent and returns the size in bytes of the changes + // relative to its base filesystem directory. + DiffSize(id, parent string) (size int64, err error) +} + +// Driver is the interface for layered/snapshot file system drivers. +type Driver interface { + ProtoDriver + DiffDriver +} + +// Capabilities defines a list of capabilities a driver may implement. +// These capabilities are not required; however, they do determine how a +// graphdriver can be used. +type Capabilities struct { + // Flags that this driver is capable of reproducing exactly equivalent + // diffs for read-only layers. If set, clients can rely on the driver + // for consistent tar streams, and avoid extra processing to account + // for potential differences (eg: the layer store's use of tar-split). + ReproducesExactDiffs bool +} + +// CapabilityDriver is the interface for layered file system drivers that +// can report on their Capabilities. 
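+//
+// Callers probe for it with a type assertion (sketch):
+//
+//	if cd, ok := driver.(CapabilityDriver); ok && cd.Capabilities().ReproducesExactDiffs {
+//		// tar streams from this driver are stable; extra bookkeeping can be skipped
+//	}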
+type CapabilityDriver interface { + Capabilities() Capabilities +} + +// DiffGetterDriver is the interface for layered file system drivers that +// provide a specialized function for getting file contents for tar-split. +type DiffGetterDriver interface { + Driver + // DiffGetter returns an interface to efficiently retrieve the contents + // of files in a layer. + DiffGetter(id string) (FileGetCloser, error) +} + +// FileGetCloser extends the storage.FileGetter interface with a Close method +// for cleaning up. +type FileGetCloser interface { + storage.FileGetter + // Close cleans up any resources associated with the FileGetCloser. + Close() error +} + +// Checker makes checks on specified filesystems. +type Checker interface { + // IsMounted returns true if the provided path is mounted for the specific checker + IsMounted(path string) bool +} + +func init() { + drivers = make(map[string]InitFunc) +} + +// Register registers an InitFunc for the driver. +func Register(name string, initFunc InitFunc) error { + if _, exists := drivers[name]; exists { + return fmt.Errorf("Name already registered %s", name) + } + drivers[name] = initFunc + + return nil +} + +// GetDriver initializes and returns the registered driver +func GetDriver(name string, pg plugingetter.PluginGetter, config Options) (Driver, error) { + if initFunc, exists := drivers[name]; exists { + return initFunc(filepath.Join(config.Root, name), config.DriverOptions, config.UIDMaps, config.GIDMaps) + } + + pluginDriver, err := lookupPlugin(name, pg, config) + if err == nil { + return pluginDriver, nil + } + logrus.WithError(err).WithField("driver", name).WithField("home-dir", config.Root).Error("Failed to GetDriver graph") + return nil, ErrNotSupported +} + +// getBuiltinDriver initializes and returns the registered driver, but does not try to load from plugins +func getBuiltinDriver(name, home string, options []string, uidMaps, gidMaps []idtools.IDMap) (Driver, error) { + if initFunc, exists := drivers[name]; exists { + return initFunc(filepath.Join(home, name), options, uidMaps, gidMaps) + } + logrus.Errorf("Failed to built-in GetDriver graph %s %s", name, home) + return nil, ErrNotSupported +} + +// Options is used to initialize a graphdriver +type Options struct { + Root string + DriverOptions []string + UIDMaps []idtools.IDMap + GIDMaps []idtools.IDMap + ExperimentalEnabled bool +} + +// New creates the driver and initializes it at the specified root. +func New(name string, pg plugingetter.PluginGetter, config Options) (Driver, error) { + if name != "" { + logrus.Debugf("[graphdriver] trying provided driver: %s", name) // so the logs show specified driver + return GetDriver(name, pg, config) + } + + // Guess for prior driver + driversMap := scanPriorDrivers(config.Root) + for _, name := range priority { + if name == "vfs" { + // don't use vfs even if there is state present. + continue + } + if _, prior := driversMap[name]; prior { + // of the state found from prior drivers, check in order of our priority + // which we would prefer + driver, err := getBuiltinDriver(name, config.Root, config.DriverOptions, config.UIDMaps, config.GIDMaps) + if err != nil { + // unlike below, we will return error here, because there is prior + // state, and now it is no longer supported/prereq/compatible, so + // something changed and needs attention. Otherwise the daemon's + // images would just "disappear". 
+				logrus.Errorf("[graphdriver] prior storage driver %s failed: %s", name, err)
+				return nil, err
+			}
+
+			// abort starting when there are other prior configured drivers
+			// to ensure the user explicitly selects the driver to load
+			if len(driversMap)-1 > 0 {
+				var driversSlice []string
+				for name := range driversMap {
+					driversSlice = append(driversSlice, name)
+				}
+
+				return nil, fmt.Errorf("%s contains several valid graphdrivers: %s; Please cleanup or explicitly choose storage driver (-s <DRIVER>)", config.Root, strings.Join(driversSlice, ", "))
+			}
+
+			logrus.Infof("[graphdriver] using prior storage driver: %s", name)
+			return driver, nil
+		}
+	}
+
+	// Check for priority drivers first
+	for _, name := range priority {
+		driver, err := getBuiltinDriver(name, config.Root, config.DriverOptions, config.UIDMaps, config.GIDMaps)
+		if err != nil {
+			if isDriverNotSupported(err) {
+				continue
+			}
+			return nil, err
+		}
+		return driver, nil
+	}
+
+	// Check all registered drivers if no priority driver is found
+	for name, initFunc := range drivers {
+		driver, err := initFunc(filepath.Join(config.Root, name), config.DriverOptions, config.UIDMaps, config.GIDMaps)
+		if err != nil {
+			if isDriverNotSupported(err) {
+				continue
+			}
+			return nil, err
+		}
+		return driver, nil
+	}
+	return nil, fmt.Errorf("No supported storage backend found")
+}
+
+// isDriverNotSupported returns true if the error initializing
+// the graph driver is a non-supported error.
+func isDriverNotSupported(err error) bool {
+	return err == ErrNotSupported || err == ErrPrerequisites || err == ErrIncompatibleFS
+}
+
+// scanPriorDrivers returns an un-ordered scan of directories of prior storage drivers
+func scanPriorDrivers(root string) map[string]bool {
+	driversMap := make(map[string]bool)
+
+	for driver := range drivers {
+		p := filepath.Join(root, driver)
+		if _, err := os.Stat(p); err == nil && driver != "vfs" {
+			driversMap[driver] = true
+		}
+	}
+	return driversMap
+}
diff --git a/components/engine/daemon/graphdriver/driver_freebsd.go b/components/engine/daemon/graphdriver/driver_freebsd.go
new file mode 100644
index 0000000000..2891a84f3a
--- /dev/null
+++ b/components/engine/daemon/graphdriver/driver_freebsd.go
@@ -0,0 +1,19 @@
+package graphdriver
+
+import "syscall"
+
+var (
+	// Slice of drivers that should be used in an order
+	priority = []string{
+		"zfs",
+	}
+)
+
+// Mounted checks if the given path is mounted as the fs type
+func Mounted(fsType FsMagic, mountPath string) (bool, error) {
+	var buf syscall.Statfs_t
+	if err := syscall.Statfs(mountPath, &buf); err != nil {
+		return false, err
+	}
+	return FsMagic(buf.Type) == fsType, nil
+}
diff --git a/components/engine/daemon/graphdriver/driver_linux.go b/components/engine/daemon/graphdriver/driver_linux.go
new file mode 100644
index 0000000000..5c8d0e2301
--- /dev/null
+++ b/components/engine/daemon/graphdriver/driver_linux.go
@@ -0,0 +1,135 @@
+// +build linux
+
+package graphdriver
+
+import (
+	"path/filepath"
+	"syscall"
+
+	"github.com/docker/docker/pkg/mount"
+)
+
+const (
+	// FsMagicAufs filesystem id for Aufs
+	FsMagicAufs = FsMagic(0x61756673)
+	// FsMagicBtrfs filesystem id for Btrfs
+	FsMagicBtrfs = FsMagic(0x9123683E)
+	// FsMagicCramfs filesystem id for Cramfs
+	FsMagicCramfs = FsMagic(0x28cd3d45)
+	// FsMagicEcryptfs filesystem id for eCryptfs
+	FsMagicEcryptfs = FsMagic(0xf15f)
+	// FsMagicExtfs filesystem id for Extfs
+	FsMagicExtfs = FsMagic(0x0000EF53)
+	// FsMagicF2fs filesystem id for F2fs
+	FsMagicF2fs = FsMagic(0xF2F52010)
+	// FsMagicGPFS filesystem id for GPFS
+	FsMagicGPFS = FsMagic(0x47504653)
+	// FsMagicJffs2Fs filesystem id for Jffs2Fs
+	FsMagicJffs2Fs = FsMagic(0x000072b6)
+	// FsMagicJfs filesystem id for Jfs
+	FsMagicJfs = FsMagic(0x3153464a)
+	// FsMagicNfsFs filesystem id for NfsFs
+	FsMagicNfsFs = FsMagic(0x00006969)
+	// FsMagicRAMFs filesystem id for RamFs
+	FsMagicRAMFs = FsMagic(0x858458f6)
+	// FsMagicReiserFs filesystem id for ReiserFs
+	FsMagicReiserFs = FsMagic(0x52654973)
+	// FsMagicSmbFs filesystem id for SmbFs
+	FsMagicSmbFs = FsMagic(0x0000517B)
+	// FsMagicSquashFs filesystem id for SquashFs
+	FsMagicSquashFs = FsMagic(0x73717368)
+	// FsMagicTmpFs filesystem id for TmpFs
+	FsMagicTmpFs = FsMagic(0x01021994)
+	// FsMagicVxFS filesystem id for VxFs
+	FsMagicVxFS = FsMagic(0xa501fcf5)
+	// FsMagicXfs filesystem id for Xfs
+	FsMagicXfs = FsMagic(0x58465342)
+	// FsMagicZfs filesystem id for Zfs
+	FsMagicZfs = FsMagic(0x2fc12fc1)
+	// FsMagicOverlay filesystem id for overlay
+	FsMagicOverlay = FsMagic(0x794C7630)
+)
+
+var (
+	// Slice of drivers that should be used in an order
+	priority = []string{
+		"aufs",
+		"btrfs",
+		"zfs",
+		"overlay2",
+		"overlay",
+		"devicemapper",
+		"vfs",
+	}
+
+	// FsNames maps filesystem id to name of the filesystem.
+	FsNames = map[FsMagic]string{
+		FsMagicAufs:        "aufs",
+		FsMagicBtrfs:       "btrfs",
+		FsMagicCramfs:      "cramfs",
+		FsMagicExtfs:       "extfs",
+		FsMagicF2fs:        "f2fs",
+		FsMagicGPFS:        "gpfs",
+		FsMagicJffs2Fs:     "jffs2",
+		FsMagicJfs:         "jfs",
+		FsMagicNfsFs:       "nfs",
+		FsMagicOverlay:     "overlayfs",
+		FsMagicRAMFs:       "ramfs",
+		FsMagicReiserFs:    "reiserfs",
+		FsMagicSmbFs:       "smb",
+		FsMagicSquashFs:    "squashfs",
+		FsMagicTmpFs:       "tmpfs",
+		FsMagicUnsupported: "unsupported",
+		FsMagicVxFS:        "vxfs",
+		FsMagicXfs:         "xfs",
+		FsMagicZfs:         "zfs",
+	}
+)
+
+// GetFSMagic returns the filesystem id given the path.
+func GetFSMagic(rootpath string) (FsMagic, error) {
+	var buf syscall.Statfs_t
+	if err := syscall.Statfs(filepath.Dir(rootpath), &buf); err != nil {
+		return 0, err
+	}
+	return FsMagic(buf.Type), nil
+}
+
+// NewFsChecker returns a checker configured for the provided FsMagic
+func NewFsChecker(t FsMagic) Checker {
+	return &fsChecker{
+		t: t,
+	}
+}
+
+type fsChecker struct {
+	t FsMagic
+}
+
+func (c *fsChecker) IsMounted(path string) bool {
+	m, _ := Mounted(c.t, path)
+	return m
+}
+
+// NewDefaultChecker returns a check that parses /proc/mountinfo to check
+// if the specified path is mounted.
+func NewDefaultChecker() Checker {
+	return &defaultChecker{}
+}
+
+type defaultChecker struct {
+}
+
+func (c *defaultChecker) IsMounted(path string) bool {
+	m, _ := mount.Mounted(path)
+	return m
+}
+
+// Mounted checks if the given path is mounted as the fs type
+func Mounted(fsType FsMagic, mountPath string) (bool, error) {
+	var buf syscall.Statfs_t
+	if err := syscall.Statfs(mountPath, &buf); err != nil {
+		return false, err
+	}
+	return FsMagic(buf.Type) == fsType, nil
+}
diff --git a/components/engine/daemon/graphdriver/driver_solaris.go b/components/engine/daemon/graphdriver/driver_solaris.go
new file mode 100644
index 0000000000..7daf01c32d
--- /dev/null
+++ b/components/engine/daemon/graphdriver/driver_solaris.go
@@ -0,0 +1,97 @@
+// +build solaris,cgo
+
+package graphdriver
+
+/*
+#include <sys/statvfs.h>
+#include <stdlib.h>
+
+static inline struct statvfs *getstatfs(char *s) {
+	struct statvfs *buf;
+	int err;
+	buf = (struct statvfs *)malloc(sizeof(struct statvfs));
+	err = statvfs(s, buf);
+	return buf;
+}
+*/
+import "C"
+import (
+	"path/filepath"
+	"unsafe"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/docker/pkg/mount"
+)
+
+const (
+	// FsMagicZfs filesystem id for Zfs
+	FsMagicZfs = FsMagic(0x2fc12fc1)
+)
+
+var (
+	// Slice of drivers that should be used in an order
+	priority = []string{
+		"zfs",
+	}
+
+	// FsNames maps filesystem id to name of the filesystem.
+	FsNames = map[FsMagic]string{
+		FsMagicZfs: "zfs",
+	}
+)
+
+// GetFSMagic returns the filesystem id given the path.
+func GetFSMagic(rootpath string) (FsMagic, error) {
+	return 0, nil
+}
+
+type fsChecker struct {
+	t FsMagic
+}
+
+func (c *fsChecker) IsMounted(path string) bool {
+	m, _ := Mounted(c.t, path)
+	return m
+}
+
+// NewFsChecker returns a checker configured for the provided FsMagic
+func NewFsChecker(t FsMagic) Checker {
+	return &fsChecker{
+		t: t,
+	}
+}
+
+// NewDefaultChecker returns a check that parses /proc/mountinfo to check
+// if the specified path is mounted.
+// No-op on Solaris.
+func NewDefaultChecker() Checker {
+	return &defaultChecker{}
+}
+
+type defaultChecker struct {
+}
+
+func (c *defaultChecker) IsMounted(path string) bool {
+	m, _ := mount.Mounted(path)
+	return m
+}
+
+// Mounted checks if the given path is mounted as the fs type.
+// Solaris supports only ZFS for now.
+func Mounted(fsType FsMagic, mountPath string) (bool, error) {
+
+	cs := C.CString(filepath.Dir(mountPath))
+	buf := C.getstatfs(cs)
+
+	// on Solaris buf.f_basetype contains ['z', 'f', 's', 0 ... ]
+	if (buf.f_basetype[0] != 122) || (buf.f_basetype[1] != 102) || (buf.f_basetype[2] != 115) ||
+		(buf.f_basetype[3] != 0) {
+		logrus.Debugf("[zfs] no zfs dataset found for rootdir '%s'", mountPath)
+		C.free(unsafe.Pointer(buf))
+		return false, ErrPrerequisites
+	}
+
+	C.free(unsafe.Pointer(buf))
+	C.free(unsafe.Pointer(cs))
+	return true, nil
+}
diff --git a/components/engine/daemon/graphdriver/driver_unsupported.go b/components/engine/daemon/graphdriver/driver_unsupported.go
new file mode 100644
index 0000000000..4a875608b0
--- /dev/null
+++ b/components/engine/daemon/graphdriver/driver_unsupported.go
@@ -0,0 +1,15 @@
+// +build !linux,!windows,!freebsd,!solaris
+
+package graphdriver
+
+var (
+	// Slice of drivers that should be used in an order
+	priority = []string{
+		"unsupported",
+	}
+)
+
+// GetFSMagic returns the filesystem id given the path.
+func GetFSMagic(rootpath string) (FsMagic, error) { + return FsMagicUnsupported, nil +} diff --git a/components/engine/daemon/graphdriver/driver_windows.go b/components/engine/daemon/graphdriver/driver_windows.go new file mode 100644 index 0000000000..ffd30c2950 --- /dev/null +++ b/components/engine/daemon/graphdriver/driver_windows.go @@ -0,0 +1,14 @@ +package graphdriver + +var ( + // Slice of drivers that should be used in order + priority = []string{ + "windowsfilter", + } +) + +// GetFSMagic returns the filesystem id given the path. +func GetFSMagic(rootpath string) (FsMagic, error) { + // Note it is OK to return FsMagicUnsupported on Windows. + return FsMagicUnsupported, nil +} diff --git a/components/engine/daemon/graphdriver/fsdiff.go b/components/engine/daemon/graphdriver/fsdiff.go new file mode 100644 index 0000000000..20826cd7d2 --- /dev/null +++ b/components/engine/daemon/graphdriver/fsdiff.go @@ -0,0 +1,169 @@ +package graphdriver + +import ( + "io" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/chrootarchive" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/ioutils" +) + +var ( + // ApplyUncompressedLayer defines the unpack method used by the graph + // driver. + ApplyUncompressedLayer = chrootarchive.ApplyUncompressedLayer +) + +// NaiveDiffDriver takes a ProtoDriver and adds the +// capability of the Diffing methods which it may or may not +// support on its own. See the comment on the exported +// NewNaiveDiffDriver function below. +// Notably, the AUFS driver doesn't need to be wrapped like this. +type NaiveDiffDriver struct { + ProtoDriver + uidMaps []idtools.IDMap + gidMaps []idtools.IDMap +} + +// NewNaiveDiffDriver returns a fully functional driver that wraps the +// given ProtoDriver and adds the capability of the following methods which +// it may or may not support on its own: +// Diff(id, parent string) (archive.Archive, error) +// Changes(id, parent string) ([]archive.Change, error) +// ApplyDiff(id, parent string, diff archive.Reader) (size int64, err error) +// DiffSize(id, parent string) (size int64, err error) +func NewNaiveDiffDriver(driver ProtoDriver, uidMaps, gidMaps []idtools.IDMap) Driver { + return &NaiveDiffDriver{ProtoDriver: driver, + uidMaps: uidMaps, + gidMaps: gidMaps} +} + +// Diff produces an archive of the changes between the specified +// layer and its parent layer which may be "". +func (gdw *NaiveDiffDriver) Diff(id, parent string) (arch io.ReadCloser, err error) { + startTime := time.Now() + driver := gdw.ProtoDriver + + layerFs, err := driver.Get(id, "") + if err != nil { + return nil, err + } + + defer func() { + if err != nil { + driver.Put(id) + } + }() + + if parent == "" { + archive, err := archive.Tar(layerFs, archive.Uncompressed) + if err != nil { + return nil, err + } + return ioutils.NewReadCloserWrapper(archive, func() error { + err := archive.Close() + driver.Put(id) + return err + }), nil + } + + parentFs, err := driver.Get(parent, "") + if err != nil { + return nil, err + } + defer driver.Put(parent) + + changes, err := archive.ChangesDirs(layerFs, parentFs) + if err != nil { + return nil, err + } + + archive, err := archive.ExportChanges(layerFs, changes, gdw.uidMaps, gdw.gidMaps) + if err != nil { + return nil, err + } + + return ioutils.NewReadCloserWrapper(archive, func() error { + err := archive.Close() + driver.Put(id) + + // NaiveDiffDriver compares file metadata with parent layers. 
Parent layers + // are extracted from tar's with full second precision on modified time. + // We need this hack here to make sure calls within same second receive + // correct result. + time.Sleep(startTime.Truncate(time.Second).Add(time.Second).Sub(time.Now())) + return err + }), nil +} + +// Changes produces a list of changes between the specified layer +// and its parent layer. If parent is "", then all changes will be ADD changes. +func (gdw *NaiveDiffDriver) Changes(id, parent string) ([]archive.Change, error) { + driver := gdw.ProtoDriver + + layerFs, err := driver.Get(id, "") + if err != nil { + return nil, err + } + defer driver.Put(id) + + parentFs := "" + + if parent != "" { + parentFs, err = driver.Get(parent, "") + if err != nil { + return nil, err + } + defer driver.Put(parent) + } + + return archive.ChangesDirs(layerFs, parentFs) +} + +// ApplyDiff extracts the changeset from the given diff into the +// layer with the specified id and parent, returning the size of the +// new layer in bytes. +func (gdw *NaiveDiffDriver) ApplyDiff(id, parent string, diff io.Reader) (size int64, err error) { + driver := gdw.ProtoDriver + + // Mount the root filesystem so we can apply the diff/layer. + layerFs, err := driver.Get(id, "") + if err != nil { + return + } + defer driver.Put(id) + + options := &archive.TarOptions{UIDMaps: gdw.uidMaps, + GIDMaps: gdw.gidMaps} + start := time.Now().UTC() + logrus.Debug("Start untar layer") + if size, err = ApplyUncompressedLayer(layerFs, diff, options); err != nil { + return + } + logrus.Debugf("Untar time: %vs", time.Now().UTC().Sub(start).Seconds()) + + return +} + +// DiffSize calculates the changes between the specified layer +// and its parent and returns the size in bytes of the changes +// relative to its base filesystem directory. +func (gdw *NaiveDiffDriver) DiffSize(id, parent string) (size int64, err error) { + driver := gdw.ProtoDriver + + changes, err := gdw.Changes(id, parent) + if err != nil { + return + } + + layerFs, err := driver.Get(id, "") + if err != nil { + return + } + defer driver.Put(id) + + return archive.ChangesSize(layerFs, changes), nil +} diff --git a/components/engine/daemon/graphdriver/graphtest/graphbench_unix.go b/components/engine/daemon/graphdriver/graphtest/graphbench_unix.go new file mode 100644 index 0000000000..def822b9a1 --- /dev/null +++ b/components/engine/daemon/graphdriver/graphtest/graphbench_unix.go @@ -0,0 +1,259 @@ +// +build linux freebsd + +package graphtest + +import ( + "bytes" + "io" + "io/ioutil" + "path/filepath" + "testing" + + "github.com/docker/docker/pkg/stringid" +) + +// DriverBenchExists benchmarks calls to exist +func DriverBenchExists(b *testing.B, drivername string, driveroptions ...string) { + driver := GetDriver(b, drivername, driveroptions...) + defer PutDriver(b) + + base := stringid.GenerateRandomID() + + if err := driver.Create(base, "", nil); err != nil { + b.Fatal(err) + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + if !driver.Exists(base) { + b.Fatal("Newly created image doesn't exist") + } + } +} + +// DriverBenchGetEmpty benchmarks calls to get on an empty layer +func DriverBenchGetEmpty(b *testing.B, drivername string, driveroptions ...string) { + driver := GetDriver(b, drivername, driveroptions...) 
+ defer PutDriver(b) + + base := stringid.GenerateRandomID() + + if err := driver.Create(base, "", nil); err != nil { + b.Fatal(err) + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := driver.Get(base, "") + b.StopTimer() + if err != nil { + b.Fatalf("Error getting mount: %s", err) + } + if err := driver.Put(base); err != nil { + b.Fatalf("Error putting mount: %s", err) + } + b.StartTimer() + } +} + +// DriverBenchDiffBase benchmarks calls to diff on a root layer +func DriverBenchDiffBase(b *testing.B, drivername string, driveroptions ...string) { + driver := GetDriver(b, drivername, driveroptions...) + defer PutDriver(b) + + base := stringid.GenerateRandomID() + if err := driver.Create(base, "", nil); err != nil { + b.Fatal(err) + } + + if err := addFiles(driver, base, 3); err != nil { + b.Fatal(err) + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + arch, err := driver.Diff(base, "") + if err != nil { + b.Fatal(err) + } + _, err = io.Copy(ioutil.Discard, arch) + if err != nil { + b.Fatalf("Error copying archive: %s", err) + } + arch.Close() + } +} + +// DriverBenchDiffN benchmarks calls to diff on two layers with +// a provided number of files on the lower and upper layers. +func DriverBenchDiffN(b *testing.B, bottom, top int, drivername string, driveroptions ...string) { + driver := GetDriver(b, drivername, driveroptions...) + defer PutDriver(b) + base := stringid.GenerateRandomID() + upper := stringid.GenerateRandomID() + if err := driver.Create(base, "", nil); err != nil { + b.Fatal(err) + } + + if err := addManyFiles(driver, base, bottom, 3); err != nil { + b.Fatal(err) + } + + if err := driver.Create(upper, base, nil); err != nil { + b.Fatal(err) + } + + if err := addManyFiles(driver, upper, top, 6); err != nil { + b.Fatal(err) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + arch, err := driver.Diff(upper, "") + if err != nil { + b.Fatal(err) + } + _, err = io.Copy(ioutil.Discard, arch) + if err != nil { + b.Fatalf("Error copying archive: %s", err) + } + arch.Close() + } +} + +// DriverBenchDiffApplyN benchmarks calls to diff and apply together +func DriverBenchDiffApplyN(b *testing.B, fileCount int, drivername string, driveroptions ...string) { + driver := GetDriver(b, drivername, driveroptions...) 
+ defer PutDriver(b) + base := stringid.GenerateRandomID() + upper := stringid.GenerateRandomID() + if err := driver.Create(base, "", nil); err != nil { + b.Fatal(err) + } + + if err := addManyFiles(driver, base, fileCount, 3); err != nil { + b.Fatal(err) + } + + if err := driver.Create(upper, base, nil); err != nil { + b.Fatal(err) + } + + if err := addManyFiles(driver, upper, fileCount, 6); err != nil { + b.Fatal(err) + } + diffSize, err := driver.DiffSize(upper, "") + if err != nil { + b.Fatal(err) + } + b.ResetTimer() + b.StopTimer() + for i := 0; i < b.N; i++ { + diff := stringid.GenerateRandomID() + if err := driver.Create(diff, base, nil); err != nil { + b.Fatal(err) + } + + if err := checkManyFiles(driver, diff, fileCount, 3); err != nil { + b.Fatal(err) + } + + b.StartTimer() + + arch, err := driver.Diff(upper, "") + if err != nil { + b.Fatal(err) + } + + applyDiffSize, err := driver.ApplyDiff(diff, "", arch) + if err != nil { + b.Fatal(err) + } + + b.StopTimer() + arch.Close() + + if applyDiffSize != diffSize { + // TODO: enforce this + //b.Fatalf("Apply diff size different, got %d, expected %s", applyDiffSize, diffSize) + } + if err := checkManyFiles(driver, diff, fileCount, 6); err != nil { + b.Fatal(err) + } + } +} + +// DriverBenchDeepLayerDiff benchmarks calls to diff on top of a given number of layers. +func DriverBenchDeepLayerDiff(b *testing.B, layerCount int, drivername string, driveroptions ...string) { + driver := GetDriver(b, drivername, driveroptions...) + defer PutDriver(b) + + base := stringid.GenerateRandomID() + if err := driver.Create(base, "", nil); err != nil { + b.Fatal(err) + } + + if err := addFiles(driver, base, 50); err != nil { + b.Fatal(err) + } + + topLayer, err := addManyLayers(driver, base, layerCount) + if err != nil { + b.Fatal(err) + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + arch, err := driver.Diff(topLayer, "") + if err != nil { + b.Fatal(err) + } + _, err = io.Copy(ioutil.Discard, arch) + if err != nil { + b.Fatalf("Error copying archive: %s", err) + } + arch.Close() + } +} + +// DriverBenchDeepLayerRead benchmarks calls to read a file under a given number of layers. +func DriverBenchDeepLayerRead(b *testing.B, layerCount int, drivername string, driveroptions ...string) { + driver := GetDriver(b, drivername, driveroptions...) 
+	defer PutDriver(b)
+
+	base := stringid.GenerateRandomID()
+	if err := driver.Create(base, "", nil); err != nil {
+		b.Fatal(err)
+	}
+
+	content := []byte("test content")
+	if err := addFile(driver, base, "testfile.txt", content); err != nil {
+		b.Fatal(err)
+	}
+
+	topLayer, err := addManyLayers(driver, base, layerCount)
+	if err != nil {
+		b.Fatal(err)
+	}
+
+	root, err := driver.Get(topLayer, "")
+	if err != nil {
+		b.Fatal(err)
+	}
+	defer driver.Put(topLayer)
+
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+
+		// Read content
+		c, err := ioutil.ReadFile(filepath.Join(root, "testfile.txt"))
+		if err != nil {
+			b.Fatal(err)
+		}
+
+		b.StopTimer()
+		if bytes.Compare(c, content) != 0 {
+			b.Fatalf("Wrong content in file %v, expected %v", c, content)
+		}
+		b.StartTimer()
+	}
+}
diff --git a/components/engine/daemon/graphdriver/graphtest/graphtest_unix.go b/components/engine/daemon/graphdriver/graphtest/graphtest_unix.go
new file mode 100644
index 0000000000..6e952de78b
--- /dev/null
+++ b/components/engine/daemon/graphdriver/graphtest/graphtest_unix.go
@@ -0,0 +1,358 @@
+// +build linux freebsd solaris
+
+package graphtest
+
+import (
+	"bytes"
+	"io/ioutil"
+	"math/rand"
+	"os"
+	"path"
+	"reflect"
+	"syscall"
+	"testing"
+	"unsafe"
+
+	"github.com/docker/docker/daemon/graphdriver"
+	"github.com/docker/docker/pkg/stringid"
+	"github.com/docker/go-units"
+)
+
+var (
+	drv *Driver
+)
+
+// Driver conforms to graphdriver.Driver interface and
+// contains information such as root and reference count of the number of clients using it.
+// This helps in testing drivers added into the framework.
+type Driver struct {
+	graphdriver.Driver
+	root     string
+	refCount int
+}
+
+func newDriver(t testing.TB, name string, options []string) *Driver {
+	root, err := ioutil.TempDir("", "docker-graphtest-")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if err := os.MkdirAll(root, 0755); err != nil {
+		t.Fatal(err)
+	}
+
+	d, err := graphdriver.GetDriver(name, nil, graphdriver.Options{DriverOptions: options, Root: root})
+	if err != nil {
+		t.Logf("graphdriver: %v\n", err)
+		if err == graphdriver.ErrNotSupported || err == graphdriver.ErrPrerequisites || err == graphdriver.ErrIncompatibleFS {
+			t.Skipf("Driver %s not supported", name)
+		}
+		t.Fatal(err)
+	}
+	return &Driver{d, root, 1}
+}
+
+func cleanup(t testing.TB, d *Driver) {
+	if err := d.Cleanup(); err != nil {
+		t.Fatal(err)
+	}
+	os.RemoveAll(d.root)
+}
+
+// GetDriver creates a new driver with the given name, or returns the existing driver with that name after updating its reference count.
+func GetDriver(t testing.TB, name string, options ...string) graphdriver.Driver {
+	if drv == nil {
+		drv = newDriver(t, name, options)
+	} else {
+		drv.refCount++
+	}
+	return drv
+}
+
+// PutDriver removes the driver if it is no longer used and updates the reference count.
+func PutDriver(t testing.TB) {
+	if drv == nil {
+		t.Skip("No driver to put!")
+	}
+	drv.refCount--
+	if drv.refCount == 0 {
+		cleanup(t, drv)
+		drv = nil
+	}
+}
+
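GetDriver/PutDriver share one reference-counted driver across a test file, so a suite brackets its tests with an explicit setup and teardown. A hypothetical minimal suite for the vfs driver as a sketch (the overlay tests later in this patch follow exactly this shape):

```go
// Hypothetical vfs_test.go illustrating the shared-driver lifecycle.
package vfs

import (
	"testing"

	"github.com/docker/docker/daemon/graphdriver/graphtest"
)

// TestVfsSetup acquires the shared driver; subsequent tests reuse it via
// the reference count inside GetDriver.
func TestVfsSetup(t *testing.T) {
	graphtest.GetDriver(t, "vfs")
}

func TestVfsCreateEmpty(t *testing.T) {
	graphtest.DriverTestCreateEmpty(t, "vfs")
}

// TestVfsTeardown releases the last reference, which runs cleanup.
func TestVfsTeardown(t *testing.T) {
	graphtest.PutDriver(t)
}
```

+// DriverTestCreateEmpty creates a new image and verifies it is empty and has the right metadata
+func DriverTestCreateEmpty(t testing.TB, drivername string, driverOptions ...string) {
+	driver := GetDriver(t, drivername, driverOptions...)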
+ defer PutDriver(t) + + if err := driver.Create("empty", "", nil); err != nil { + t.Fatal(err) + } + + defer func() { + if err := driver.Remove("empty"); err != nil { + t.Fatal(err) + } + }() + + if !driver.Exists("empty") { + t.Fatal("Newly created image doesn't exist") + } + + dir, err := driver.Get("empty", "") + if err != nil { + t.Fatal(err) + } + + verifyFile(t, dir, 0755|os.ModeDir, 0, 0) + + // Verify that the directory is empty + fis, err := readDir(dir) + if err != nil { + t.Fatal(err) + } + + if len(fis) != 0 { + t.Fatal("New directory not empty") + } + + driver.Put("empty") +} + +// DriverTestCreateBase create a base driver and verify. +func DriverTestCreateBase(t testing.TB, drivername string, driverOptions ...string) { + driver := GetDriver(t, drivername, driverOptions...) + defer PutDriver(t) + + createBase(t, driver, "Base") + defer func() { + if err := driver.Remove("Base"); err != nil { + t.Fatal(err) + } + }() + verifyBase(t, driver, "Base") +} + +// DriverTestCreateSnap Create a driver and snap and verify. +func DriverTestCreateSnap(t testing.TB, drivername string, driverOptions ...string) { + driver := GetDriver(t, drivername, driverOptions...) + defer PutDriver(t) + + createBase(t, driver, "Base") + + defer func() { + if err := driver.Remove("Base"); err != nil { + t.Fatal(err) + } + }() + + if err := driver.Create("Snap", "Base", nil); err != nil { + t.Fatal(err) + } + + defer func() { + if err := driver.Remove("Snap"); err != nil { + t.Fatal(err) + } + }() + + verifyBase(t, driver, "Snap") +} + +// DriverTestDeepLayerRead reads a file from a lower layer under a given number of layers +func DriverTestDeepLayerRead(t testing.TB, layerCount int, drivername string, driverOptions ...string) { + driver := GetDriver(t, drivername, driverOptions...) + defer PutDriver(t) + + base := stringid.GenerateRandomID() + if err := driver.Create(base, "", nil); err != nil { + t.Fatal(err) + } + + content := []byte("test content") + if err := addFile(driver, base, "testfile.txt", content); err != nil { + t.Fatal(err) + } + + topLayer, err := addManyLayers(driver, base, layerCount) + if err != nil { + t.Fatal(err) + } + + err = checkManyLayers(driver, topLayer, layerCount) + if err != nil { + t.Fatal(err) + } + + if err := checkFile(driver, topLayer, "testfile.txt", content); err != nil { + t.Fatal(err) + } +} + +// DriverTestDiffApply tests diffing and applying produces the same layer +func DriverTestDiffApply(t testing.TB, fileCount int, drivername string, driverOptions ...string) { + driver := GetDriver(t, drivername, driverOptions...) 
+ defer PutDriver(t) + base := stringid.GenerateRandomID() + upper := stringid.GenerateRandomID() + deleteFile := "file-remove.txt" + deleteFileContent := []byte("This file should get removed in upper!") + deleteDir := "var/lib" + + if err := driver.Create(base, "", nil); err != nil { + t.Fatal(err) + } + + if err := addManyFiles(driver, base, fileCount, 3); err != nil { + t.Fatal(err) + } + + if err := addFile(driver, base, deleteFile, deleteFileContent); err != nil { + t.Fatal(err) + } + + if err := addDirectory(driver, base, deleteDir); err != nil { + t.Fatal(err) + } + + if err := driver.Create(upper, base, nil); err != nil { + t.Fatal(err) + } + + if err := addManyFiles(driver, upper, fileCount, 6); err != nil { + t.Fatal(err) + } + + if err := removeAll(driver, upper, deleteFile, deleteDir); err != nil { + t.Fatal(err) + } + + diffSize, err := driver.DiffSize(upper, "") + if err != nil { + t.Fatal(err) + } + + diff := stringid.GenerateRandomID() + if err := driver.Create(diff, base, nil); err != nil { + t.Fatal(err) + } + + if err := checkManyFiles(driver, diff, fileCount, 3); err != nil { + t.Fatal(err) + } + + if err := checkFile(driver, diff, deleteFile, deleteFileContent); err != nil { + t.Fatal(err) + } + + arch, err := driver.Diff(upper, base) + if err != nil { + t.Fatal(err) + } + + buf := bytes.NewBuffer(nil) + if _, err := buf.ReadFrom(arch); err != nil { + t.Fatal(err) + } + if err := arch.Close(); err != nil { + t.Fatal(err) + } + + applyDiffSize, err := driver.ApplyDiff(diff, base, bytes.NewReader(buf.Bytes())) + if err != nil { + t.Fatal(err) + } + + if applyDiffSize != diffSize { + t.Fatalf("Apply diff size different, got %d, expected %d", applyDiffSize, diffSize) + } + + if err := checkManyFiles(driver, diff, fileCount, 6); err != nil { + t.Fatal(err) + } + + if err := checkFileRemoved(driver, diff, deleteFile); err != nil { + t.Fatal(err) + } + + if err := checkFileRemoved(driver, diff, deleteDir); err != nil { + t.Fatal(err) + } +} + +// DriverTestChanges tests computed changes on a layer matches changes made +func DriverTestChanges(t testing.TB, drivername string, driverOptions ...string) { + driver := GetDriver(t, drivername, driverOptions...) + defer PutDriver(t) + base := stringid.GenerateRandomID() + upper := stringid.GenerateRandomID() + if err := driver.Create(base, "", nil); err != nil { + t.Fatal(err) + } + + if err := addManyFiles(driver, base, 20, 3); err != nil { + t.Fatal(err) + } + + if err := driver.Create(upper, base, nil); err != nil { + t.Fatal(err) + } + + expectedChanges, err := changeManyFiles(driver, upper, 20, 6) + if err != nil { + t.Fatal(err) + } + + changes, err := driver.Changes(upper, base) + if err != nil { + t.Fatal(err) + } + + if err = checkChanges(expectedChanges, changes); err != nil { + t.Fatal(err) + } +} + +func writeRandomFile(path string, size uint64) error { + buf := make([]int64, size/8) + + r := rand.NewSource(0) + for i := range buf { + buf[i] = r.Int63() + } + + // Cast to []byte + header := *(*reflect.SliceHeader)(unsafe.Pointer(&buf)) + header.Len *= 8 + header.Cap *= 8 + data := *(*[]byte)(unsafe.Pointer(&header)) + + return ioutil.WriteFile(path, data, 0700) +} + +// DriverTestSetQuota Create a driver and test setting quota. 
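The writeRandomFile helper above reinterprets an []int64 as []byte through reflect.SliceHeader, which avoids a copy but leans on unsafe. An equivalent sketch using encoding/binary instead (not part of the patch; writeRandomFileSafe is a hypothetical name, and it assumes size is a multiple of 8 as in the quota test):

```go
package main

import (
	"encoding/binary"
	"io/ioutil"
	"math/rand"
)

// writeRandomFileSafe fills a file with the same deterministic
// pseudo-random bytes as writeRandomFile, without the unsafe slice cast.
func writeRandomFileSafe(path string, size uint64) error {
	r := rand.NewSource(0)
	data := make([]byte, size)
	// Emit one 8-byte little-endian word per Int63 draw.
	for i := 0; i+8 <= len(data); i += 8 {
		binary.LittleEndian.PutUint64(data[i:], uint64(r.Int63()))
	}
	return ioutil.WriteFile(path, data, 0700)
}

func main() {
	if err := writeRandomFileSafe("/tmp/random.bin", 1<<20); err != nil {
		panic(err)
	}
}
```
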
+func DriverTestSetQuota(t *testing.T, drivername string) { + driver := GetDriver(t, drivername) + defer PutDriver(t) + + createBase(t, driver, "Base") + createOpts := &graphdriver.CreateOpts{} + createOpts.StorageOpt = make(map[string]string, 1) + createOpts.StorageOpt["size"] = "50M" + if err := driver.Create("zfsTest", "Base", createOpts); err != nil { + t.Fatal(err) + } + + mountPath, err := driver.Get("zfsTest", "") + if err != nil { + t.Fatal(err) + } + + quota := uint64(50 * units.MiB) + err = writeRandomFile(path.Join(mountPath, "file"), quota*2) + if pathError, ok := err.(*os.PathError); ok && pathError.Err != syscall.EDQUOT { + t.Fatalf("expect write() to fail with %v, got %v", syscall.EDQUOT, err) + } + +} diff --git a/components/engine/daemon/graphdriver/graphtest/graphtest_windows.go b/components/engine/daemon/graphdriver/graphtest/graphtest_windows.go new file mode 100644 index 0000000000..a50c5211e3 --- /dev/null +++ b/components/engine/daemon/graphdriver/graphtest/graphtest_windows.go @@ -0,0 +1 @@ +package graphtest diff --git a/components/engine/daemon/graphdriver/graphtest/testutil.go b/components/engine/daemon/graphdriver/graphtest/testutil.go new file mode 100644 index 0000000000..35bf6d17ba --- /dev/null +++ b/components/engine/daemon/graphdriver/graphtest/testutil.go @@ -0,0 +1,342 @@ +package graphtest + +import ( + "bytes" + "fmt" + "io/ioutil" + "math/rand" + "os" + "path" + "sort" + + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/stringid" +) + +func randomContent(size int, seed int64) []byte { + s := rand.NewSource(seed) + content := make([]byte, size) + + for i := 0; i < len(content); i += 7 { + val := s.Int63() + for j := 0; i+j < len(content) && j < 7; j++ { + content[i+j] = byte(val) + val >>= 8 + } + } + + return content +} + +func addFiles(drv graphdriver.Driver, layer string, seed int64) error { + root, err := drv.Get(layer, "") + if err != nil { + return err + } + defer drv.Put(layer) + + if err := ioutil.WriteFile(path.Join(root, "file-a"), randomContent(64, seed), 0755); err != nil { + return err + } + if err := os.MkdirAll(path.Join(root, "dir-b"), 0755); err != nil { + return err + } + if err := ioutil.WriteFile(path.Join(root, "dir-b", "file-b"), randomContent(128, seed+1), 0755); err != nil { + return err + } + + return ioutil.WriteFile(path.Join(root, "file-c"), randomContent(128*128, seed+2), 0755) +} + +func checkFile(drv graphdriver.Driver, layer, filename string, content []byte) error { + root, err := drv.Get(layer, "") + if err != nil { + return err + } + defer drv.Put(layer) + + fileContent, err := ioutil.ReadFile(path.Join(root, filename)) + if err != nil { + return err + } + + if bytes.Compare(fileContent, content) != 0 { + return fmt.Errorf("mismatched file content %v, expecting %v", fileContent, content) + } + + return nil +} + +func addFile(drv graphdriver.Driver, layer, filename string, content []byte) error { + root, err := drv.Get(layer, "") + if err != nil { + return err + } + defer drv.Put(layer) + + return ioutil.WriteFile(path.Join(root, filename), content, 0755) +} + +func addDirectory(drv graphdriver.Driver, layer, dir string) error { + root, err := drv.Get(layer, "") + if err != nil { + return err + } + defer drv.Put(layer) + + return os.MkdirAll(path.Join(root, dir), 0755) +} + +func removeAll(drv graphdriver.Driver, layer string, names ...string) error { + root, err := drv.Get(layer, "") + if err != nil { + return err + } + defer drv.Put(layer) + + for 
_, filename := range names { + if err := os.RemoveAll(path.Join(root, filename)); err != nil { + return err + } + } + return nil +} + +func checkFileRemoved(drv graphdriver.Driver, layer, filename string) error { + root, err := drv.Get(layer, "") + if err != nil { + return err + } + defer drv.Put(layer) + + if _, err := os.Stat(path.Join(root, filename)); err == nil { + return fmt.Errorf("file still exists: %s", path.Join(root, filename)) + } else if !os.IsNotExist(err) { + return err + } + + return nil +} + +func addManyFiles(drv graphdriver.Driver, layer string, count int, seed int64) error { + root, err := drv.Get(layer, "") + if err != nil { + return err + } + defer drv.Put(layer) + + for i := 0; i < count; i += 100 { + dir := path.Join(root, fmt.Sprintf("directory-%d", i)) + if err := os.MkdirAll(dir, 0755); err != nil { + return err + } + for j := 0; i+j < count && j < 100; j++ { + file := path.Join(dir, fmt.Sprintf("file-%d", i+j)) + if err := ioutil.WriteFile(file, randomContent(64, seed+int64(i+j)), 0755); err != nil { + return err + } + } + } + + return nil +} + +func changeManyFiles(drv graphdriver.Driver, layer string, count int, seed int64) ([]archive.Change, error) { + root, err := drv.Get(layer, "") + if err != nil { + return nil, err + } + defer drv.Put(layer) + + changes := []archive.Change{} + for i := 0; i < count; i += 100 { + archiveRoot := fmt.Sprintf("/directory-%d", i) + if err := os.MkdirAll(path.Join(root, archiveRoot), 0755); err != nil { + return nil, err + } + for j := 0; i+j < count && j < 100; j++ { + if j == 0 { + changes = append(changes, archive.Change{ + Path: archiveRoot, + Kind: archive.ChangeModify, + }) + } + var change archive.Change + switch j % 3 { + // Update file + case 0: + change.Path = path.Join(archiveRoot, fmt.Sprintf("file-%d", i+j)) + change.Kind = archive.ChangeModify + if err := ioutil.WriteFile(path.Join(root, change.Path), randomContent(64, seed+int64(i+j)), 0755); err != nil { + return nil, err + } + // Add file + case 1: + change.Path = path.Join(archiveRoot, fmt.Sprintf("file-%d-%d", seed, i+j)) + change.Kind = archive.ChangeAdd + if err := ioutil.WriteFile(path.Join(root, change.Path), randomContent(64, seed+int64(i+j)), 0755); err != nil { + return nil, err + } + // Remove file + case 2: + change.Path = path.Join(archiveRoot, fmt.Sprintf("file-%d", i+j)) + change.Kind = archive.ChangeDelete + if err := os.Remove(path.Join(root, change.Path)); err != nil { + return nil, err + } + } + changes = append(changes, change) + } + } + + return changes, nil +} + +func checkManyFiles(drv graphdriver.Driver, layer string, count int, seed int64) error { + root, err := drv.Get(layer, "") + if err != nil { + return err + } + defer drv.Put(layer) + + for i := 0; i < count; i += 100 { + dir := path.Join(root, fmt.Sprintf("directory-%d", i)) + for j := 0; i+j < count && j < 100; j++ { + file := path.Join(dir, fmt.Sprintf("file-%d", i+j)) + fileContent, err := ioutil.ReadFile(file) + if err != nil { + return err + } + + content := randomContent(64, seed+int64(i+j)) + + if bytes.Compare(fileContent, content) != 0 { + return fmt.Errorf("mismatched file content %v, expecting %v", fileContent, content) + } + } + } + + return nil +} + +type changeList []archive.Change + +func (c changeList) Less(i, j int) bool { + if c[i].Path == c[j].Path { + return c[i].Kind < c[j].Kind + } + return c[i].Path < c[j].Path +} +func (c changeList) Len() int { return len(c) } +func (c changeList) Swap(i, j int) { c[j], c[i] = c[i], c[j] } + +func checkChanges(expected, 
actual []archive.Change) error { + if len(expected) != len(actual) { + return fmt.Errorf("unexpected number of changes, expected %d, got %d", len(expected), len(actual)) + } + sort.Sort(changeList(expected)) + sort.Sort(changeList(actual)) + + for i := range expected { + if expected[i] != actual[i] { + return fmt.Errorf("unexpected change, expecting %v, got %v", expected[i], actual[i]) + } + } + + return nil +} + +func addLayerFiles(drv graphdriver.Driver, layer, parent string, i int) error { + root, err := drv.Get(layer, "") + if err != nil { + return err + } + defer drv.Put(layer) + + if err := ioutil.WriteFile(path.Join(root, "top-id"), []byte(layer), 0755); err != nil { + return err + } + layerDir := path.Join(root, fmt.Sprintf("layer-%d", i)) + if err := os.MkdirAll(layerDir, 0755); err != nil { + return err + } + if err := ioutil.WriteFile(path.Join(layerDir, "layer-id"), []byte(layer), 0755); err != nil { + return err + } + if err := ioutil.WriteFile(path.Join(layerDir, "parent-id"), []byte(parent), 0755); err != nil { + return err + } + + return nil +} + +func addManyLayers(drv graphdriver.Driver, baseLayer string, count int) (string, error) { + lastLayer := baseLayer + for i := 1; i <= count; i++ { + nextLayer := stringid.GenerateRandomID() + if err := drv.Create(nextLayer, lastLayer, nil); err != nil { + return "", err + } + if err := addLayerFiles(drv, nextLayer, lastLayer, i); err != nil { + return "", err + } + + lastLayer = nextLayer + + } + return lastLayer, nil +} + +func checkManyLayers(drv graphdriver.Driver, layer string, count int) error { + root, err := drv.Get(layer, "") + if err != nil { + return err + } + defer drv.Put(layer) + + layerIDBytes, err := ioutil.ReadFile(path.Join(root, "top-id")) + if err != nil { + return err + } + + if bytes.Compare(layerIDBytes, []byte(layer)) != 0 { + return fmt.Errorf("mismatched file content %v, expecting %v", layerIDBytes, []byte(layer)) + } + + for i := count; i > 0; i-- { + layerDir := path.Join(root, fmt.Sprintf("layer-%d", i)) + + thisLayerIDBytes, err := ioutil.ReadFile(path.Join(layerDir, "layer-id")) + if err != nil { + return err + } + if bytes.Compare(thisLayerIDBytes, layerIDBytes) != 0 { + return fmt.Errorf("mismatched file content %v, expecting %v", thisLayerIDBytes, layerIDBytes) + } + layerIDBytes, err = ioutil.ReadFile(path.Join(layerDir, "parent-id")) + if err != nil { + return err + } + } + return nil +} + +// readDir reads a directory just like ioutil.ReadDir() +// then hides specific files (currently "lost+found") +// so the tests don't "see" it +func readDir(dir string) ([]os.FileInfo, error) { + a, err := ioutil.ReadDir(dir) + if err != nil { + return nil, err + } + + b := a[:0] + for _, x := range a { + if x.Name() != "lost+found" { // ext4 always have this dir + b = append(b, x) + } + } + + return b, nil +} diff --git a/components/engine/daemon/graphdriver/graphtest/testutil_unix.go b/components/engine/daemon/graphdriver/graphtest/testutil_unix.go new file mode 100644 index 0000000000..49b0c2cc35 --- /dev/null +++ b/components/engine/daemon/graphdriver/graphtest/testutil_unix.go @@ -0,0 +1,143 @@ +// +build linux freebsd + +package graphtest + +import ( + "fmt" + "io/ioutil" + "os" + "path" + "syscall" + "testing" + + "github.com/docker/docker/daemon/graphdriver" +) + +// InitLoopbacks ensures that the loopback devices are properly created within +// the system running the device mapper tests. 
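A note on the device numbers used in the loopback setup below (a standalone sketch, not part of the patch): the Mknod call packs the loop driver's major number 7 and a minor number into a Linux dev_t by hand. Factored out, the encoding looks like this:

```go
package main

import "fmt"

// makedev mirrors the dev_t packing used by the syscall.Mknod call in
// InitLoopbacks: the major number occupies bits 8-19, the low 8 bits of
// the minor sit in bits 0-7, and any higher minor bits are shifted above
// the major field.
func makedev(major, minor int) int {
	return (major << 8) | (minor & 0xff) | ((minor & 0xfff00) << 12)
}

func main() {
	for _, minor := range []int{0, 7, 300} {
		fmt.Printf("loop minor %d -> dev_t %#x\n", minor, makedev(7, minor))
	}
}
```
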
+func InitLoopbacks() error {
+	statT, err := getBaseLoopStats()
+	if err != nil {
+		return err
+	}
+	// create at least 8 loopback files
+	for i := 0; i < 8; i++ {
+		loopPath := fmt.Sprintf("/dev/loop%d", i)
+		// only create new loopback files if they don't exist
+		if _, err := os.Stat(loopPath); err != nil {
+			if mkerr := syscall.Mknod(loopPath,
+				uint32(statT.Mode|syscall.S_IFBLK), int((7<<8)|(i&0xff)|((i&0xfff00)<<12))); mkerr != nil {
+				return mkerr
+			}
+			os.Chown(loopPath, int(statT.Uid), int(statT.Gid))
+		}
+	}
+	return nil
+}
+
+// getBaseLoopStats inspects /dev/loop0 to collect uid, gid, and mode for the
+// loop0 device on the system. If it does not exist we assume 0,0,0660 for the
+// stat data.
+func getBaseLoopStats() (*syscall.Stat_t, error) {
+	loop0, err := os.Stat("/dev/loop0")
+	if err != nil {
+		if os.IsNotExist(err) {
+			return &syscall.Stat_t{
+				Uid:  0,
+				Gid:  0,
+				Mode: 0660,
+			}, nil
+		}
+		return nil, err
+	}
+	return loop0.Sys().(*syscall.Stat_t), nil
+}
+
+func verifyFile(t testing.TB, path string, mode os.FileMode, uid, gid uint32) {
+	fi, err := os.Stat(path)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if fi.Mode()&os.ModeType != mode&os.ModeType {
+		t.Fatalf("Expected %s type 0x%x, got 0x%x", path, mode&os.ModeType, fi.Mode()&os.ModeType)
+	}
+
+	if fi.Mode()&os.ModePerm != mode&os.ModePerm {
+		t.Fatalf("Expected %s mode %o, got %o", path, mode&os.ModePerm, fi.Mode()&os.ModePerm)
+	}
+
+	if fi.Mode()&os.ModeSticky != mode&os.ModeSticky {
+		t.Fatalf("Expected %s sticky 0x%x, got 0x%x", path, mode&os.ModeSticky, fi.Mode()&os.ModeSticky)
+	}
+
+	if fi.Mode()&os.ModeSetuid != mode&os.ModeSetuid {
+		t.Fatalf("Expected %s setuid 0x%x, got 0x%x", path, mode&os.ModeSetuid, fi.Mode()&os.ModeSetuid)
+	}
+
+	if fi.Mode()&os.ModeSetgid != mode&os.ModeSetgid {
+		t.Fatalf("Expected %s setgid 0x%x, got 0x%x", path, mode&os.ModeSetgid, fi.Mode()&os.ModeSetgid)
+	}
+
+	if stat, ok := fi.Sys().(*syscall.Stat_t); ok {
+		if stat.Uid != uid {
+			t.Fatalf("%s not owned by uid %d", path, uid)
+		}
+		if stat.Gid != gid {
+			t.Fatalf("%s not owned by gid %d", path, gid)
+		}
+	}
+}
+
+func createBase(t testing.TB, driver graphdriver.Driver, name string) {
+	// We need to be able to set any perms
+	oldmask := syscall.Umask(0)
+	defer syscall.Umask(oldmask)
+
+	if err := driver.CreateReadWrite(name, "", nil); err != nil {
+		t.Fatal(err)
+	}
+
+	dir, err := driver.Get(name, "")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer driver.Put(name)
+
+	subdir := path.Join(dir, "a subdir")
+	if err := os.Mkdir(subdir, 0705|os.ModeSticky); err != nil {
+		t.Fatal(err)
+	}
+	if err := os.Chown(subdir, 1, 2); err != nil {
+		t.Fatal(err)
+	}
+
+	file := path.Join(dir, "a file")
+	if err := ioutil.WriteFile(file, []byte("Some data"), 0222|os.ModeSetuid); err != nil {
+		t.Fatal(err)
+	}
+}
+
+func verifyBase(t testing.TB, driver graphdriver.Driver, name string) {
+	dir, err := driver.Get(name, "")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer driver.Put(name)
+
+	subdir := path.Join(dir, "a subdir")
+	verifyFile(t, subdir, 0705|os.ModeDir|os.ModeSticky, 1, 2)
+
+	file := path.Join(dir, "a file")
+	verifyFile(t, file, 0222|os.ModeSetuid, 0, 0)
+
+	fis, err := readDir(dir)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if len(fis) != 2 {
+		t.Fatal("Unexpected files in base image")
+	}
+}
diff --git a/components/engine/daemon/graphdriver/overlay/copy.go b/components/engine/daemon/graphdriver/overlay/copy.go
new file mode 100644
index 0000000000..666a5c0e04
--- /dev/null
+++ b/components/engine/daemon/graphdriver/overlay/copy.go
@@ -0,0 +1,174 @@
+// +build linux
+
+package overlay
+
+import (
+	"fmt"
+	"os"
+	"path/filepath"
+	"syscall"
+	"time"
+
+	"github.com/docker/docker/pkg/pools"
+	"github.com/docker/docker/pkg/system"
+	rsystem "github.com/opencontainers/runc/libcontainer/system"
+)
+
+type copyFlags int
+
+const (
+	copyHardlink copyFlags = 1 << iota
+)
+
+func copyRegular(srcPath, dstPath string, mode os.FileMode) error {
+	srcFile, err := os.Open(srcPath)
+	if err != nil {
+		return err
+	}
+	defer srcFile.Close()
+
+	dstFile, err := os.OpenFile(dstPath, os.O_WRONLY|os.O_CREATE, mode)
+	if err != nil {
+		return err
+	}
+	defer dstFile.Close()
+
+	_, err = pools.Copy(dstFile, srcFile)
+
+	return err
+}
+
+func copyXattr(srcPath, dstPath, attr string) error {
+	data, err := system.Lgetxattr(srcPath, attr)
+	if err != nil {
+		return err
+	}
+	if data != nil {
+		if err := system.Lsetxattr(dstPath, attr, data, 0); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func copyDir(srcDir, dstDir string, flags copyFlags) error {
+	err := filepath.Walk(srcDir, func(srcPath string, f os.FileInfo, err error) error {
+		if err != nil {
+			return err
+		}
+
+		// Rebase path
+		relPath, err := filepath.Rel(srcDir, srcPath)
+		if err != nil {
+			return err
+		}
+
+		dstPath := filepath.Join(dstDir, relPath)
+
+		stat, ok := f.Sys().(*syscall.Stat_t)
+		if !ok {
+			return fmt.Errorf("Unable to get raw syscall.Stat_t data for %s", srcPath)
+		}
+
+		isHardlink := false
+
+		switch f.Mode() & os.ModeType {
+		case 0: // Regular file
+			if flags&copyHardlink != 0 {
+				isHardlink = true
+				if err := os.Link(srcPath, dstPath); err != nil {
+					return err
+				}
+			} else {
+				if err := copyRegular(srcPath, dstPath, f.Mode()); err != nil {
+					return err
+				}
+			}
+
+		case os.ModeDir:
+			if err := os.Mkdir(dstPath, f.Mode()); err != nil && !os.IsExist(err) {
+				return err
+			}
+
+		case os.ModeSymlink:
+			link, err := os.Readlink(srcPath)
+			if err != nil {
+				return err
+			}
+
+			if err := os.Symlink(link, dstPath); err != nil {
+				return err
+			}
+
+		case os.ModeNamedPipe:
+			fallthrough
+		case os.ModeSocket:
+			if rsystem.RunningInUserNS() {
+				// cannot create a device if running in user namespace
+				return nil
+			}
+			if err := syscall.Mkfifo(dstPath, stat.Mode); err != nil {
+				return err
+			}
+
+		case os.ModeDevice:
+			if err := syscall.Mknod(dstPath, stat.Mode, int(stat.Rdev)); err != nil {
+				return err
+			}
+
+		default:
+			return fmt.Errorf("Unknown file type for %s", srcPath)
+		}
+
+		// Everything below is copying metadata from src to dst. All this metadata
+		// already shares an inode for hardlinks.
+		if isHardlink {
+			return nil
+		}
+
+		if err := os.Lchown(dstPath, int(stat.Uid), int(stat.Gid)); err != nil {
+			return err
+		}
+
+		if err := copyXattr(srcPath, dstPath, "security.capability"); err != nil {
+			return err
+		}
+
+		// We need to copy this attribute if it appears in an overlay upper layer, as
+		// this function is used to copy those. It is set by overlay if a directory
+		// is removed and then re-created and should not inherit anything from the
+		// same dir in the lower dir.
+		if err := copyXattr(srcPath, dstPath, "trusted.overlay.opaque"); err != nil {
+			return err
+		}
+
+		isSymlink := f.Mode()&os.ModeSymlink != 0
+
+		// There is no LChmod, so ignore mode for symlink. 
Also, this + // must happen after chown, as that can modify the file mode + if !isSymlink { + if err := os.Chmod(dstPath, f.Mode()); err != nil { + return err + } + } + + // system.Chtimes doesn't support a NOFOLLOW flag atm + if !isSymlink { + aTime := time.Unix(int64(stat.Atim.Sec), int64(stat.Atim.Nsec)) + mTime := time.Unix(int64(stat.Mtim.Sec), int64(stat.Mtim.Nsec)) + if err := system.Chtimes(dstPath, aTime, mTime); err != nil { + return err + } + } else { + ts := []syscall.Timespec{stat.Atim, stat.Mtim} + if err := system.LUtimesNano(dstPath, ts); err != nil { + return err + } + } + return nil + }) + return err +} diff --git a/components/engine/daemon/graphdriver/overlay/overlay.go b/components/engine/daemon/graphdriver/overlay/overlay.go new file mode 100644 index 0000000000..88d6602e07 --- /dev/null +++ b/components/engine/daemon/graphdriver/overlay/overlay.go @@ -0,0 +1,469 @@ +// +build linux + +package overlay + +import ( + "bufio" + "fmt" + "io" + "io/ioutil" + "os" + "os/exec" + "path" + "strconv" + "syscall" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/daemon/graphdriver/overlayutils" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/fsutils" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/locker" + "github.com/docker/docker/pkg/mount" + "github.com/docker/docker/pkg/system" + "github.com/opencontainers/selinux/go-selinux/label" +) + +// This is a small wrapper over the NaiveDiffWriter that lets us have a custom +// implementation of ApplyDiff() + +var ( + // ErrApplyDiffFallback is returned to indicate that a normal ApplyDiff is applied as a fallback from Naive diff writer. + ErrApplyDiffFallback = fmt.Errorf("Fall back to normal ApplyDiff") + backingFs = "" +) + +// ApplyDiffProtoDriver wraps the ProtoDriver by extending the interface with ApplyDiff method. +type ApplyDiffProtoDriver interface { + graphdriver.ProtoDriver + // ApplyDiff writes the diff to the archive for the given id and parent id. + // It returns the size in bytes written if successful, an error ErrApplyDiffFallback is returned otherwise. + ApplyDiff(id, parent string, diff io.Reader) (size int64, err error) +} + +type naiveDiffDriverWithApply struct { + graphdriver.Driver + applyDiff ApplyDiffProtoDriver +} + +// NaiveDiffDriverWithApply returns a NaiveDiff driver with custom ApplyDiff. +func NaiveDiffDriverWithApply(driver ApplyDiffProtoDriver, uidMaps, gidMaps []idtools.IDMap) graphdriver.Driver { + return &naiveDiffDriverWithApply{ + Driver: graphdriver.NewNaiveDiffDriver(driver, uidMaps, gidMaps), + applyDiff: driver, + } +} + +// ApplyDiff creates a diff layer with either the NaiveDiffDriver or with a fallback. +func (d *naiveDiffDriverWithApply) ApplyDiff(id, parent string, diff io.Reader) (int64, error) { + b, err := d.applyDiff.ApplyDiff(id, parent, diff) + if err == ErrApplyDiffFallback { + return d.Driver.ApplyDiff(id, parent, diff) + } + return b, err +} + +// This backend uses the overlay union filesystem for containers +// plus hard link file sharing for images. + +// Each container/image can have a "root" subdirectory which is a plain +// filesystem hierarchy, or they can use overlay. + +// If they use overlay there is a "upper" directory and a "lower-id" +// file, as well as "merged" and "work" directories. 
The "upper" +// directory has the upper layer of the overlay, and "lower-id" contains +// the id of the parent whose "root" directory shall be used as the lower +// layer in the overlay. The overlay itself is mounted in the "merged" +// directory, and the "work" dir is needed for overlay to work. + +// When an overlay layer is created there are two cases, either the +// parent has a "root" dir, then we start out with an empty "upper" +// directory overlaid on the parents root. This is typically the +// case with the init layer of a container which is based on an image. +// If there is no "root" in the parent, we inherit the lower-id from +// the parent and start by making a copy in the parent's "upper" dir. +// This is typically the case for a container layer which copies +// its parent -init upper layer. + +// Additionally we also have a custom implementation of ApplyLayer +// which makes a recursive copy of the parent "root" layer using +// hardlinks to share file data, and then applies the layer on top +// of that. This means all child images share file (but not directory) +// data with the parent. + +// Driver contains information about the home directory and the list of active mounts that are created using this driver. +type Driver struct { + home string + uidMaps []idtools.IDMap + gidMaps []idtools.IDMap + ctr *graphdriver.RefCounter + supportsDType bool + locker *locker.Locker +} + +func init() { + graphdriver.Register("overlay", Init) +} + +// Init returns the NaiveDiffDriver, a native diff driver for overlay filesystem. +// If overlay filesystem is not supported on the host, graphdriver.ErrNotSupported is returned as error. +// If an overlay filesystem is not supported over an existing filesystem then error graphdriver.ErrIncompatibleFS is returned. 
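The naiveDiffDriverWithApply wrapper defined above funnels ApplyDiff through a custom fast path and falls back to the naive differ only on a sentinel error. A minimal sketch of that pattern in isolation (not part of the patch; all names here are hypothetical):

```go
package main

import (
	"errors"
	"fmt"
)

// errFallback is the sentinel the fast path returns when it cannot
// handle a request, mirroring ErrApplyDiffFallback in the overlay driver.
var errFallback = errors.New("fall back to slow path")

func fastApply(parent string) (int64, error) {
	if parent == "" {
		return 0, errFallback // fast path needs a parent snapshot
	}
	return 42, nil
}

func slowApply(parent string) (int64, error) { return 7, nil }

// apply tries the optimized path and falls back only on the sentinel,
// so real errors from fastApply still propagate to the caller.
func apply(parent string) (int64, error) {
	n, err := fastApply(parent)
	if err == errFallback {
		return slowApply(parent)
	}
	return n, err
}

func main() {
	fmt.Println(apply(""))   // slow path
	fmt.Println(apply("p1")) // fast path
}
```
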
+func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) {
+
+	if err := supportsOverlay(); err != nil {
+		return nil, graphdriver.ErrNotSupported
+	}
+
+	fsMagic, err := graphdriver.GetFSMagic(home)
+	if err != nil {
+		return nil, err
+	}
+	if fsName, ok := graphdriver.FsNames[fsMagic]; ok {
+		backingFs = fsName
+	}
+
+	switch fsMagic {
+	case graphdriver.FsMagicAufs, graphdriver.FsMagicBtrfs, graphdriver.FsMagicOverlay, graphdriver.FsMagicZfs, graphdriver.FsMagicEcryptfs:
+		logrus.Errorf("'overlay' is not supported over %s", backingFs)
+		return nil, graphdriver.ErrIncompatibleFS
+	}
+
+	rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps)
+	if err != nil {
+		return nil, err
+	}
+	// Create the driver home dir
+	if err := idtools.MkdirAllAs(home, 0700, rootUID, rootGID); err != nil && !os.IsExist(err) {
+		return nil, err
+	}
+
+	if err := mount.MakePrivate(home); err != nil {
+		return nil, err
+	}
+
+	supportsDType, err := fsutils.SupportsDType(home)
+	if err != nil {
+		return nil, err
+	}
+	if !supportsDType {
+		// not a fatal error until v17.12 (#27443)
+		logrus.Warn(overlayutils.ErrDTypeNotSupported("overlay", backingFs))
+	}
+
+	d := &Driver{
+		home:          home,
+		uidMaps:       uidMaps,
+		gidMaps:       gidMaps,
+		ctr:           graphdriver.NewRefCounter(graphdriver.NewFsChecker(graphdriver.FsMagicOverlay)),
+		supportsDType: supportsDType,
+		locker:        locker.New(),
+	}
+
+	return NaiveDiffDriverWithApply(d, uidMaps, gidMaps), nil
+}
+
+func supportsOverlay() error {
+	// We can try to modprobe overlay first before looking at
+	// proc/filesystems for when overlay is supported
+	exec.Command("modprobe", "overlay").Run()
+
+	f, err := os.Open("/proc/filesystems")
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+
+	s := bufio.NewScanner(f)
+	for s.Scan() {
+		if s.Text() == "nodev\toverlay" {
+			return nil
+		}
+	}
+	logrus.Error("'overlay' not found as a supported filesystem on this host. Please ensure kernel is new enough and has overlay support loaded.")
+	return graphdriver.ErrNotSupported
+}
+
+func (d *Driver) String() string {
+	return "overlay"
+}
+
+// Status returns current driver information in a two dimensional string array.
+// Output contains "Backing Filesystem" used in this implementation.
+func (d *Driver) Status() [][2]string {
+	return [][2]string{
+		{"Backing Filesystem", backingFs},
+		{"Supports d_type", strconv.FormatBool(d.supportsDType)},
+	}
+}
+
+// GetMetadata returns metadata about the overlay driver such as the RootDir,
+// LowerDir, UpperDir, WorkDir, and MergedDir used to store data.
+func (d *Driver) GetMetadata(id string) (map[string]string, error) {
+	dir := d.dir(id)
+	if _, err := os.Stat(dir); err != nil {
+		return nil, err
+	}
+
+	metadata := make(map[string]string)
+
+	// If id has a root, it is an image
+	rootDir := path.Join(dir, "root")
+	if _, err := os.Stat(rootDir); err == nil {
+		metadata["RootDir"] = rootDir
+		return metadata, nil
+	}
+
+	lowerID, err := ioutil.ReadFile(path.Join(dir, "lower-id"))
+	if err != nil {
+		return nil, err
+	}
+
+	metadata["LowerDir"] = path.Join(d.dir(string(lowerID)), "root")
+	metadata["UpperDir"] = path.Join(dir, "upper")
+	metadata["WorkDir"] = path.Join(dir, "work")
+	metadata["MergedDir"] = path.Join(dir, "merged")
+
+	return metadata, nil
+}
+
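Given the layer layout GetMetadata just exposed ("upper", "work", "merged", plus a "lower-id" file naming the parent whose "root" becomes the lower layer), the mount options that Get() will assemble can be sketched in isolation (not part of the patch; paths and ids are hypothetical):

```go
package main

import (
	"fmt"
	"io/ioutil"
	"path"
)

// overlayMountOpts rebuilds the option string Get() passes to the overlay
// mount syscall for a layer directory laid out as this driver creates it.
func overlayMountOpts(home, id string) (string, error) {
	dir := path.Join(home, id)
	lowerID, err := ioutil.ReadFile(path.Join(dir, "lower-id"))
	if err != nil {
		return "", err
	}
	lowerDir := path.Join(home, string(lowerID), "root")
	return fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s",
		lowerDir, path.Join(dir, "upper"), path.Join(dir, "work")), nil
}

func main() {
	opts, err := overlayMountOpts("/var/lib/docker/overlay", "abc123")
	if err != nil {
		panic(err)
	}
	fmt.Println("mount -t overlay overlay -o", opts, "<merged dir>")
}
```

+
+// Cleanup any state created by overlay which should be cleaned when daemon
+// is being shutdown. For now, we just have to unmount the bind mount
+// we had created.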
+func (d *Driver) Cleanup() error { + return mount.Unmount(d.home) +} + +// CreateReadWrite creates a layer that is writable for use as a container +// file system. +func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error { + return d.Create(id, parent, opts) +} + +// Create is used to create the upper, lower, and merge directories required for overlay fs for a given id. +// The parent filesystem is used to configure these directories for the overlay. +func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) (retErr error) { + + if opts != nil && len(opts.StorageOpt) != 0 { + return fmt.Errorf("--storage-opt is not supported for overlay") + } + + dir := d.dir(id) + + rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) + if err != nil { + return err + } + if err := idtools.MkdirAllAs(path.Dir(dir), 0700, rootUID, rootGID); err != nil { + return err + } + if err := idtools.MkdirAs(dir, 0700, rootUID, rootGID); err != nil { + return err + } + + defer func() { + // Clean up on failure + if retErr != nil { + os.RemoveAll(dir) + } + }() + + // Toplevel images are just a "root" dir + if parent == "" { + if err := idtools.MkdirAs(path.Join(dir, "root"), 0755, rootUID, rootGID); err != nil { + return err + } + return nil + } + + parentDir := d.dir(parent) + + // Ensure parent exists + if _, err := os.Lstat(parentDir); err != nil { + return err + } + + // If parent has a root, just do an overlay to it + parentRoot := path.Join(parentDir, "root") + + if s, err := os.Lstat(parentRoot); err == nil { + if err := idtools.MkdirAs(path.Join(dir, "upper"), s.Mode(), rootUID, rootGID); err != nil { + return err + } + if err := idtools.MkdirAs(path.Join(dir, "work"), 0700, rootUID, rootGID); err != nil { + return err + } + if err := idtools.MkdirAs(path.Join(dir, "merged"), 0700, rootUID, rootGID); err != nil { + return err + } + if err := ioutil.WriteFile(path.Join(dir, "lower-id"), []byte(parent), 0666); err != nil { + return err + } + return nil + } + + // Otherwise, copy the upper and the lower-id from the parent + + lowerID, err := ioutil.ReadFile(path.Join(parentDir, "lower-id")) + if err != nil { + return err + } + + if err := ioutil.WriteFile(path.Join(dir, "lower-id"), lowerID, 0666); err != nil { + return err + } + + parentUpperDir := path.Join(parentDir, "upper") + s, err := os.Lstat(parentUpperDir) + if err != nil { + return err + } + + upperDir := path.Join(dir, "upper") + if err := idtools.MkdirAs(upperDir, s.Mode(), rootUID, rootGID); err != nil { + return err + } + if err := idtools.MkdirAs(path.Join(dir, "work"), 0700, rootUID, rootGID); err != nil { + return err + } + if err := idtools.MkdirAs(path.Join(dir, "merged"), 0700, rootUID, rootGID); err != nil { + return err + } + + return copyDir(parentUpperDir, upperDir, 0) +} + +func (d *Driver) dir(id string) string { + return path.Join(d.home, id) +} + +// Remove cleans the directories that are created for this id. +func (d *Driver) Remove(id string) error { + d.locker.Lock(id) + defer d.locker.Unlock(id) + return system.EnsureRemoveAll(d.dir(id)) +} + +// Get creates and mounts the required file system for the given id and returns the mount path. 
+func (d *Driver) Get(id string, mountLabel string) (s string, err error) {
+	d.locker.Lock(id)
+	defer d.locker.Unlock(id)
+	dir := d.dir(id)
+	if _, err := os.Stat(dir); err != nil {
+		return "", err
+	}
+	// If id has a root, just return it
+	rootDir := path.Join(dir, "root")
+	if _, err := os.Stat(rootDir); err == nil {
+		return rootDir, nil
+	}
+	mergedDir := path.Join(dir, "merged")
+	if count := d.ctr.Increment(mergedDir); count > 1 {
+		return mergedDir, nil
+	}
+	defer func() {
+		if err != nil {
+			if c := d.ctr.Decrement(mergedDir); c <= 0 {
+				syscall.Unmount(mergedDir, 0)
+			}
+		}
+	}()
+	lowerID, err := ioutil.ReadFile(path.Join(dir, "lower-id"))
+	if err != nil {
+		return "", err
+	}
+	var (
+		lowerDir = path.Join(d.dir(string(lowerID)), "root")
+		upperDir = path.Join(dir, "upper")
+		workDir  = path.Join(dir, "work")
+		opts     = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", lowerDir, upperDir, workDir)
+	)
+	if err := syscall.Mount("overlay", mergedDir, "overlay", 0, label.FormatMountLabel(opts, mountLabel)); err != nil {
+		return "", fmt.Errorf("error creating overlay mount to %s: %v", mergedDir, err)
+	}
+	// chown "workdir/work" to the remapped root UID/GID. Overlay fs inside a
+	// user namespace requires this to move a directory from lower to upper.
+	rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps)
+	if err != nil {
+		return "", err
+	}
+	if err := os.Chown(path.Join(workDir, "work"), rootUID, rootGID); err != nil {
+		return "", err
+	}
+	return mergedDir, nil
+}
+
+// Put unmounts the mount path created for the given id.
+func (d *Driver) Put(id string) error {
+	d.locker.Lock(id)
+	defer d.locker.Unlock(id)
+	// If id has a root, just return
+	if _, err := os.Stat(path.Join(d.dir(id), "root")); err == nil {
+		return nil
+	}
+	mountpoint := path.Join(d.dir(id), "merged")
+	if count := d.ctr.Decrement(mountpoint); count > 0 {
+		return nil
+	}
+	if err := syscall.Unmount(mountpoint, 0); err != nil {
+		logrus.Debugf("Failed to unmount %s overlay: %v", id, err)
+	}
+	return nil
+}
+
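Get and Put above reference-count the "merged" mountpoint so that only the first Get performs the mount and only the last Put unmounts it. Stripped of the driver specifics, the pattern looks like this (a sketch, not part of the patch; the refCounter type here is hypothetical and stands in for graphdriver.RefCounter):

```go
package main

import (
	"fmt"
	"sync"
)

// refCounter: first get of a path really mounts, later gets just count,
// and only the final put really unmounts.
type refCounter struct {
	mu     sync.Mutex
	counts map[string]int
}

func (c *refCounter) get(path string, mount func() error) error {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.counts[path]++
	if c.counts[path] == 1 {
		return mount() // first user: really mount
	}
	return nil
}

func (c *refCounter) put(path string, unmount func() error) error {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.counts[path]--
	if c.counts[path] == 0 {
		return unmount() // last user: really unmount
	}
	return nil
}

func main() {
	c := &refCounter{counts: map[string]int{}}
	mount := func() error { fmt.Println("mount"); return nil }
	unmount := func() error { fmt.Println("unmount"); return nil }
	c.get("/merged", mount)   // mounts
	c.get("/merged", mount)   // counted only
	c.put("/merged", unmount) // counted only
	c.put("/merged", unmount) // unmounts
}
```

+// ApplyDiff applies the new layer on top of the root; if the parent does not exist it will return an ErrApplyDiffFallback error.
+func (d *Driver) ApplyDiff(id string, parent string, diff io.Reader) (size int64, err error) {
+	dir := d.dir(id)
+
+	if parent == "" {
+		return 0, ErrApplyDiffFallback
+	}
+
+	parentRootDir := path.Join(d.dir(parent), "root")
+	if _, err := os.Stat(parentRootDir); err != nil {
+		return 0, ErrApplyDiffFallback
+	}
+
+	// We now know there is a parent, and it has a "root" directory containing
+	// the full root filesystem. We can just hardlink it and apply the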
+	// layer. This relies on two things:
+	// 1) ApplyDiff is only run once on a clean (no writes to upper layer) container
+	// 2) ApplyDiff doesn't do any in-place writes to files (would break hardlinks)
+	// These are all currently true and are not expected to break
+
+	tmpRootDir, err := ioutil.TempDir(dir, "tmproot")
+	if err != nil {
+		return 0, err
+	}
+	defer func() {
+		if err != nil {
+			os.RemoveAll(tmpRootDir)
+		} else {
+			os.RemoveAll(path.Join(dir, "upper"))
+			os.RemoveAll(path.Join(dir, "work"))
+			os.RemoveAll(path.Join(dir, "merged"))
+			os.RemoveAll(path.Join(dir, "lower-id"))
+		}
+	}()
+
+	if err = copyDir(parentRootDir, tmpRootDir, copyHardlink); err != nil {
+		return 0, err
+	}
+
+	options := &archive.TarOptions{UIDMaps: d.uidMaps, GIDMaps: d.gidMaps}
+	if size, err = graphdriver.ApplyUncompressedLayer(tmpRootDir, diff, options); err != nil {
+		return 0, err
+	}
+
+	rootDir := path.Join(dir, "root")
+	if err := os.Rename(tmpRootDir, rootDir); err != nil {
+		return 0, err
+	}
+
+	return
+}
+
+// Exists checks whether the directory for the given id exists.
+func (d *Driver) Exists(id string) bool {
+	_, err := os.Stat(d.dir(id))
+	return err == nil
+}
diff --git a/components/engine/daemon/graphdriver/overlay/overlay_test.go b/components/engine/daemon/graphdriver/overlay/overlay_test.go
new file mode 100644
index 0000000000..34b6d801fd
--- /dev/null
+++ b/components/engine/daemon/graphdriver/overlay/overlay_test.go
@@ -0,0 +1,93 @@
+// +build linux
+
+package overlay
+
+import (
+	"testing"
+
+	"github.com/docker/docker/daemon/graphdriver"
+	"github.com/docker/docker/daemon/graphdriver/graphtest"
+	"github.com/docker/docker/pkg/archive"
+)
+
+func init() {
+	// Do not use chroot to speed run time and allow archive
+	// errors or hangs to be debugged directly from the test process.
+ graphdriver.ApplyUncompressedLayer = archive.ApplyUncompressedLayer +} + +// This avoids creating a new driver for each test if all tests are run +// Make sure to put new tests between TestOverlaySetup and TestOverlayTeardown +func TestOverlaySetup(t *testing.T) { + graphtest.GetDriver(t, "overlay") +} + +func TestOverlayCreateEmpty(t *testing.T) { + graphtest.DriverTestCreateEmpty(t, "overlay") +} + +func TestOverlayCreateBase(t *testing.T) { + graphtest.DriverTestCreateBase(t, "overlay") +} + +func TestOverlayCreateSnap(t *testing.T) { + graphtest.DriverTestCreateSnap(t, "overlay") +} + +func TestOverlay50LayerRead(t *testing.T) { + graphtest.DriverTestDeepLayerRead(t, 50, "overlay") +} + +// Fails due to bug in calculating changes after apply +// likely related to https://github.com/docker/docker/issues/21555 +func TestOverlayDiffApply10Files(t *testing.T) { + t.Skipf("Fails to compute changes after apply intermittently") + graphtest.DriverTestDiffApply(t, 10, "overlay") +} + +func TestOverlayChanges(t *testing.T) { + t.Skipf("Fails to compute changes intermittently") + graphtest.DriverTestChanges(t, "overlay") +} + +func TestOverlayTeardown(t *testing.T) { + graphtest.PutDriver(t) +} + +// Benchmarks should always setup new driver + +func BenchmarkExists(b *testing.B) { + graphtest.DriverBenchExists(b, "overlay") +} + +func BenchmarkGetEmpty(b *testing.B) { + graphtest.DriverBenchGetEmpty(b, "overlay") +} + +func BenchmarkDiffBase(b *testing.B) { + graphtest.DriverBenchDiffBase(b, "overlay") +} + +func BenchmarkDiffSmallUpper(b *testing.B) { + graphtest.DriverBenchDiffN(b, 10, 10, "overlay") +} + +func BenchmarkDiff10KFileUpper(b *testing.B) { + graphtest.DriverBenchDiffN(b, 10, 10000, "overlay") +} + +func BenchmarkDiff10KFilesBottom(b *testing.B) { + graphtest.DriverBenchDiffN(b, 10000, 10, "overlay") +} + +func BenchmarkDiffApply100(b *testing.B) { + graphtest.DriverBenchDiffApplyN(b, 100, "overlay") +} + +func BenchmarkDiff20Layers(b *testing.B) { + graphtest.DriverBenchDeepLayerDiff(b, 20, "overlay") +} + +func BenchmarkRead20Layers(b *testing.B) { + graphtest.DriverBenchDeepLayerRead(b, 20, "overlay") +} diff --git a/components/engine/daemon/graphdriver/overlay/overlay_unsupported.go b/components/engine/daemon/graphdriver/overlay/overlay_unsupported.go new file mode 100644 index 0000000000..3dbb4de44e --- /dev/null +++ b/components/engine/daemon/graphdriver/overlay/overlay_unsupported.go @@ -0,0 +1,3 @@ +// +build !linux + +package overlay diff --git a/components/engine/daemon/graphdriver/overlay2/check.go b/components/engine/daemon/graphdriver/overlay2/check.go new file mode 100644 index 0000000000..53a7199292 --- /dev/null +++ b/components/engine/daemon/graphdriver/overlay2/check.go @@ -0,0 +1,79 @@ +// +build linux + +package overlay2 + +import ( + "fmt" + "io/ioutil" + "os" + "path" + "path/filepath" + "syscall" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/system" + "github.com/pkg/errors" +) + +// hasOpaqueCopyUpBug checks whether the filesystem has a bug +// which copies up the opaque flag when copying up an opaque +// directory. When this bug exists naive diff should be used. 
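For context (a standalone sketch, not part of the diff): overlay marks a directory that shadows a removed lower directory by setting the trusted.overlay.opaque xattr to "y" on the upper copy, which is exactly the flag the check below plants on a middle layer and then looks for after a copy-up. Reading it back with golang.org/x/sys/unix, assuming Linux and root privileges (trusted.* xattrs require CAP_SYS_ADMIN):

```go
package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

// isOpaque reports whether dir carries overlay's opaque marker.
func isOpaque(dir string) (bool, error) {
	buf := make([]byte, 1)
	n, err := unix.Getxattr(dir, "trusted.overlay.opaque", buf)
	if err == unix.ENODATA {
		return false, nil // attribute not set: directory is not opaque
	}
	if err != nil {
		return false, err
	}
	return n == 1 && buf[0] == 'y', nil
}

func main() {
	opaque, err := isOpaque("/var/lib/docker/overlay2/l3/d") // hypothetical path
	fmt.Println(opaque, err)
}
```
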
+func hasOpaqueCopyUpBug(d string) error { + td, err := ioutil.TempDir(d, "opaque-bug-check") + if err != nil { + return err + } + defer func() { + if err := os.RemoveAll(td); err != nil { + logrus.Warnf("Failed to remove check directory %v: %v", td, err) + } + }() + + // Make directories l1/d, l2/d, l3, work, merged + if err := os.MkdirAll(filepath.Join(td, "l1", "d"), 0755); err != nil { + return err + } + if err := os.MkdirAll(filepath.Join(td, "l2", "d"), 0755); err != nil { + return err + } + if err := os.Mkdir(filepath.Join(td, "l3"), 0755); err != nil { + return err + } + if err := os.Mkdir(filepath.Join(td, "work"), 0755); err != nil { + return err + } + if err := os.Mkdir(filepath.Join(td, "merged"), 0755); err != nil { + return err + } + + // Mark l2/d as opaque + if err := system.Lsetxattr(filepath.Join(td, "l2", "d"), "trusted.overlay.opaque", []byte("y"), 0); err != nil { + return errors.Wrap(err, "failed to set opaque flag on middle layer") + } + + opts := fmt.Sprintf("lowerdir=%s:%s,upperdir=%s,workdir=%s", path.Join(td, "l2"), path.Join(td, "l1"), path.Join(td, "l3"), path.Join(td, "work")) + if err := syscall.Mount("overlay", filepath.Join(td, "merged"), "overlay", 0, opts); err != nil { + return errors.Wrap(err, "failed to mount overlay") + } + defer func() { + if err := syscall.Unmount(filepath.Join(td, "merged"), 0); err != nil { + logrus.Warnf("Failed to unmount check directory %v: %v", filepath.Join(td, "merged"), err) + } + }() + + // Touch file in d to force copy up of opaque directory "d" from "l2" to "l3" + if err := ioutil.WriteFile(filepath.Join(td, "merged", "d", "f"), []byte{}, 0644); err != nil { + return errors.Wrap(err, "failed to write to merged directory") + } + + // Check l3/d does not have opaque flag + xattrOpaque, err := system.Lgetxattr(filepath.Join(td, "l3", "d"), "trusted.overlay.opaque") + if err != nil { + return errors.Wrap(err, "failed to read opaque flag on upper layer") + } + if string(xattrOpaque) == "y" { + return errors.New("opaque flag erroneously copied up, consider updating to kernel 4.8 or later to fix") + } + + return nil +} diff --git a/components/engine/daemon/graphdriver/overlay2/mount.go b/components/engine/daemon/graphdriver/overlay2/mount.go new file mode 100644 index 0000000000..60e248b6d7 --- /dev/null +++ b/components/engine/daemon/graphdriver/overlay2/mount.go @@ -0,0 +1,88 @@ +// +build linux + +package overlay2 + +import ( + "bytes" + "encoding/json" + "flag" + "fmt" + "os" + "runtime" + "syscall" + + "github.com/docker/docker/pkg/reexec" +) + +func init() { + reexec.Register("docker-mountfrom", mountFromMain) +} + +func fatal(err error) { + fmt.Fprint(os.Stderr, err) + os.Exit(1) +} + +type mountOptions struct { + Device string + Target string + Type string + Label string + Flag uint32 +} + +func mountFrom(dir, device, target, mType string, flags uintptr, label string) error { + options := &mountOptions{ + Device: device, + Target: target, + Type: mType, + Flag: uint32(flags), + Label: label, + } + + cmd := reexec.Command("docker-mountfrom", dir) + w, err := cmd.StdinPipe() + if err != nil { + return fmt.Errorf("mountfrom error on pipe creation: %v", err) + } + + output := bytes.NewBuffer(nil) + cmd.Stdout = output + cmd.Stderr = output + + if err := cmd.Start(); err != nil { + return fmt.Errorf("mountfrom error on re-exec cmd: %v", err) + } + // write the options to the pipe for the re-exec'd mount process to read + if err := json.NewEncoder(w).Encode(options); err != nil { + return fmt.Errorf("mountfrom json encode to pipe failed: 
%v", err) + } + w.Close() + + if err := cmd.Wait(); err != nil { + return fmt.Errorf("mountfrom re-exec error: %v: output: %s", err, output) + } + return nil +} + +// mountfromMain is the entry-point for docker-mountfrom on re-exec. +func mountFromMain() { + runtime.LockOSThread() + flag.Parse() + + var options *mountOptions + + if err := json.NewDecoder(os.Stdin).Decode(&options); err != nil { + fatal(err) + } + + if err := os.Chdir(flag.Arg(0)); err != nil { + fatal(err) + } + + if err := syscall.Mount(options.Device, options.Target, options.Type, uintptr(options.Flag), options.Label); err != nil { + fatal(err) + } + + os.Exit(0) +} diff --git a/components/engine/daemon/graphdriver/overlay2/overlay.go b/components/engine/daemon/graphdriver/overlay2/overlay.go new file mode 100644 index 0000000000..e327ec508a --- /dev/null +++ b/components/engine/daemon/graphdriver/overlay2/overlay.go @@ -0,0 +1,692 @@ +// +build linux + +package overlay2 + +import ( + "bufio" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "os/exec" + "path" + "path/filepath" + "strconv" + "strings" + "sync" + "syscall" + + "github.com/Sirupsen/logrus" + + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/daemon/graphdriver/overlayutils" + "github.com/docker/docker/daemon/graphdriver/quota" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/chrootarchive" + "github.com/docker/docker/pkg/directory" + "github.com/docker/docker/pkg/fsutils" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/locker" + "github.com/docker/docker/pkg/mount" + "github.com/docker/docker/pkg/parsers" + "github.com/docker/docker/pkg/parsers/kernel" + "github.com/docker/docker/pkg/system" + units "github.com/docker/go-units" + + "github.com/opencontainers/selinux/go-selinux/label" +) + +var ( + // untar defines the untar method + untar = chrootarchive.UntarUncompressed +) + +// This backend uses the overlay union filesystem for containers +// with diff directories for each layer. + +// This version of the overlay driver requires at least kernel +// 4.0.0 in order to support mounting multiple diff directories. + +// Each container/image has at least a "diff" directory and "link" file. +// If there is also a "lower" file when there are diff layers +// below as well as "merged" and "work" directories. The "diff" directory +// has the upper layer of the overlay and is used to capture any +// changes to the layer. The "lower" file contains all the lower layer +// mounts separated by ":" and ordered from uppermost to lowermost +// layers. The overlay itself is mounted in the "merged" directory, +// and the "work" dir is needed for overlay to work. + +// The "link" file for each layer contains a unique string for the layer. +// Under the "l" directory at the root there will be a symbolic link +// with that unique string pointing the "diff" directory for the layer. +// The symbolic links are used to reference lower layers in the "lower" +// file and on mount. The links are used to shorten the total length +// of a layer reference without requiring changes to the layer identifier +// or root directory. Mounts are always done relative to root and +// referencing the symbolic links in order to ensure the number of +// lower directories can fit in a single page for making the mount +// syscall. A hard upper limit of 128 lower layers is enforced to ensure +// that mounts do not fail due to length. 
+ +const ( + driverName = "overlay2" + linkDir = "l" + lowerFile = "lower" + maxDepth = 128 + + // idLength represents the number of random characters + // which can be used to create the unique link identifier + // for every layer. If this value is too long then the + // page size limit for the mount command may be exceeded. + // The idLength should be selected such that the following equation + // is true (512 is a buffer for label metadata). + // ((idLength + len(linkDir) + 1) * maxDepth) <= (pageSize - 512) + idLength = 26 +) + +type overlayOptions struct { + overrideKernelCheck bool + quota quota.Quota +} + +// Driver contains information about the home directory and the list of active mounts that are created using this driver. +type Driver struct { + home string + uidMaps []idtools.IDMap + gidMaps []idtools.IDMap + ctr *graphdriver.RefCounter + quotaCtl *quota.Control + options overlayOptions + naiveDiff graphdriver.DiffDriver + supportsDType bool + locker *locker.Locker +} + +var ( + backingFs = "" + projectQuotaSupported = false + + useNaiveDiffLock sync.Once + useNaiveDiffOnly bool +) + +func init() { + graphdriver.Register(driverName, Init) +} + +// Init returns a native diff driver for the overlay filesystem. +// If the overlay filesystem is not supported on the host, graphdriver.ErrNotSupported is returned. +// If overlay is not supported over the backing filesystem, graphdriver.ErrIncompatibleFS is returned. +func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { + opts, err := parseOptions(options) + if err != nil { + return nil, err + } + + if err := supportsOverlay(); err != nil { + return nil, graphdriver.ErrNotSupported + } + + // require kernel 4.0.0 to ensure multiple lower dirs are supported + v, err := kernel.GetKernelVersion() + if err != nil { + return nil, err + } + if kernel.CompareKernelVersion(*v, kernel.VersionInfo{Kernel: 4, Major: 0, Minor: 0}) < 0 { + if !opts.overrideKernelCheck { + return nil, graphdriver.ErrNotSupported + } + logrus.Warn("Using pre-4.0.0 kernel for overlay2, mount failures may require kernel update") + } + + fsMagic, err := graphdriver.GetFSMagic(home) + if err != nil { + return nil, err + } + if fsName, ok := graphdriver.FsNames[fsMagic]; ok { + backingFs = fsName + } + + // check if they are running over btrfs, aufs, zfs, overlay, or ecryptfs + switch fsMagic { + case graphdriver.FsMagicAufs, graphdriver.FsMagicZfs, graphdriver.FsMagicOverlay, graphdriver.FsMagicEcryptfs: + logrus.Errorf("'overlay2' is not supported over %s", backingFs) + return nil, graphdriver.ErrIncompatibleFS + case graphdriver.FsMagicBtrfs: + // Support for OverlayFS on BTRFS was added in kernel 4.7 + // See https://btrfs.wiki.kernel.org/index.php/Changelog + if kernel.CompareKernelVersion(*v, kernel.VersionInfo{Kernel: 4, Major: 7, Minor: 0}) < 0 { + if !opts.overrideKernelCheck { + logrus.Errorf("'overlay2' requires kernel 4.7 to use on %s", backingFs) + return nil, graphdriver.ErrIncompatibleFS + } + logrus.Warn("Using pre-4.7.0 kernel for overlay2 on btrfs, may require kernel update") + } + } + + rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps) + if err != nil { + return nil, err + } + // Create the driver home dir + if err := idtools.MkdirAllAs(path.Join(home, linkDir), 0700, rootUID, rootGID); err != nil && !os.IsExist(err) { + return nil, err + } + + if err := mount.MakePrivate(home); err != nil { + return nil, err + } + + supportsDType, err := 
fsutils.SupportsDType(home) + if err != nil { + return nil, err + } + if !supportsDType { + // not a fatal error until v17.12 (#27443) + logrus.Warn(overlayutils.ErrDTypeNotSupported("overlay2", backingFs)) + } + + d := &Driver{ + home: home, + uidMaps: uidMaps, + gidMaps: gidMaps, + ctr: graphdriver.NewRefCounter(graphdriver.NewFsChecker(graphdriver.FsMagicOverlay)), + supportsDType: supportsDType, + locker: locker.New(), + } + + d.naiveDiff = graphdriver.NewNaiveDiffDriver(d, uidMaps, gidMaps) + + if backingFs == "xfs" { + // Try to enable project quota support over xfs. + if d.quotaCtl, err = quota.NewControl(home); err == nil { + projectQuotaSupported = true + } + } + + logrus.Debugf("backingFs=%s, projectQuotaSupported=%v", backingFs, projectQuotaSupported) + + return d, nil +} + +func parseOptions(options []string) (*overlayOptions, error) { + o := &overlayOptions{} + for _, option := range options { + key, val, err := parsers.ParseKeyValueOpt(option) + if err != nil { + return nil, err + } + key = strings.ToLower(key) + switch key { + case "overlay2.override_kernel_check": + o.overrideKernelCheck, err = strconv.ParseBool(val) + if err != nil { + return nil, err + } + + default: + return nil, fmt.Errorf("overlay2: Unknown option %s", key) + } + } + return o, nil +} + +func supportsOverlay() error { + // We can try to modprobe overlay first before looking at + // /proc/filesystems to check whether overlay is supported + exec.Command("modprobe", "overlay").Run() + + f, err := os.Open("/proc/filesystems") + if err != nil { + return err + } + defer f.Close() + + s := bufio.NewScanner(f) + for s.Scan() { + if s.Text() == "nodev\toverlay" { + return nil + } + } + logrus.Error("'overlay' not found as a supported filesystem on this host. Please ensure kernel is new enough and has overlay support loaded.") + return graphdriver.ErrNotSupported +} + +func useNaiveDiff(home string) bool { + useNaiveDiffLock.Do(func() { + if err := hasOpaqueCopyUpBug(home); err != nil { + logrus.Warnf("Not using native diff for overlay2: %v", err) + useNaiveDiffOnly = true + } + }) + return useNaiveDiffOnly +} + +func (d *Driver) String() string { + return driverName +} + +// Status returns current driver information in a two-dimensional string array. +// Output contains "Backing Filesystem" used in this implementation. +func (d *Driver) Status() [][2]string { + return [][2]string{ + {"Backing Filesystem", backingFs}, + {"Supports d_type", strconv.FormatBool(d.supportsDType)}, + {"Native Overlay Diff", strconv.FormatBool(!useNaiveDiff(d.home))}, + } +} + +// GetMetadata returns metadata about the overlay driver such as the +// LowerDir, UpperDir, WorkDir and MergedDir used to store data. +func (d *Driver) GetMetadata(id string) (map[string]string, error) { + dir := d.dir(id) + if _, err := os.Stat(dir); err != nil { + return nil, err + } + + metadata := map[string]string{ + "WorkDir": path.Join(dir, "work"), + "MergedDir": path.Join(dir, "merged"), + "UpperDir": path.Join(dir, "diff"), + } + + lowerDirs, err := d.getLowerDirs(id) + if err != nil { + return nil, err + } + if len(lowerDirs) > 0 { + metadata["LowerDir"] = strings.Join(lowerDirs, ":") + } + + return metadata, nil +} + +// Cleanup cleans up any state created by overlay which should be removed +// when the daemon is shut down. For now, we just have to unmount the bind +// mount we created. +func (d *Driver) Cleanup() error { + return mount.Unmount(d.home) +} + +// CreateReadWrite creates a layer that is writable for use as a container +// file system. 
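To make the option plumbing concrete, here is a minimal in-package sketch (the helper name showOverrideOption is hypothetical) of how a daemon flag reaches parseOptions above; overlay2.override_kernel_check is the only key the driver currently understands:

```go
package overlay2

import "fmt"

// Hypothetical sketch: this is what parseOptions receives when the daemon
// is started with:
//
//	dockerd --storage-opt overlay2.override_kernel_check=true
func showOverrideOption() {
	opts, err := parseOptions([]string{"overlay2.override_kernel_check=true"})
	if err != nil {
		fmt.Println(err) // unknown keys and non-boolean values end up here
		return
	}
	fmt.Println(opts.overrideKernelCheck) // true
}
```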
+func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error { + return d.Create(id, parent, opts) +} + +// Create is used to create the upper, lower, and merged directories required for the overlay fs for a given id. +// The parent filesystem is used to configure these directories for the overlay. +func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) (retErr error) { + + if opts != nil && len(opts.StorageOpt) != 0 && !projectQuotaSupported { + return fmt.Errorf("--storage-opt is supported only for overlay over xfs with 'pquota' mount option") + } + + dir := d.dir(id) + + rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) + if err != nil { + return err + } + if err := idtools.MkdirAllAs(path.Dir(dir), 0700, rootUID, rootGID); err != nil { + return err + } + if err := idtools.MkdirAs(dir, 0700, rootUID, rootGID); err != nil { + return err + } + + defer func() { + // Clean up on failure + if retErr != nil { + os.RemoveAll(dir) + } + }() + + if opts != nil && len(opts.StorageOpt) > 0 { + driver := &Driver{} + if err := d.parseStorageOpt(opts.StorageOpt, driver); err != nil { + return err + } + + if driver.options.quota.Size > 0 { + // Set container disk quota limit + if err := d.quotaCtl.SetQuota(dir, driver.options.quota); err != nil { + return err + } + } + } + + if err := idtools.MkdirAs(path.Join(dir, "diff"), 0755, rootUID, rootGID); err != nil { + return err + } + + lid := generateID(idLength) + if err := os.Symlink(path.Join("..", id, "diff"), path.Join(d.home, linkDir, lid)); err != nil { + return err + } + + // Write link id to link file + if err := ioutil.WriteFile(path.Join(dir, "link"), []byte(lid), 0644); err != nil { + return err + } + + // if no parent directory, done + if parent == "" { + return nil + } + + if err := idtools.MkdirAs(path.Join(dir, "work"), 0700, rootUID, rootGID); err != nil { + return err + } + if err := idtools.MkdirAs(path.Join(dir, "merged"), 0700, rootUID, rootGID); err != nil { + return err + } + + lower, err := d.getLower(parent) + if err != nil { + return err + } + if lower != "" { + if err := ioutil.WriteFile(path.Join(dir, lowerFile), []byte(lower), 0666); err != nil { + return err + } + } + + return nil +} + +// Parse overlay storage options +func (d *Driver) parseStorageOpt(storageOpt map[string]string, driver *Driver) error { + // Read size to set the disk project quota per container + for key, val := range storageOpt { + key := strings.ToLower(key) + switch key { + case "size": + size, err := units.RAMInBytes(val) + if err != nil { + return err + } + driver.options.quota.Size = uint64(size) + default: + return fmt.Errorf("Unknown option %s", key) + } + } + + return nil +} + +func (d *Driver) getLower(parent string) (string, error) { + parentDir := d.dir(parent) + + // Ensure parent exists + if _, err := os.Lstat(parentDir); err != nil { + return "", err + } + + // Read Parent link file + parentLink, err := ioutil.ReadFile(path.Join(parentDir, "link")) + if err != nil { + return "", err + } + lowers := []string{path.Join(linkDir, string(parentLink))} + + parentLower, err := ioutil.ReadFile(path.Join(parentDir, lowerFile)) + if err == nil { + parentLowers := strings.Split(string(parentLower), ":") + lowers = append(lowers, parentLowers...) 
+ } + if len(lowers) > maxDepth { + return "", errors.New("max depth exceeded") + } + return strings.Join(lowers, ":"), nil +} + +func (d *Driver) dir(id string) string { + return path.Join(d.home, id) +} + +func (d *Driver) getLowerDirs(id string) ([]string, error) { + var lowersArray []string + lowers, err := ioutil.ReadFile(path.Join(d.dir(id), lowerFile)) + if err == nil { + for _, s := range strings.Split(string(lowers), ":") { + lp, err := os.Readlink(path.Join(d.home, s)) + if err != nil { + return nil, err + } + lowersArray = append(lowersArray, path.Clean(path.Join(d.home, linkDir, lp))) + } + } else if !os.IsNotExist(err) { + return nil, err + } + return lowersArray, nil +} + +// Remove cleans the directories that are created for this id. +func (d *Driver) Remove(id string) error { + d.locker.Lock(id) + defer d.locker.Unlock(id) + dir := d.dir(id) + lid, err := ioutil.ReadFile(path.Join(dir, "link")) + if err == nil { + if err := os.RemoveAll(path.Join(d.home, linkDir, string(lid))); err != nil { + logrus.Debugf("Failed to remove link: %v", err) + } + } + + if err := system.EnsureRemoveAll(dir); err != nil && !os.IsNotExist(err) { + return err + } + return nil +} + +// Get creates and mounts the required file system for the given id and returns the mount path. +func (d *Driver) Get(id string, mountLabel string) (s string, err error) { + d.locker.Lock(id) + defer d.locker.Unlock(id) + dir := d.dir(id) + if _, err := os.Stat(dir); err != nil { + return "", err + } + + diffDir := path.Join(dir, "diff") + lowers, err := ioutil.ReadFile(path.Join(dir, lowerFile)) + if err != nil { + // If no lower, just return diff directory + if os.IsNotExist(err) { + return diffDir, nil + } + return "", err + } + + mergedDir := path.Join(dir, "merged") + if count := d.ctr.Increment(mergedDir); count > 1 { + return mergedDir, nil + } + defer func() { + if err != nil { + if c := d.ctr.Decrement(mergedDir); c <= 0 { + syscall.Unmount(mergedDir, 0) + } + } + }() + + workDir := path.Join(dir, "work") + splitLowers := strings.Split(string(lowers), ":") + absLowers := make([]string, len(splitLowers)) + for i, s := range splitLowers { + absLowers[i] = path.Join(d.home, s) + } + opts := fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", strings.Join(absLowers, ":"), path.Join(dir, "diff"), path.Join(dir, "work")) + mountData := label.FormatMountLabel(opts, mountLabel) + mount := syscall.Mount + mountTarget := mergedDir + + pageSize := syscall.Getpagesize() + + // Go can return a larger page size than supported by the system + // as of go 1.7. This will be fixed in 1.8 and this block can be + // removed when building with 1.8. + // See https://github.com/golang/go/commit/1b9499b06989d2831e5b156161d6c07642926ee1 + // See https://github.com/docker/docker/issues/27384 + if pageSize > 4096 { + pageSize = 4096 + } + + // Use relative paths and mountFrom when the mount data has exceeded + // the page size. The mount syscall fails if the mount data cannot + // fit within a page and relative links make the mount data much + // smaller at the expense of requiring a fork exec to chroot. 
+ if len(mountData) > pageSize { + opts = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", string(lowers), path.Join(id, "diff"), path.Join(id, "work")) + mountData = label.FormatMountLabel(opts, mountLabel) + if len(mountData) > pageSize { + return "", fmt.Errorf("cannot mount layer, mount data too large %d", len(mountData)) + } + + mount = func(source string, target string, mType string, flags uintptr, label string) error { + return mountFrom(d.home, source, target, mType, flags, label) + } + mountTarget = path.Join(id, "merged") + } + + if err := mount("overlay", mountTarget, "overlay", 0, mountData); err != nil { + return "", fmt.Errorf("error creating overlay mount to %s: %v", mergedDir, err) + } + + // chown "workdir/work" to the remapped root UID/GID. Overlay fs inside a + // user namespace requires this to move a directory from lower to upper. + rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) + if err != nil { + return "", err + } + + if err := os.Chown(path.Join(workDir, "work"), rootUID, rootGID); err != nil { + return "", err + } + + return mergedDir, nil +} + +// Put unmounts the mount path created for the given id. +func (d *Driver) Put(id string) error { + d.locker.Lock(id) + defer d.locker.Unlock(id) + dir := d.dir(id) + _, err := ioutil.ReadFile(path.Join(dir, lowerFile)) + if err != nil { + // If no lower, no mount happened and just return directly + if os.IsNotExist(err) { + return nil + } + return err + } + + mountpoint := path.Join(dir, "merged") + if count := d.ctr.Decrement(mountpoint); count > 0 { + return nil + } + if err := syscall.Unmount(mountpoint, 0); err != nil { + logrus.Debugf("Failed to unmount %s overlay: %s - %v", id, mountpoint, err) + } + return nil +} + +// Exists checks to see if the directory for the given id exists. +func (d *Driver) Exists(id string) bool { + _, err := os.Stat(d.dir(id)) + return err == nil +} + +// isParent returns whether the passed-in parent is the direct parent of the passed-in layer +func (d *Driver) isParent(id, parent string) bool { + lowers, err := d.getLowerDirs(id) + if err != nil { + return false + } + if parent == "" && len(lowers) > 0 { + return false + } + + parentDir := d.dir(parent) + var ld string + if len(lowers) > 0 { + ld = filepath.Dir(lowers[0]) + } + if ld == "" && parent == "" { + return true + } + return ld == parentDir +} + +// ApplyDiff applies the new layer into a root +func (d *Driver) ApplyDiff(id string, parent string, diff io.Reader) (size int64, err error) { + if !d.isParent(id, parent) { + return d.naiveDiff.ApplyDiff(id, parent, diff) + } + + applyDir := d.getDiffPath(id) + + logrus.Debugf("Applying tar in %s", applyDir) + // Overlay doesn't need the parent id to apply the diff + if err := untar(diff, applyDir, &archive.TarOptions{ + UIDMaps: d.uidMaps, + GIDMaps: d.gidMaps, + WhiteoutFormat: archive.OverlayWhiteoutFormat, + }); err != nil { + return 0, err + } + + return directory.Size(applyDir) +} + +func (d *Driver) getDiffPath(id string) string { + dir := d.dir(id) + + return path.Join(dir, "diff") +} + +// DiffSize calculates the changes between the specified id +// and its parent and returns the size in bytes of the changes +// relative to its base filesystem directory. 
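Stepping back from the driver for a moment: the page-size fallback in Get above is easier to see with concrete values. A standalone sketch, with hypothetical ids and link names standing in for the contents of the real "link" and "lower" files:

```go
package main

import (
	"fmt"
	"path"
	"strings"
)

func main() {
	home := "/var/lib/docker/overlay2" // assumed driver home
	id := "4bd2e1b4e5f0"               // hypothetical layer id
	lowers := "l/ABCDEF:l/GHIJKL"      // hypothetical "lower" file contents

	// Absolute form tried first: one long path per lower layer.
	var abs []string
	for _, s := range strings.Split(lowers, ":") {
		abs = append(abs, path.Join(home, s))
	}
	long := fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s",
		strings.Join(abs, ":"), path.Join(home, id, "diff"), path.Join(home, id, "work"))

	// Relative form used with mountFrom once the absolute form outgrows a page:
	// the re-exec'd helper chdirs into home, so the short paths still resolve.
	short := fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s",
		lowers, path.Join(id, "diff"), path.Join(id, "work"))

	fmt.Println(len(long), len(short)) // the relative form saves len(home)+1 bytes per path
}
```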
+func (d *Driver) DiffSize(id, parent string) (size int64, err error) { + if useNaiveDiff(d.home) || !d.isParent(id, parent) { + return d.naiveDiff.DiffSize(id, parent) + } + return directory.Size(d.getDiffPath(id)) +} + +// Diff produces an archive of the changes between the specified +// layer and its parent layer which may be "". +func (d *Driver) Diff(id, parent string) (io.ReadCloser, error) { + if useNaiveDiff(d.home) || !d.isParent(id, parent) { + return d.naiveDiff.Diff(id, parent) + } + + diffPath := d.getDiffPath(id) + logrus.Debugf("Tar with options on %s", diffPath) + return archive.TarWithOptions(diffPath, &archive.TarOptions{ + Compression: archive.Uncompressed, + UIDMaps: d.uidMaps, + GIDMaps: d.gidMaps, + WhiteoutFormat: archive.OverlayWhiteoutFormat, + }) +} + +// Changes produces a list of changes between the specified layer +// and its parent layer. If parent is "", then all changes will be ADD changes. +func (d *Driver) Changes(id, parent string) ([]archive.Change, error) { + if useNaiveDiff(d.home) || !d.isParent(id, parent) { + return d.naiveDiff.Changes(id, parent) + } + // Overlay doesn't have snapshots, so we need to get changes from all parent + // layers. + diffPath := d.getDiffPath(id) + layers, err := d.getLowerDirs(id) + if err != nil { + return nil, err + } + + return archive.OverlayChanges(layers, diffPath) +} diff --git a/components/engine/daemon/graphdriver/overlay2/overlay_test.go b/components/engine/daemon/graphdriver/overlay2/overlay_test.go new file mode 100644 index 0000000000..cf77ff22be --- /dev/null +++ b/components/engine/daemon/graphdriver/overlay2/overlay_test.go @@ -0,0 +1,121 @@ +// +build linux + +package overlay2 + +import ( + "io/ioutil" + "os" + "syscall" + "testing" + + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/daemon/graphdriver/graphtest" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/reexec" +) + +func init() { + // Do not use chroot to speed run time and allow archive + // errors or hangs to be debugged directly from the test process. 
+ untar = archive.UntarUncompressed + graphdriver.ApplyUncompressedLayer = archive.ApplyUncompressedLayer + + reexec.Init() +} + +func cdMountFrom(dir, device, target, mType, label string) error { + wd, err := os.Getwd() + if err != nil { + return err + } + os.Chdir(dir) + defer os.Chdir(wd) + + return syscall.Mount(device, target, mType, 0, label) +} + +func skipIfNaive(t *testing.T) { + td, err := ioutil.TempDir("", "naive-check-") + if err != nil { + t.Fatalf("Failed to create temp dir: %v", err) + } + defer os.RemoveAll(td) + + if useNaiveDiff(td) { + t.Skipf("Cannot run test with naive diff") + } +} + +// This avoids creating a new driver for each test if all tests are run +// Make sure to put new tests between TestOverlaySetup and TestOverlayTeardown +func TestOverlaySetup(t *testing.T) { + graphtest.GetDriver(t, driverName) +} + +func TestOverlayCreateEmpty(t *testing.T) { + graphtest.DriverTestCreateEmpty(t, driverName) +} + +func TestOverlayCreateBase(t *testing.T) { + graphtest.DriverTestCreateBase(t, driverName) +} + +func TestOverlayCreateSnap(t *testing.T) { + graphtest.DriverTestCreateSnap(t, driverName) +} + +func TestOverlay128LayerRead(t *testing.T) { + graphtest.DriverTestDeepLayerRead(t, 128, driverName) +} + +func TestOverlayDiffApply10Files(t *testing.T) { + skipIfNaive(t) + graphtest.DriverTestDiffApply(t, 10, driverName) +} + +func TestOverlayChanges(t *testing.T) { + skipIfNaive(t) + graphtest.DriverTestChanges(t, driverName) +} + +func TestOverlayTeardown(t *testing.T) { + graphtest.PutDriver(t) +} + +// Benchmarks should always setup new driver + +func BenchmarkExists(b *testing.B) { + graphtest.DriverBenchExists(b, driverName) +} + +func BenchmarkGetEmpty(b *testing.B) { + graphtest.DriverBenchGetEmpty(b, driverName) +} + +func BenchmarkDiffBase(b *testing.B) { + graphtest.DriverBenchDiffBase(b, driverName) +} + +func BenchmarkDiffSmallUpper(b *testing.B) { + graphtest.DriverBenchDiffN(b, 10, 10, driverName) +} + +func BenchmarkDiff10KFileUpper(b *testing.B) { + graphtest.DriverBenchDiffN(b, 10, 10000, driverName) +} + +func BenchmarkDiff10KFilesBottom(b *testing.B) { + graphtest.DriverBenchDiffN(b, 10000, 10, driverName) +} + +func BenchmarkDiffApply100(b *testing.B) { + graphtest.DriverBenchDiffApplyN(b, 100, driverName) +} + +func BenchmarkDiff20Layers(b *testing.B) { + graphtest.DriverBenchDeepLayerDiff(b, 20, driverName) +} + +func BenchmarkRead20Layers(b *testing.B) { + graphtest.DriverBenchDeepLayerRead(b, 20, driverName) +} diff --git a/components/engine/daemon/graphdriver/overlay2/overlay_unsupported.go b/components/engine/daemon/graphdriver/overlay2/overlay_unsupported.go new file mode 100644 index 0000000000..e5ac4ca8c6 --- /dev/null +++ b/components/engine/daemon/graphdriver/overlay2/overlay_unsupported.go @@ -0,0 +1,3 @@ +// +build !linux + +package overlay2 diff --git a/components/engine/daemon/graphdriver/overlay2/randomid.go b/components/engine/daemon/graphdriver/overlay2/randomid.go new file mode 100644 index 0000000000..af5cb659d5 --- /dev/null +++ b/components/engine/daemon/graphdriver/overlay2/randomid.go @@ -0,0 +1,80 @@ +// +build linux + +package overlay2 + +import ( + "crypto/rand" + "encoding/base32" + "fmt" + "io" + "os" + "syscall" + "time" + + "github.com/Sirupsen/logrus" +) + +// generateID creates a new random string identifier with the given length +func generateID(l int) string { + const ( + // ensures we backoff for less than 450ms total. 
Use the following to + // select new value, in units of 10ms: + // n*(n+1)/2 = d -> n^2 + n - 2d -> n = (sqrt(8d + 1) - 1)/2 + maxretries = 9 + backoff = time.Millisecond * 10 + ) + + var ( + totalBackoff time.Duration + count int + retries int + size = (l*5 + 7) / 8 + u = make([]byte, size) + ) + // TODO: Include time component, counter component, random component + + for { + // This should never block but the read may fail. Because of this, + // we just try to read the random number generator until we get + // something. This is a very rare condition but may happen. + b := time.Duration(retries) * backoff + time.Sleep(b) + totalBackoff += b + + n, err := io.ReadFull(rand.Reader, u[count:]) + if err != nil { + if retryOnError(err) && retries < maxretries { + count += n + retries++ + logrus.Errorf("error generating version 4 uuid, retrying: %v", err) + continue + } + + // Any other errors represent a system problem. What did someone + // do to /dev/urandom? + panic(fmt.Errorf("error reading random number generator, retried for %v: %v", totalBackoff.String(), err)) + } + + break + } + + s := base32.StdEncoding.EncodeToString(u) + + return s[:l] +} + +// retryOnError tries to detect whether or not retrying would be fruitful. +func retryOnError(err error) bool { + switch err := err.(type) { + case *os.PathError: + return retryOnError(err.Err) // unpack the target error + case syscall.Errno: + if err == syscall.EPERM { + // EPERM represents an entropy pool exhaustion, a condition under + // which we backoff and retry. + return true + } + } + + return false +} diff --git a/components/engine/daemon/graphdriver/overlayutils/overlayutils.go b/components/engine/daemon/graphdriver/overlayutils/overlayutils.go new file mode 100644 index 0000000000..7491c3457c --- /dev/null +++ b/components/engine/daemon/graphdriver/overlayutils/overlayutils.go @@ -0,0 +1,18 @@ +// +build linux + +package overlayutils + +import ( + "errors" + "fmt" +) + +// ErrDTypeNotSupported denotes that the backing filesystem doesn't support d_type. +func ErrDTypeNotSupported(driver, backingFs string) error { + msg := fmt.Sprintf("%s: the backing %s filesystem is formatted without d_type support, which leads to incorrect behavior.", driver, backingFs) + if backingFs == "xfs" { + msg += " Reformat the filesystem with ftype=1 to enable d_type support." + } + msg += " Running without d_type support will no longer be supported in Docker 17.12." + return errors.New(msg) +} diff --git a/components/engine/daemon/graphdriver/plugin.go b/components/engine/daemon/graphdriver/plugin.go new file mode 100644 index 0000000000..f6852f0752 --- /dev/null +++ b/components/engine/daemon/graphdriver/plugin.go @@ -0,0 +1,43 @@ +package graphdriver + +import ( + "fmt" + "io" + "path/filepath" + + "github.com/docker/docker/pkg/plugingetter" + "github.com/docker/docker/plugin/v2" +) + +type pluginClient interface { + // Call calls the specified method with the specified arguments for the plugin. 
+ Call(string, interface{}, interface{}) error + // Stream calls the specified method with the specified arguments for the plugin and returns the response IO stream + Stream(string, interface{}) (io.ReadCloser, error) + // SendFile calls the specified method, and passes through the IO stream + SendFile(string, io.Reader, interface{}) error +} + +func lookupPlugin(name string, pg plugingetter.PluginGetter, config Options) (Driver, error) { + if !config.ExperimentalEnabled { + return nil, fmt.Errorf("graphdriver plugins are only supported with experimental mode") + } + pl, err := pg.Get(name, "GraphDriver", plugingetter.Acquire) + if err != nil { + return nil, fmt.Errorf("Error looking up graphdriver plugin %s: %v", name, err) + } + return newPluginDriver(name, pl, config) +} + +func newPluginDriver(name string, pl plugingetter.CompatPlugin, config Options) (Driver, error) { + home := config.Root + if !pl.IsV1() { + if p, ok := pl.(*v2.Plugin); ok { + if p.PropagatedMount != "" { + home = p.PluginObj.Config.PropagatedMount + } + } + } + proxy := &graphDriverProxy{name, pl, Capabilities{}} + return proxy, proxy.Init(filepath.Join(home, name), config.DriverOptions, config.UIDMaps, config.GIDMaps) +} diff --git a/components/engine/daemon/graphdriver/proxy.go b/components/engine/daemon/graphdriver/proxy.go new file mode 100644 index 0000000000..120afad459 --- /dev/null +++ b/components/engine/daemon/graphdriver/proxy.go @@ -0,0 +1,263 @@ +package graphdriver + +import ( + "errors" + "fmt" + "io" + "path/filepath" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/plugingetter" + "github.com/docker/docker/pkg/plugins" +) + +type graphDriverProxy struct { + name string + p plugingetter.CompatPlugin + caps Capabilities +} + +type graphDriverRequest struct { + ID string `json:",omitempty"` + Parent string `json:",omitempty"` + MountLabel string `json:",omitempty"` + StorageOpt map[string]string `json:",omitempty"` +} + +type graphDriverResponse struct { + Err string `json:",omitempty"` + Dir string `json:",omitempty"` + Exists bool `json:",omitempty"` + Status [][2]string `json:",omitempty"` + Changes []archive.Change `json:",omitempty"` + Size int64 `json:",omitempty"` + Metadata map[string]string `json:",omitempty"` + Capabilities Capabilities `json:",omitempty"` +} + +type graphDriverInitRequest struct { + Home string + Opts []string `json:"Opts"` + UIDMaps []idtools.IDMap `json:"UIDMaps"` + GIDMaps []idtools.IDMap `json:"GIDMaps"` +} + +func (d *graphDriverProxy) Init(home string, opts []string, uidMaps, gidMaps []idtools.IDMap) error { + if !d.p.IsV1() { + if cp, ok := d.p.(plugingetter.CountedPlugin); ok { + // always acquire here, it will be cleaned up on daemon shutdown + cp.Acquire() + } + } + args := &graphDriverInitRequest{ + Home: home, + Opts: opts, + UIDMaps: uidMaps, + GIDMaps: gidMaps, + } + var ret graphDriverResponse + if err := d.p.Client().Call("GraphDriver.Init", args, &ret); err != nil { + return err + } + if ret.Err != "" { + return errors.New(ret.Err) + } + caps, err := d.fetchCaps() + if err != nil { + return err + } + d.caps = caps + return nil +} + +func (d *graphDriverProxy) fetchCaps() (Capabilities, error) { + args := &graphDriverRequest{} + var ret graphDriverResponse + if err := d.p.Client().Call("GraphDriver.Capabilities", args, &ret); err != nil { + if !plugins.IsNotFound(err) { + return Capabilities{}, err + } + } + return ret.Capabilities, nil +} + +func (d *graphDriverProxy) String() string { + 
return d.name +} + +func (d *graphDriverProxy) Capabilities() Capabilities { + return d.caps +} + +func (d *graphDriverProxy) CreateReadWrite(id, parent string, opts *CreateOpts) error { + return d.create("GraphDriver.CreateReadWrite", id, parent, opts) +} + +func (d *graphDriverProxy) Create(id, parent string, opts *CreateOpts) error { + return d.create("GraphDriver.Create", id, parent, opts) +} + +func (d *graphDriverProxy) create(method, id, parent string, opts *CreateOpts) error { + args := &graphDriverRequest{ + ID: id, + Parent: parent, + } + if opts != nil { + args.MountLabel = opts.MountLabel + args.StorageOpt = opts.StorageOpt + } + var ret graphDriverResponse + if err := d.p.Client().Call(method, args, &ret); err != nil { + return err + } + if ret.Err != "" { + return errors.New(ret.Err) + } + return nil +} + +func (d *graphDriverProxy) Remove(id string) error { + args := &graphDriverRequest{ID: id} + var ret graphDriverResponse + if err := d.p.Client().Call("GraphDriver.Remove", args, &ret); err != nil { + return err + } + if ret.Err != "" { + return errors.New(ret.Err) + } + return nil +} + +func (d *graphDriverProxy) Get(id, mountLabel string) (string, error) { + args := &graphDriverRequest{ + ID: id, + MountLabel: mountLabel, + } + var ret graphDriverResponse + if err := d.p.Client().Call("GraphDriver.Get", args, &ret); err != nil { + return "", err + } + var err error + if ret.Err != "" { + err = errors.New(ret.Err) + } + return filepath.Join(d.p.BasePath(), ret.Dir), err +} + +func (d *graphDriverProxy) Put(id string) error { + args := &graphDriverRequest{ID: id} + var ret graphDriverResponse + if err := d.p.Client().Call("GraphDriver.Put", args, &ret); err != nil { + return err + } + if ret.Err != "" { + return errors.New(ret.Err) + } + return nil +} + +func (d *graphDriverProxy) Exists(id string) bool { + args := &graphDriverRequest{ID: id} + var ret graphDriverResponse + if err := d.p.Client().Call("GraphDriver.Exists", args, &ret); err != nil { + return false + } + return ret.Exists +} + +func (d *graphDriverProxy) Status() [][2]string { + args := &graphDriverRequest{} + var ret graphDriverResponse + if err := d.p.Client().Call("GraphDriver.Status", args, &ret); err != nil { + return nil + } + return ret.Status +} + +func (d *graphDriverProxy) GetMetadata(id string) (map[string]string, error) { + args := &graphDriverRequest{ + ID: id, + } + var ret graphDriverResponse + if err := d.p.Client().Call("GraphDriver.GetMetadata", args, &ret); err != nil { + return nil, err + } + if ret.Err != "" { + return nil, errors.New(ret.Err) + } + return ret.Metadata, nil +} + +func (d *graphDriverProxy) Cleanup() error { + if !d.p.IsV1() { + if cp, ok := d.p.(plugingetter.CountedPlugin); ok { + // always release + defer cp.Release() + } + } + + args := &graphDriverRequest{} + var ret graphDriverResponse + if err := d.p.Client().Call("GraphDriver.Cleanup", args, &ret); err != nil { + return nil + } + if ret.Err != "" { + return errors.New(ret.Err) + } + return nil +} + +func (d *graphDriverProxy) Diff(id, parent string) (io.ReadCloser, error) { + args := &graphDriverRequest{ + ID: id, + Parent: parent, + } + body, err := d.p.Client().Stream("GraphDriver.Diff", args) + if err != nil { + return nil, err + } + return body, nil +} + +func (d *graphDriverProxy) Changes(id, parent string) ([]archive.Change, error) { + args := &graphDriverRequest{ + ID: id, + Parent: parent, + } + var ret graphDriverResponse + if err := d.p.Client().Call("GraphDriver.Changes", args, &ret); err != nil { + 
return nil, err + } + if ret.Err != "" { + return nil, errors.New(ret.Err) + } + + return ret.Changes, nil +} + +func (d *graphDriverProxy) ApplyDiff(id, parent string, diff io.Reader) (int64, error) { + var ret graphDriverResponse + if err := d.p.Client().SendFile(fmt.Sprintf("GraphDriver.ApplyDiff?id=%s&parent=%s", id, parent), diff, &ret); err != nil { + return -1, err + } + if ret.Err != "" { + return -1, errors.New(ret.Err) + } + return ret.Size, nil +} + +func (d *graphDriverProxy) DiffSize(id, parent string) (int64, error) { + args := &graphDriverRequest{ + ID: id, + Parent: parent, + } + var ret graphDriverResponse + if err := d.p.Client().Call("GraphDriver.DiffSize", args, &ret); err != nil { + return -1, err + } + if ret.Err != "" { + return -1, errors.New(ret.Err) + } + return ret.Size, nil +} diff --git a/components/engine/daemon/graphdriver/quota/projectquota.go b/components/engine/daemon/graphdriver/quota/projectquota.go new file mode 100644 index 0000000000..e408d5f906 --- /dev/null +++ b/components/engine/daemon/graphdriver/quota/projectquota.go @@ -0,0 +1,339 @@ +// +build linux + +// +// projectquota.go - implements XFS project quota controls +// for setting quota limits on a newly created directory. +// It currently supports the legacy XFS specific ioctls. +// +// TODO: use generic quota control ioctl FS_IOC_FS{GET,SET}XATTR +// for both xfs/ext4 for kernel version >= v4.5 +// + +package quota + +/* +#include <stdlib.h> +#include <dirent.h> +#include <linux/fs.h> +#include <linux/quota.h> +#include <linux/dqblk_xfs.h> + +#ifndef FS_XFLAG_PROJINHERIT +struct fsxattr { + __u32 fsx_xflags; + __u32 fsx_extsize; + __u32 fsx_nextents; + __u32 fsx_projid; + unsigned char fsx_pad[12]; +}; +#define FS_XFLAG_PROJINHERIT 0x00000200 +#endif +#ifndef FS_IOC_FSGETXATTR +#define FS_IOC_FSGETXATTR _IOR ('X', 31, struct fsxattr) +#endif +#ifndef FS_IOC_FSSETXATTR +#define FS_IOC_FSSETXATTR _IOW ('X', 32, struct fsxattr) +#endif + +#ifndef PRJQUOTA +#define PRJQUOTA 2 +#endif +#ifndef XFS_PROJ_QUOTA +#define XFS_PROJ_QUOTA 2 +#endif +#ifndef Q_XSETPQLIM +#define Q_XSETPQLIM QCMD(Q_XSETQLIM, PRJQUOTA) +#endif +#ifndef Q_XGETPQUOTA +#define Q_XGETPQUOTA QCMD(Q_XGETQUOTA, PRJQUOTA) +#endif +*/ +import "C" +import ( + "fmt" + "io/ioutil" + "os" + "path" + "path/filepath" + "syscall" + "unsafe" + + "github.com/Sirupsen/logrus" +) + +// Quota limit params - currently we only control blocks hard limit +type Quota struct { + Size uint64 +} + +// Control - Context to be used by a storage driver (e.g. overlay) +// that wants to apply project quotas to container dirs +type Control struct { + backingFsBlockDev string + nextProjectID uint32 + quotas map[string]uint32 +} + +// NewControl - initialize project quota support. +// Test to make sure that quota can be set on a test dir and find +// the first project id to be used for the next container create. +// +// Returns nil (and error) if project quota is not supported. +// +// First get the project id of the home directory. +// This test will fail if the backing fs is not xfs. +// +// xfs_quota tool can be used to assign a project id to the driver home directory, e.g.: +// echo 999:/var/lib/docker/overlay2 >> /etc/projects +// echo docker:999 >> /etc/projid +// xfs_quota -x -c 'project -s docker' / +// +// In that case, the home directory project id will be used as a "start offset" +// and all containers will be assigned larger project ids (e.g. >= 1000). +// This is a way to prevent xfs_quota management from conflicting with docker. 
+// +// Then try to create a test directory with the next project id and set a quota +// on it. If that works, continue to scan existing containers to map allocated +// project ids. +// +func NewControl(basePath string) (*Control, error) { + // + // Get project id of parent dir as minimal id to be used by driver + // + minProjectID, err := getProjectID(basePath) + if err != nil { + return nil, err + } + minProjectID++ + + // + // create backing filesystem device node + // + backingFsBlockDev, err := makeBackingFsDev(basePath) + if err != nil { + return nil, err + } + + // + // Test if filesystem supports project quotas by trying to set + // a quota on the first available project id + // + quota := Quota{ + Size: 0, + } + if err := setProjectQuota(backingFsBlockDev, minProjectID, quota); err != nil { + return nil, err + } + + q := Control{ + backingFsBlockDev: backingFsBlockDev, + nextProjectID: minProjectID + 1, + quotas: make(map[string]uint32), + } + + // + // get first project id to be used for next container + // + err = q.findNextProjectID(basePath) + if err != nil { + return nil, err + } + + logrus.Debugf("NewControl(%s): nextProjectID = %d", basePath, q.nextProjectID) + return &q, nil +} + +// SetQuota - assign a unique project id to directory and set the quota limits +// for that project id +func (q *Control) SetQuota(targetPath string, quota Quota) error { + + projectID, ok := q.quotas[targetPath] + if !ok { + projectID = q.nextProjectID + + // + // assign project id to new container directory + // + err := setProjectID(targetPath, projectID) + if err != nil { + return err + } + + q.quotas[targetPath] = projectID + q.nextProjectID++ + } + + // + // set the quota limit for the container's project id + // + logrus.Debugf("SetQuota(%s, %d): projectID=%d", targetPath, quota.Size, projectID) + return setProjectQuota(q.backingFsBlockDev, projectID, quota) +} + +// setProjectQuota - set the quota for project id on xfs block device +func setProjectQuota(backingFsBlockDev string, projectID uint32, quota Quota) error { + var d C.fs_disk_quota_t + d.d_version = C.FS_DQUOT_VERSION + d.d_id = C.__u32(projectID) + d.d_flags = C.XFS_PROJ_QUOTA + + d.d_fieldmask = C.FS_DQ_BHARD | C.FS_DQ_BSOFT + d.d_blk_hardlimit = C.__u64(quota.Size / 512) + d.d_blk_softlimit = d.d_blk_hardlimit + + var cs = C.CString(backingFsBlockDev) + defer C.free(unsafe.Pointer(cs)) + + _, _, errno := syscall.Syscall6(syscall.SYS_QUOTACTL, C.Q_XSETPQLIM, + uintptr(unsafe.Pointer(cs)), uintptr(d.d_id), + uintptr(unsafe.Pointer(&d)), 0, 0) + if errno != 0 { + return fmt.Errorf("Failed to set quota limit for projid %d on %s: %v", + projectID, backingFsBlockDev, errno.Error()) + } + + return nil +} + +// GetQuota - get the quota limits of a directory that was configured with SetQuota +func (q *Control) GetQuota(targetPath string, quota *Quota) error { + + projectID, ok := q.quotas[targetPath] + if !ok { + return fmt.Errorf("quota not found for path : %s", targetPath) + } + + // + // get the quota limit for the container's project id + // + var d C.fs_disk_quota_t + + var cs = C.CString(q.backingFsBlockDev) + defer C.free(unsafe.Pointer(cs)) + + _, _, errno := syscall.Syscall6(syscall.SYS_QUOTACTL, C.Q_XGETPQUOTA, + uintptr(unsafe.Pointer(cs)), uintptr(C.__u32(projectID)), + uintptr(unsafe.Pointer(&d)), 0, 0) + if errno != 0 { + return fmt.Errorf("Failed to get quota limit for projid %d on %s: %v", + projectID, q.backingFsBlockDev, errno.Error()) + } + quota.Size = uint64(d.d_blk_hardlimit) * 512 + + return nil +} + +// 
getProjectID - get the project id of path on xfs +func getProjectID(targetPath string) (uint32, error) { + dir, err := openDir(targetPath) + if err != nil { + return 0, err + } + defer closeDir(dir) + + var fsx C.struct_fsxattr + _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.FS_IOC_FSGETXATTR, + uintptr(unsafe.Pointer(&fsx))) + if errno != 0 { + return 0, fmt.Errorf("Failed to get projid for %s: %v", targetPath, errno.Error()) + } + + return uint32(fsx.fsx_projid), nil +} + +// setProjectID - set the project id of path on xfs +func setProjectID(targetPath string, projectID uint32) error { + dir, err := openDir(targetPath) + if err != nil { + return err + } + defer closeDir(dir) + + var fsx C.struct_fsxattr + _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.FS_IOC_FSGETXATTR, + uintptr(unsafe.Pointer(&fsx))) + if errno != 0 { + return fmt.Errorf("Failed to get projid for %s: %v", targetPath, errno.Error()) + } + fsx.fsx_projid = C.__u32(projectID) + fsx.fsx_xflags |= C.FS_XFLAG_PROJINHERIT + _, _, errno = syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.FS_IOC_FSSETXATTR, + uintptr(unsafe.Pointer(&fsx))) + if errno != 0 { + return fmt.Errorf("Failed to set projid for %s: %v", targetPath, errno.Error()) + } + + return nil +} + +// findNextProjectID - find the next project id to be used for containers +// by scanning driver home directory to find used project ids +func (q *Control) findNextProjectID(home string) error { + files, err := ioutil.ReadDir(home) + if err != nil { + return fmt.Errorf("read directory failed: %s", home) + } + for _, file := range files { + if !file.IsDir() { + continue + } + path := filepath.Join(home, file.Name()) + projid, err := getProjectID(path) + if err != nil { + return err + } + if projid > 0 { + q.quotas[path] = projid + } + if q.nextProjectID <= projid { + q.nextProjectID = projid + 1 + } + } + + return nil +} + +func free(p *C.char) { + C.free(unsafe.Pointer(p)) +} + +func openDir(path string) (*C.DIR, error) { + Cpath := C.CString(path) + defer free(Cpath) + + dir := C.opendir(Cpath) + if dir == nil { + return nil, fmt.Errorf("Can't open dir %s", path) + } + return dir, nil +} + +func closeDir(dir *C.DIR) { + if dir != nil { + C.closedir(dir) + } +} + +func getDirFd(dir *C.DIR) uintptr { + return uintptr(C.dirfd(dir)) +} + +// Get the backing block device of the driver home directory +// and create a block device node under the home directory +// to be used by quotactl commands +func makeBackingFsDev(home string) (string, error) { + fileinfo, err := os.Stat(home) + if err != nil { + return "", err + } + + backingFsBlockDev := path.Join(home, "backingFsBlockDev") + // Re-create just in case someone copied the home directory over to a new device + syscall.Unlink(backingFsBlockDev) + stat := fileinfo.Sys().(*syscall.Stat_t) + if err := syscall.Mknod(backingFsBlockDev, syscall.S_IFBLK|0600, int(stat.Dev)); err != nil { + return "", fmt.Errorf("Failed to mknod %s: %v", backingFsBlockDev, err) + } + + return backingFsBlockDev, nil +} diff --git a/components/engine/daemon/graphdriver/register/register_aufs.go b/components/engine/daemon/graphdriver/register/register_aufs.go new file mode 100644 index 0000000000..262954d6e3 --- /dev/null +++ b/components/engine/daemon/graphdriver/register/register_aufs.go @@ -0,0 +1,8 @@ +// +build !exclude_graphdriver_aufs,linux + +package register + +import ( + // register the aufs graphdriver + _ "github.com/docker/docker/daemon/graphdriver/aufs" +) diff --git 
a/components/engine/daemon/graphdriver/register/register_btrfs.go b/components/engine/daemon/graphdriver/register/register_btrfs.go new file mode 100644 index 0000000000..f456cc5ce5 --- /dev/null +++ b/components/engine/daemon/graphdriver/register/register_btrfs.go @@ -0,0 +1,8 @@ +// +build !exclude_graphdriver_btrfs,linux + +package register + +import ( + // register the btrfs graphdriver + _ "github.com/docker/docker/daemon/graphdriver/btrfs" +) diff --git a/components/engine/daemon/graphdriver/register/register_devicemapper.go b/components/engine/daemon/graphdriver/register/register_devicemapper.go new file mode 100644 index 0000000000..bb2e9ef541 --- /dev/null +++ b/components/engine/daemon/graphdriver/register/register_devicemapper.go @@ -0,0 +1,8 @@ +// +build !exclude_graphdriver_devicemapper,linux + +package register + +import ( + // register the devmapper graphdriver + _ "github.com/docker/docker/daemon/graphdriver/devmapper" +) diff --git a/components/engine/daemon/graphdriver/register/register_overlay.go b/components/engine/daemon/graphdriver/register/register_overlay.go new file mode 100644 index 0000000000..9ba849cedc --- /dev/null +++ b/components/engine/daemon/graphdriver/register/register_overlay.go @@ -0,0 +1,9 @@ +// +build !exclude_graphdriver_overlay,linux + +package register + +import ( + // register the overlay graphdriver + _ "github.com/docker/docker/daemon/graphdriver/overlay" + _ "github.com/docker/docker/daemon/graphdriver/overlay2" +) diff --git a/components/engine/daemon/graphdriver/register/register_vfs.go b/components/engine/daemon/graphdriver/register/register_vfs.go new file mode 100644 index 0000000000..98fad23b20 --- /dev/null +++ b/components/engine/daemon/graphdriver/register/register_vfs.go @@ -0,0 +1,6 @@ +package register + +import ( + // register vfs + _ "github.com/docker/docker/daemon/graphdriver/vfs" +) diff --git a/components/engine/daemon/graphdriver/register/register_windows.go b/components/engine/daemon/graphdriver/register/register_windows.go new file mode 100644 index 0000000000..efaa5005ed --- /dev/null +++ b/components/engine/daemon/graphdriver/register/register_windows.go @@ -0,0 +1,6 @@ +package register + +import ( + // register the windows graph driver + _ "github.com/docker/docker/daemon/graphdriver/windows" +) diff --git a/components/engine/daemon/graphdriver/register/register_zfs.go b/components/engine/daemon/graphdriver/register/register_zfs.go new file mode 100644 index 0000000000..8f34e35537 --- /dev/null +++ b/components/engine/daemon/graphdriver/register/register_zfs.go @@ -0,0 +1,8 @@ +// +build !exclude_graphdriver_zfs,linux !exclude_graphdriver_zfs,freebsd solaris + +package register + +import ( + // register the zfs driver + _ "github.com/docker/docker/daemon/graphdriver/zfs" +) diff --git a/components/engine/daemon/graphdriver/vfs/driver.go b/components/engine/daemon/graphdriver/vfs/driver.go new file mode 100644 index 0000000000..846215e810 --- /dev/null +++ b/components/engine/daemon/graphdriver/vfs/driver.go @@ -0,0 +1,146 @@ +package vfs + +import ( + "fmt" + "os" + "path/filepath" + + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/pkg/chrootarchive" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/system" + + "github.com/opencontainers/selinux/go-selinux/label" +) + +var ( + // CopyWithTar defines the copy method to use. + CopyWithTar = chrootarchive.CopyWithTar +) + +func init() { + graphdriver.Register("vfs", Init) +} + +// Init returns a new VFS driver. 
+// This sets the home directory for the driver and returns NaiveDiffDriver. +func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { + d := &Driver{ + home: home, + uidMaps: uidMaps, + gidMaps: gidMaps, + } + rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps) + if err != nil { + return nil, err + } + if err := idtools.MkdirAllAs(home, 0700, rootUID, rootGID); err != nil { + return nil, err + } + return graphdriver.NewNaiveDiffDriver(d, uidMaps, gidMaps), nil +} + +// Driver holds information about the driver, home directory of the driver. +// Driver implements graphdriver.ProtoDriver. It uses only basic vfs operations. +// In order to support layering, files are copied from the parent layer into the new layer. There is no copy-on-write support. +// Driver must be wrapped in NaiveDiffDriver to be used as a graphdriver.Driver +type Driver struct { + home string + uidMaps []idtools.IDMap + gidMaps []idtools.IDMap +} + +func (d *Driver) String() string { + return "vfs" +} + +// Status is used for implementing the graphdriver.ProtoDriver interface. VFS does not currently have any status information. +func (d *Driver) Status() [][2]string { + return nil +} + +// GetMetadata is used for implementing the graphdriver.ProtoDriver interface. VFS does not currently have any meta data. +func (d *Driver) GetMetadata(id string) (map[string]string, error) { + return nil, nil +} + +// Cleanup is used to implement graphdriver.ProtoDriver. There is no cleanup required for this driver. +func (d *Driver) Cleanup() error { + return nil +} + +// CreateReadWrite creates a layer that is writable for use as a container +// file system. +func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error { + return d.Create(id, parent, opts) +} + +// Create prepares the filesystem for the VFS driver and copies the directory for the given id under the parent. +func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error { + if opts != nil && len(opts.StorageOpt) != 0 { + return fmt.Errorf("--storage-opt is not supported for vfs") + } + + dir := d.dir(id) + rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) + if err != nil { + return err + } + if err := idtools.MkdirAllAs(filepath.Dir(dir), 0700, rootUID, rootGID); err != nil { + return err + } + if err := idtools.MkdirAs(dir, 0755, rootUID, rootGID); err != nil { + return err + } + labelOpts := []string{"level:s0"} + if _, mountLabel, err := label.InitLabels(labelOpts); err == nil { + label.SetFileLabel(dir, mountLabel) + } + if parent == "" { + return nil + } + parentDir, err := d.Get(parent, "") + if err != nil { + return fmt.Errorf("%s: %s", parent, err) + } + if err := CopyWithTar(parentDir, dir); err != nil { + return err + } + return nil +} + +func (d *Driver) dir(id string) string { + return filepath.Join(d.home, "dir", filepath.Base(id)) +} + +// Remove deletes the content from the directory for a given id. +func (d *Driver) Remove(id string) error { + if err := system.EnsureRemoveAll(d.dir(id)); err != nil { + return err + } + return nil +} + +// Get returns the directory for the given id. 
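One design note before the remaining accessors: because Create copies the parent with CopyWithTar, vfs layers share nothing. A minimal sketch of the resulting on-disk layout, with a hypothetical home and layer ids:

```go
package main

import (
	"fmt"
	"path/filepath"
)

// Mirrors Driver.dir above: every layer lives under <home>/dir/<id> as a
// complete tree, so n layers of an m-byte image cost roughly n*m bytes.
func main() {
	home := "/var/lib/docker/vfs" // hypothetical driver home
	for _, id := range []string{"base", "base-init", "container"} {
		fmt.Println(filepath.Join(home, "dir", filepath.Base(id)))
	}
}
```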
+func (d *Driver) Get(id, mountLabel string) (string, error) { + dir := d.dir(id) + if st, err := os.Stat(dir); err != nil { + return "", err + } else if !st.IsDir() { + return "", fmt.Errorf("%s: not a directory", dir) + } + return dir, nil +} + +// Put is a noop for vfs that returns nil for the error, since this driver has no runtime resources to clean up. +func (d *Driver) Put(id string) error { + // The vfs driver has no runtime resources (e.g. mounts) + // to clean up, so we don't need anything here + return nil +} + +// Exists checks to see if the directory exists for the given id. +func (d *Driver) Exists(id string) bool { + _, err := os.Stat(d.dir(id)) + return err == nil +} diff --git a/components/engine/daemon/graphdriver/vfs/vfs_test.go b/components/engine/daemon/graphdriver/vfs/vfs_test.go new file mode 100644 index 0000000000..9ecf21dbaa --- /dev/null +++ b/components/engine/daemon/graphdriver/vfs/vfs_test.go @@ -0,0 +1,37 @@ +// +build linux + +package vfs + +import ( + "testing" + + "github.com/docker/docker/daemon/graphdriver/graphtest" + + "github.com/docker/docker/pkg/reexec" +) + +func init() { + reexec.Init() +} + +// This avoids creating a new driver for each test if all tests are run +// Make sure to put new tests between TestVfsSetup and TestVfsTeardown +func TestVfsSetup(t *testing.T) { + graphtest.GetDriver(t, "vfs") +} + +func TestVfsCreateEmpty(t *testing.T) { + graphtest.DriverTestCreateEmpty(t, "vfs") +} + +func TestVfsCreateBase(t *testing.T) { + graphtest.DriverTestCreateBase(t, "vfs") +} + +func TestVfsCreateSnap(t *testing.T) { + graphtest.DriverTestCreateSnap(t, "vfs") +} + +func TestVfsTeardown(t *testing.T) { + graphtest.PutDriver(t) +} diff --git a/components/engine/daemon/graphdriver/windows/windows.go b/components/engine/daemon/graphdriver/windows/windows.go new file mode 100644 index 0000000000..441621f6eb --- /dev/null +++ b/components/engine/daemon/graphdriver/windows/windows.go @@ -0,0 +1,935 @@ +//+build windows + +package windows + +import ( + "bufio" + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "path" + "path/filepath" + "strconv" + "strings" + "sync" + "syscall" + "time" + "unsafe" + + "github.com/Microsoft/go-winio" + "github.com/Microsoft/go-winio/archive/tar" + "github.com/Microsoft/go-winio/backuptar" + "github.com/Microsoft/hcsshim" + "github.com/Sirupsen/logrus" + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/longpath" + "github.com/docker/docker/pkg/reexec" + "github.com/docker/docker/pkg/system" + units "github.com/docker/go-units" + "golang.org/x/sys/windows" +) + +// filterDriver is an HCSShim driver type for the Windows Filter driver. +const filterDriver = 1 + +var ( + // mutatedFiles is a list of files that are mutated by the import process + // and must be backed up and restored. + mutatedFiles = map[string]string{ + "UtilityVM/Files/EFI/Microsoft/Boot/BCD": "bcd.bak", + "UtilityVM/Files/EFI/Microsoft/Boot/BCD.LOG": "bcd.log.bak", + "UtilityVM/Files/EFI/Microsoft/Boot/BCD.LOG1": "bcd.log1.bak", + "UtilityVM/Files/EFI/Microsoft/Boot/BCD.LOG2": "bcd.log2.bak", + } + noreexec = false +) + +// init registers the windows graph driver with the graphdriver registry. 
+func init() { + graphdriver.Register("windowsfilter", InitFilter) + // DOCKER_WINDOWSFILTER_NOREEXEC allows for inline processing which makes + // debugging issues in the re-exec codepath significantly easier. + if os.Getenv("DOCKER_WINDOWSFILTER_NOREEXEC") != "" { + logrus.Warnf("WindowsGraphDriver is set to not re-exec. This is intended for debugging purposes only.") + noreexec = true + } else { + reexec.Register("docker-windows-write-layer", writeLayerReexec) + } +} + +type checker struct { +} + +func (c *checker) IsMounted(path string) bool { + return false +} + +// Driver represents a windows graph driver. +type Driver struct { + // info stores the shim driver information + info hcsshim.DriverInfo + ctr *graphdriver.RefCounter + // it is safe for windows to use a cache here because it does not support + // restoring containers when the daemon dies. + cacheMu sync.Mutex + cache map[string]string +} + +// InitFilter returns a new Windows storage filter driver. +func InitFilter(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { + logrus.Debugf("WindowsGraphDriver InitFilter at %s", home) + + fsType, err := getFileSystemType(string(home[0])) + if err != nil { + return nil, err + } + if strings.ToLower(fsType) == "refs" { + return nil, fmt.Errorf("%s is on an ReFS volume - ReFS volumes are not supported", home) + } + + d := &Driver{ + info: hcsshim.DriverInfo{ + HomeDir: home, + Flavour: filterDriver, + }, + cache: make(map[string]string), + ctr: graphdriver.NewRefCounter(&checker{}), + } + return d, nil +} + +// win32FromHresult is a helper function to get the win32 error code from an HRESULT +func win32FromHresult(hr uintptr) uintptr { + if hr&0x1fff0000 == 0x00070000 { + return hr & 0xffff + } + return hr +} + +// getFileSystemType obtains the type of a file system through GetVolumeInformation +// https://msdn.microsoft.com/en-us/library/windows/desktop/aa364993(v=vs.85).aspx +func getFileSystemType(drive string) (fsType string, hr error) { + var ( + modkernel32 = windows.NewLazySystemDLL("kernel32.dll") + procGetVolumeInformation = modkernel32.NewProc("GetVolumeInformationW") + buf = make([]uint16, 255) + size = syscall.MAX_PATH + 1 + ) + if len(drive) != 1 { + hr = errors.New("getFileSystemType must be called with a drive letter") + return + } + drive += `:\` + n := uintptr(unsafe.Pointer(nil)) + r0, _, _ := syscall.Syscall9(procGetVolumeInformation.Addr(), 8, uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(drive))), n, n, n, n, n, uintptr(unsafe.Pointer(&buf[0])), uintptr(size), 0) + if int32(r0) < 0 { + hr = syscall.Errno(win32FromHresult(r0)) + } + fsType = syscall.UTF16ToString(buf) + return +} + +// String returns the string representation of a driver. This should match +// the name the graph driver has been registered with. +func (d *Driver) String() string { + return "windowsfilter" +} + +// Status returns the status of the driver. +func (d *Driver) Status() [][2]string { + return [][2]string{ + {"Windows", ""}, + } +} + +// Exists returns true if the given id is registered with this driver. +func (d *Driver) Exists(id string) bool { + rID, err := d.resolveID(id) + if err != nil { + return false + } + result, err := hcsshim.LayerExists(d.info, rID) + if err != nil { + return false + } + return result +} + +// CreateReadWrite creates a layer that is writable for use as a container +// file system. 
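+// Both Create and CreateReadWrite funnel into create(); the readOnly flag
+// is the only difference, selecting hcsshim.CreateLayer for read-only image
+// layers versus hcsshim.CreateSandboxLayer for writable container scratch
+// space in the code below.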
+func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error { + if opts != nil { + return d.create(id, parent, opts.MountLabel, false, opts.StorageOpt) + } + return d.create(id, parent, "", false, nil) +} + +// Create creates a new read-only layer with the given id. +func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error { + if opts != nil { + return d.create(id, parent, opts.MountLabel, true, opts.StorageOpt) + } + return d.create(id, parent, "", true, nil) +} + +func (d *Driver) create(id, parent, mountLabel string, readOnly bool, storageOpt map[string]string) error { + rPId, err := d.resolveID(parent) + if err != nil { + return err + } + + parentChain, err := d.getLayerChain(rPId) + if err != nil { + return err + } + + var layerChain []string + + if rPId != "" { + parentPath, err := hcsshim.GetLayerMountPath(d.info, rPId) + if err != nil { + return err + } + if _, err := os.Stat(filepath.Join(parentPath, "Files")); err == nil { + // This is a legitimate parent layer (not the empty "-init" layer), + // so include it in the layer chain. + layerChain = []string{parentPath} + } + } + + layerChain = append(layerChain, parentChain...) + + if readOnly { + if err := hcsshim.CreateLayer(d.info, id, rPId); err != nil { + return err + } + } else { + var parentPath string + if len(layerChain) != 0 { + parentPath = layerChain[0] + } + + if err := hcsshim.CreateSandboxLayer(d.info, id, parentPath, layerChain); err != nil { + return err + } + + storageOptions, err := parseStorageOpt(storageOpt) + if err != nil { + return fmt.Errorf("Failed to parse storage options - %s", err) + } + + if storageOptions.size != 0 { + if err := hcsshim.ExpandSandboxSize(d.info, id, storageOptions.size); err != nil { + return err + } + } + } + + if _, err := os.Lstat(d.dir(parent)); err != nil { + if err2 := hcsshim.DestroyLayer(d.info, id); err2 != nil { + logrus.Warnf("Failed to DestroyLayer %s: %s", id, err2) + } + return fmt.Errorf("Cannot create layer with missing parent %s: %s", parent, err) + } + + if err := d.setLayerChain(id, layerChain); err != nil { + if err2 := hcsshim.DestroyLayer(d.info, id); err2 != nil { + logrus.Warnf("Failed to DestroyLayer %s: %s", id, err2) + } + return err + } + + return nil +} + +// dir returns the absolute path to the layer. +func (d *Driver) dir(id string) string { + return filepath.Join(d.info.HomeDir, filepath.Base(id)) +} + +// Remove unmounts and removes the dir information. +func (d *Driver) Remove(id string) error { + rID, err := d.resolveID(id) + if err != nil { + return err + } + + // This retry loop is due to a bug in Windows (Internal bug #9432268) + // if GetContainers fails with ErrVmcomputeOperationInvalidState + // it is a transient error. Retry until it succeeds. + var computeSystems []hcsshim.ContainerProperties + retryCount := 0 + osv := system.GetOSVersion() + for { + // Get and terminate any template VMs that are currently using the layer. + // Note: It is unfortunate that we end up in the graphdrivers Remove() call + // for both containers and images, but the logic for template VMs is only + // needed for images - specifically we are looking to see if a base layer + // is in use by a template VM as a result of having started a Hyper-V + // container at some point. 
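+		// Terminating such a template VM releases its hold on the layer
+		// files so that the DestroyLayer call further down can succeed.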
+		//
+		// We have a retry loop for ErrVmcomputeOperationInvalidState and
+		// ErrVmcomputeOperationAccessIsDenied as there is a race condition
+		// in RS1 and RS2 builds during enumeration in HCS when, for example,
+		// a silo is going away underneath it. AccessIsDenied was added to
+		// fix 30278.
+		//
+		// TODO @jhowardmsft - For RS3, we can remove the retries. Also consider
+		// using platform APIs (if available) to get this more succinctly. Also
+		// consider enlightening the Remove() interface to have context of why
+		// the remove is being called - that could improve efficiency by not
+		// enumerating compute systems during a remove of a container as it's
+		// not required.
+		computeSystems, err = hcsshim.GetContainers(hcsshim.ComputeSystemQuery{})
+		if err != nil {
+			if (osv.Build < 15139) &&
+				((err == hcsshim.ErrVmcomputeOperationInvalidState) || (err == hcsshim.ErrVmcomputeOperationAccessIsDenied)) {
+				if retryCount >= 500 {
+					break
+				}
+				retryCount++
+				time.Sleep(10 * time.Millisecond)
+				continue
+			}
+			return err
+		}
+		break
+	}
+
+	for _, computeSystem := range computeSystems {
+		if strings.Contains(computeSystem.RuntimeImagePath, id) && computeSystem.IsRuntimeTemplate {
+			container, err := hcsshim.OpenContainer(computeSystem.ID)
+			if err != nil {
+				return err
+			}
+			defer container.Close()
+			err = container.Terminate()
+			if hcsshim.IsPending(err) {
+				err = container.Wait()
+			} else if hcsshim.IsAlreadyStopped(err) {
+				err = nil
+			}
+
+			if err != nil {
+				return err
+			}
+		}
+	}
+
+	layerPath := filepath.Join(d.info.HomeDir, rID)
+	tmpID := fmt.Sprintf("%s-removing", rID)
+	tmpLayerPath := filepath.Join(d.info.HomeDir, tmpID)
+	if err := os.Rename(layerPath, tmpLayerPath); err != nil && !os.IsNotExist(err) {
+		return err
+	}
+	if err := hcsshim.DestroyLayer(d.info, tmpID); err != nil {
+		logrus.Errorf("Failed to DestroyLayer %s: %s", id, err)
+	}
+
+	return nil
+}
+
+// Get returns the rootfs path for the id. This will mount the dir at its given path.
+func (d *Driver) Get(id, mountLabel string) (string, error) {
+	logrus.Debugf("WindowsGraphDriver Get() id %s mountLabel %s", id, mountLabel)
+	var dir string
+
+	rID, err := d.resolveID(id)
+	if err != nil {
+		return "", err
+	}
+	if count := d.ctr.Increment(rID); count > 1 {
+		return d.cache[rID], nil
+	}
+
+	// Getting the layer paths must be done outside of the lock.
+	layerChain, err := d.getLayerChain(rID)
+	if err != nil {
+		d.ctr.Decrement(rID)
+		return "", err
+	}
+
+	if err := hcsshim.ActivateLayer(d.info, rID); err != nil {
+		d.ctr.Decrement(rID)
+		return "", err
+	}
+	if err := hcsshim.PrepareLayer(d.info, rID, layerChain); err != nil {
+		d.ctr.Decrement(rID)
+		if err2 := hcsshim.DeactivateLayer(d.info, rID); err2 != nil {
+			logrus.Warnf("Failed to Deactivate %s: %s", id, err2)
+		}
+		return "", err
+	}
+
+	mountPath, err := hcsshim.GetLayerMountPath(d.info, rID)
+	if err != nil {
+		d.ctr.Decrement(rID)
+		if err := hcsshim.UnprepareLayer(d.info, rID); err != nil {
+			logrus.Warnf("Failed to Unprepare %s: %s", id, err)
+		}
+		if err2 := hcsshim.DeactivateLayer(d.info, rID); err2 != nil {
+			logrus.Warnf("Failed to Deactivate %s: %s", id, err2)
+		}
+		return "", err
+	}
+	d.cacheMu.Lock()
+	d.cache[rID] = mountPath
+	d.cacheMu.Unlock()
+
+	// If the layer has a mount path, use that. Otherwise, use the
+	// folder path.
+	if mountPath != "" {
+		dir = mountPath
+	} else {
+		dir = d.dir(id)
+	}
+
+	return dir, nil
+}
+
+// Put releases the layer obtained via Get: it decrements the layer's
+// reference count and, once it drops to zero, unprepares and deactivates it.
+func (d *Driver) Put(id string) error {
+	logrus.Debugf("WindowsGraphDriver Put() id %s", id)
+
+	rID, err := d.resolveID(id)
+	if err != nil {
+		return err
+	}
+	if count := d.ctr.Decrement(rID); count > 0 {
+		return nil
+	}
+	d.cacheMu.Lock()
+	_, exists := d.cache[rID]
+	delete(d.cache, rID)
+	d.cacheMu.Unlock()
+
+	// If the cache was not populated, then the layer was left unprepared and deactivated
+	if !exists {
+		return nil
+	}
+
+	if err := hcsshim.UnprepareLayer(d.info, rID); err != nil {
+		return err
+	}
+	return hcsshim.DeactivateLayer(d.info, rID)
+}
+
+// Cleanup ensures the information the driver stores is properly removed.
+// We use this opportunity to clean up any -removing folders which may
+// still be left if the daemon was killed while it was removing a layer.
+func (d *Driver) Cleanup() error {
+
+	items, err := ioutil.ReadDir(d.info.HomeDir)
+	if err != nil {
+		if os.IsNotExist(err) {
+			return nil
+		}
+		return err
+	}
+
+	// Note we don't return an error below - it's possible the files
+	// are locked. However, next time around after the daemon exits,
+	// we will likely be able to clean up successfully. Instead we log
+	// warnings if there are errors.
+	for _, item := range items {
+		if item.IsDir() && strings.HasSuffix(item.Name(), "-removing") {
+			if err := hcsshim.DestroyLayer(d.info, item.Name()); err != nil {
+				logrus.Warnf("Failed to cleanup %s: %s", item.Name(), err)
+			} else {
+				logrus.Infof("Cleaned up %s", item.Name())
+			}
+		}
+	}
+
+	return nil
+}
+
+// Diff produces an archive of the changes between the specified
+// layer and its parent layer which may be "".
+// The layer should be mounted when calling this function.
+func (d *Driver) Diff(id, parent string) (_ io.ReadCloser, err error) {
+	rID, err := d.resolveID(id)
+	if err != nil {
+		return
+	}
+
+	layerChain, err := d.getLayerChain(rID)
+	if err != nil {
+		return
+	}
+
+	// The layer must be unprepared before it can be exported; prepare()
+	// restores it afterwards so the caller still sees a mounted layer.
+	if err := hcsshim.UnprepareLayer(d.info, rID); err != nil {
+		return nil, err
+	}
+	prepare := func() {
+		if err := hcsshim.PrepareLayer(d.info, rID, layerChain); err != nil {
+			logrus.Warnf("Failed to re-PrepareLayer %s: %s", rID, err)
+		}
+	}
+
+	arch, err := d.exportLayer(rID, layerChain)
+	if err != nil {
+		prepare()
+		return
+	}
+	return ioutils.NewReadCloserWrapper(arch, func() error {
+		err := arch.Close()
+		prepare()
+		return err
+	}), nil
+}
+
+// Changes produces a list of changes between the specified layer
+// and its parent layer. If parent is "", then all changes will be ADD changes.
+// The layer should not be mounted when calling this function.
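+// Each archive.Change carries a slash-separated path and a Kind: deletions
+// surface as ChangeDelete, everything else as ChangeModify (adds are not
+// distinguishable from modifications, per the comment in the loop below).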
+func (d *Driver) Changes(id, parent string) ([]archive.Change, error) { + rID, err := d.resolveID(id) + if err != nil { + return nil, err + } + parentChain, err := d.getLayerChain(rID) + if err != nil { + return nil, err + } + + if err := hcsshim.ActivateLayer(d.info, rID); err != nil { + return nil, err + } + defer func() { + if err2 := hcsshim.DeactivateLayer(d.info, rID); err2 != nil { + logrus.Errorf("changes() failed to DeactivateLayer %s %s: %s", id, rID, err2) + } + }() + + var changes []archive.Change + err = winio.RunWithPrivilege(winio.SeBackupPrivilege, func() error { + r, err := hcsshim.NewLayerReader(d.info, id, parentChain) + if err != nil { + return err + } + defer r.Close() + + for { + name, _, fileInfo, err := r.Next() + if err == io.EOF { + return nil + } + if err != nil { + return err + } + name = filepath.ToSlash(name) + if fileInfo == nil { + changes = append(changes, archive.Change{Path: name, Kind: archive.ChangeDelete}) + } else { + // Currently there is no way to tell between an add and a modify. + changes = append(changes, archive.Change{Path: name, Kind: archive.ChangeModify}) + } + } + }) + if err != nil { + return nil, err + } + + return changes, nil +} + +// ApplyDiff extracts the changeset from the given diff into the +// layer with the specified id and parent, returning the size of the +// new layer in bytes. +// The layer should not be mounted when calling this function +func (d *Driver) ApplyDiff(id, parent string, diff io.Reader) (int64, error) { + var layerChain []string + if parent != "" { + rPId, err := d.resolveID(parent) + if err != nil { + return 0, err + } + parentChain, err := d.getLayerChain(rPId) + if err != nil { + return 0, err + } + parentPath, err := hcsshim.GetLayerMountPath(d.info, rPId) + if err != nil { + return 0, err + } + layerChain = append(layerChain, parentPath) + layerChain = append(layerChain, parentChain...) + } + + size, err := d.importLayer(id, diff, layerChain) + if err != nil { + return 0, err + } + + if err = d.setLayerChain(id, layerChain); err != nil { + return 0, err + } + + return size, nil +} + +// DiffSize calculates the changes between the specified layer +// and its parent and returns the size in bytes of the changes +// relative to its base filesystem directory. +func (d *Driver) DiffSize(id, parent string) (size int64, err error) { + rPId, err := d.resolveID(parent) + if err != nil { + return + } + + changes, err := d.Changes(id, rPId) + if err != nil { + return + } + + layerFs, err := d.Get(id, "") + if err != nil { + return + } + defer d.Put(id) + + return archive.ChangesSize(layerFs, changes), nil +} + +// GetMetadata returns custom driver information. +func (d *Driver) GetMetadata(id string) (map[string]string, error) { + m := make(map[string]string) + m["dir"] = d.dir(id) + return m, nil +} + +func writeTarFromLayer(r hcsshim.LayerReader, w io.Writer) error { + t := tar.NewWriter(w) + for { + name, size, fileInfo, err := r.Next() + if err == io.EOF { + break + } + if err != nil { + return err + } + if fileInfo == nil { + // Write a whiteout file. + hdr := &tar.Header{ + Name: filepath.ToSlash(filepath.Join(filepath.Dir(name), archive.WhiteoutPrefix+filepath.Base(name))), + } + err := t.WriteHeader(hdr) + if err != nil { + return err + } + } else { + err = backuptar.WriteTarFileFromBackupStream(t, r, name, size, fileInfo) + if err != nil { + return err + } + } + } + return t.Close() +} + +// exportLayer generates an archive from a layer based on the given ID. 
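+// The walk runs in a goroutine and streams through an io.Pipe, so the
+// caller gets a ReadCloser whose reads are backed by the layer export;
+// any error is surfaced to the reader via CloseWithError.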
+func (d *Driver) exportLayer(id string, parentLayerPaths []string) (io.ReadCloser, error) { + archive, w := io.Pipe() + go func() { + err := winio.RunWithPrivilege(winio.SeBackupPrivilege, func() error { + r, err := hcsshim.NewLayerReader(d.info, id, parentLayerPaths) + if err != nil { + return err + } + + err = writeTarFromLayer(r, w) + cerr := r.Close() + if err == nil { + err = cerr + } + return err + }) + w.CloseWithError(err) + }() + + return archive, nil +} + +// writeBackupStreamFromTarAndSaveMutatedFiles reads data from a tar stream and +// writes it to a backup stream, and also saves any files that will be mutated +// by the import layer process to a backup location. +func writeBackupStreamFromTarAndSaveMutatedFiles(buf *bufio.Writer, w io.Writer, t *tar.Reader, hdr *tar.Header, root string) (nextHdr *tar.Header, err error) { + var bcdBackup *os.File + var bcdBackupWriter *winio.BackupFileWriter + if backupPath, ok := mutatedFiles[hdr.Name]; ok { + bcdBackup, err = os.Create(filepath.Join(root, backupPath)) + if err != nil { + return nil, err + } + defer func() { + cerr := bcdBackup.Close() + if err == nil { + err = cerr + } + }() + + bcdBackupWriter = winio.NewBackupFileWriter(bcdBackup, false) + defer func() { + cerr := bcdBackupWriter.Close() + if err == nil { + err = cerr + } + }() + + buf.Reset(io.MultiWriter(w, bcdBackupWriter)) + } else { + buf.Reset(w) + } + + defer func() { + ferr := buf.Flush() + if err == nil { + err = ferr + } + }() + + return backuptar.WriteBackupStreamFromTarFile(buf, t, hdr) +} + +func writeLayerFromTar(r io.Reader, w hcsshim.LayerWriter, root string) (int64, error) { + t := tar.NewReader(r) + hdr, err := t.Next() + totalSize := int64(0) + buf := bufio.NewWriter(nil) + for err == nil { + base := path.Base(hdr.Name) + if strings.HasPrefix(base, archive.WhiteoutPrefix) { + name := path.Join(path.Dir(hdr.Name), base[len(archive.WhiteoutPrefix):]) + err = w.Remove(filepath.FromSlash(name)) + if err != nil { + return 0, err + } + hdr, err = t.Next() + } else if hdr.Typeflag == tar.TypeLink { + err = w.AddLink(filepath.FromSlash(hdr.Name), filepath.FromSlash(hdr.Linkname)) + if err != nil { + return 0, err + } + hdr, err = t.Next() + } else { + var ( + name string + size int64 + fileInfo *winio.FileBasicInfo + ) + name, size, fileInfo, err = backuptar.FileInfoFromHeader(hdr) + if err != nil { + return 0, err + } + err = w.Add(filepath.FromSlash(name), fileInfo) + if err != nil { + return 0, err + } + hdr, err = writeBackupStreamFromTarAndSaveMutatedFiles(buf, w, t, hdr, root) + totalSize += size + } + } + if err != io.EOF { + return 0, err + } + return totalSize, nil +} + +// importLayer adds a new layer to the tag and graph store based on the given data. +func (d *Driver) importLayer(id string, layerData io.Reader, parentLayerPaths []string) (size int64, err error) { + if !noreexec { + cmd := reexec.Command(append([]string{"docker-windows-write-layer", d.info.HomeDir, id}, parentLayerPaths...)...) + output := bytes.NewBuffer(nil) + cmd.Stdin = layerData + cmd.Stdout = output + cmd.Stderr = output + + if err = cmd.Start(); err != nil { + return + } + + if err = cmd.Wait(); err != nil { + return 0, fmt.Errorf("re-exec error: %v: output: %s", err, output) + } + + return strconv.ParseInt(output.String(), 10, 64) + } + return writeLayer(layerData, d.info.HomeDir, id, parentLayerPaths...) 
+}
+
+// writeLayerReexec is the re-exec entry point for writing a layer from a tar file
+func writeLayerReexec() {
+	size, err := writeLayer(os.Stdin, os.Args[1], os.Args[2], os.Args[3:]...)
+	if err != nil {
+		fmt.Fprint(os.Stderr, err)
+		os.Exit(1)
+	}
+	fmt.Fprint(os.Stdout, size)
+}
+
+// writeLayer writes a layer from a tar file.
+func writeLayer(layerData io.Reader, home string, id string, parentLayerPaths ...string) (int64, error) {
+	err := winio.EnableProcessPrivileges([]string{winio.SeBackupPrivilege, winio.SeRestorePrivilege})
+	if err != nil {
+		return 0, err
+	}
+	if noreexec {
+		defer func() {
+			if err := winio.DisableProcessPrivileges([]string{winio.SeBackupPrivilege, winio.SeRestorePrivilege}); err != nil {
+				// This should never happen, but just in case when in debugging mode.
+				// See https://github.com/docker/docker/pull/28002#discussion_r86259241 for rationale.
+				panic("Failed to disable process privileges while in non re-exec mode")
+			}
+		}()
+	}
+
+	info := hcsshim.DriverInfo{
+		Flavour: filterDriver,
+		HomeDir: home,
+	}
+
+	w, err := hcsshim.NewLayerWriter(info, id, parentLayerPaths)
+	if err != nil {
+		return 0, err
+	}
+
+	size, err := writeLayerFromTar(layerData, w, filepath.Join(home, id))
+	if err != nil {
+		return 0, err
+	}
+
+	err = w.Close()
+	if err != nil {
+		return 0, err
+	}
+
+	return size, nil
+}
+
+// resolveID computes the layerID information based on the given id.
+func (d *Driver) resolveID(id string) (string, error) {
+	content, err := ioutil.ReadFile(filepath.Join(d.dir(id), "layerID"))
+	if os.IsNotExist(err) {
+		return id, nil
+	} else if err != nil {
+		return "", err
+	}
+	return string(content), nil
+}
+
+// setID stores the layerId on disk.
+func (d *Driver) setID(id, altID string) error {
+	return ioutil.WriteFile(filepath.Join(d.dir(id), "layerId"), []byte(altID), 0600)
+}
+
+// getLayerChain returns the layer chain information.
+func (d *Driver) getLayerChain(id string) ([]string, error) {
+	jPath := filepath.Join(d.dir(id), "layerchain.json")
+	content, err := ioutil.ReadFile(jPath)
+	if os.IsNotExist(err) {
+		return nil, nil
+	} else if err != nil {
+		return nil, fmt.Errorf("Unable to read layerchain file - %s", err)
+	}
+
+	var layerChain []string
+	err = json.Unmarshal(content, &layerChain)
+	if err != nil {
+		return nil, fmt.Errorf("Failed to unmarshal layerchain json - %s", err)
+	}
+
+	return layerChain, nil
+}
+
+// setLayerChain stores the layer chain information on disk.
+func (d *Driver) setLayerChain(id string, chain []string) error {
+	content, err := json.Marshal(&chain)
+	if err != nil {
+		return fmt.Errorf("Failed to marshal layerchain json - %s", err)
+	}
+
+	jPath := filepath.Join(d.dir(id), "layerchain.json")
+	err = ioutil.WriteFile(jPath, content, 0600)
+	if err != nil {
+		return fmt.Errorf("Unable to write layerchain file - %s", err)
+	}
+
+	return nil
+}
+
+type fileGetCloserWithBackupPrivileges struct {
+	path string
+}
+
+func (fg *fileGetCloserWithBackupPrivileges) Get(filename string) (io.ReadCloser, error) {
+	if backupPath, ok := mutatedFiles[filename]; ok {
+		return os.Open(filepath.Join(fg.path, backupPath))
+	}
+
+	var f *os.File
+	// Open the file while holding the Windows backup privilege. This ensures that the
+	// file can be opened even if the caller does not actually have access to it according
+	// to the security descriptor.
+	// Also use sequential file access to avoid depleting the
+	// standby list - Microsoft VSO Bug Tracker #9900466
+	err := winio.RunWithPrivilege(winio.SeBackupPrivilege, func() error {
+		path := longpath.AddPrefix(filepath.Join(fg.path, filename))
+		p, err := syscall.UTF16FromString(path)
+		if err != nil {
+			return err
+		}
+		const fileFlagSequentialScan = 0x08000000 // FILE_FLAG_SEQUENTIAL_SCAN
+		h, err := syscall.CreateFile(&p[0], syscall.GENERIC_READ, syscall.FILE_SHARE_READ, nil, syscall.OPEN_EXISTING, syscall.FILE_FLAG_BACKUP_SEMANTICS|fileFlagSequentialScan, 0)
+		if err != nil {
+			return &os.PathError{Op: "open", Path: path, Err: err}
+		}
+		f = os.NewFile(uintptr(h), path)
+		return nil
+	})
+	return f, err
+}
+
+func (fg *fileGetCloserWithBackupPrivileges) Close() error {
+	return nil
+}
+
+// DiffGetter returns a FileGetCloser that can read files from the directory that
+// contains files for the layer differences. Used for direct access for tar-split.
+func (d *Driver) DiffGetter(id string) (graphdriver.FileGetCloser, error) {
+	id, err := d.resolveID(id)
+	if err != nil {
+		return nil, err
+	}
+
+	return &fileGetCloserWithBackupPrivileges{d.dir(id)}, nil
+}
+
+type storageOptions struct {
+	size uint64
+}
+
+func parseStorageOpt(storageOpt map[string]string) (*storageOptions, error) {
+	options := storageOptions{}
+
+	// Read size to change the block device size per container.
+	for key, val := range storageOpt {
+		key := strings.ToLower(key)
+		switch key {
+		case "size":
+			size, err := units.RAMInBytes(val)
+			if err != nil {
+				return nil, err
+			}
+			options.size = uint64(size)
+		default:
+			return nil, fmt.Errorf("Unknown storage option: %s", key)
+		}
+	}
+	return &options, nil
+}
diff --git a/components/engine/daemon/graphdriver/zfs/MAINTAINERS b/components/engine/daemon/graphdriver/zfs/MAINTAINERS
new file mode 100644
index 0000000000..9c270c541f
--- /dev/null
+++ b/components/engine/daemon/graphdriver/zfs/MAINTAINERS
@@ -0,0 +1,2 @@
+Jörg Thalheim (@Mic92)
+Arthur Gautier (@baloose)
diff --git a/components/engine/daemon/graphdriver/zfs/zfs.go b/components/engine/daemon/graphdriver/zfs/zfs.go
new file mode 100644
index 0000000000..bc2b4192a1
--- /dev/null
+++ b/components/engine/daemon/graphdriver/zfs/zfs.go
@@ -0,0 +1,420 @@
+// +build linux freebsd solaris
+
+package zfs
+
+import (
+	"fmt"
+	"os"
+	"os/exec"
+	"path"
+	"strconv"
+	"strings"
+	"sync"
+	"syscall"
+	"time"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/docker/daemon/graphdriver"
+	"github.com/docker/docker/pkg/idtools"
+	"github.com/docker/docker/pkg/mount"
+	"github.com/docker/docker/pkg/parsers"
+	zfs "github.com/mistifyio/go-zfs"
+	"github.com/opencontainers/selinux/go-selinux/label"
+)
+
+type zfsOptions struct {
+	fsName    string
+	mountPath string
+}
+
+func init() {
+	graphdriver.Register("zfs", Init)
+}
+
+// Logger returns a zfs logger implementation.
+type Logger struct{}
+
+// Log wraps log message from ZFS driver with a prefix '[zfs]'.
+func (*Logger) Log(cmd []string) {
+	logrus.Debugf("[zfs] %s", strings.Join(cmd, " "))
+}
+
+// Init returns a new ZFS driver.
+// It takes the base mount path and an array of options which are represented as key value pairs.
+// Each option is in the form key=value. 'zfs.fsname' is expected to be a valid key in the options.
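+// A typical invocation reaches this driver via daemon flags, e.g. (the
+// pool/dataset name is an assumption for illustration):
+//
+//	dockerd --storage-driver zfs --storage-opt zfs.fsname=zroot/docker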
+func Init(base string, opt []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { + var err error + + if _, err := exec.LookPath("zfs"); err != nil { + logrus.Debugf("[zfs] zfs command is not available: %v", err) + return nil, graphdriver.ErrPrerequisites + } + + file, err := os.OpenFile("/dev/zfs", os.O_RDWR, 600) + if err != nil { + logrus.Debugf("[zfs] cannot open /dev/zfs: %v", err) + return nil, graphdriver.ErrPrerequisites + } + defer file.Close() + + options, err := parseOptions(opt) + if err != nil { + return nil, err + } + options.mountPath = base + + rootdir := path.Dir(base) + + if options.fsName == "" { + err = checkRootdirFs(rootdir) + if err != nil { + return nil, err + } + } + + if options.fsName == "" { + options.fsName, err = lookupZfsDataset(rootdir) + if err != nil { + return nil, err + } + } + + zfs.SetLogger(new(Logger)) + + filesystems, err := zfs.Filesystems(options.fsName) + if err != nil { + return nil, fmt.Errorf("Cannot find root filesystem %s: %v", options.fsName, err) + } + + filesystemsCache := make(map[string]bool, len(filesystems)) + var rootDataset *zfs.Dataset + for _, fs := range filesystems { + if fs.Name == options.fsName { + rootDataset = fs + } + filesystemsCache[fs.Name] = true + } + + if rootDataset == nil { + return nil, fmt.Errorf("BUG: zfs get all -t filesystem -rHp '%s' should contain '%s'", options.fsName, options.fsName) + } + + rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps) + if err != nil { + return nil, fmt.Errorf("Failed to get root uid/guid: %v", err) + } + if err := idtools.MkdirAllAs(base, 0700, rootUID, rootGID); err != nil { + return nil, fmt.Errorf("Failed to create '%s': %v", base, err) + } + + if err := mount.MakePrivate(base); err != nil { + return nil, err + } + d := &Driver{ + dataset: rootDataset, + options: options, + filesystemsCache: filesystemsCache, + uidMaps: uidMaps, + gidMaps: gidMaps, + ctr: graphdriver.NewRefCounter(graphdriver.NewDefaultChecker()), + } + return graphdriver.NewNaiveDiffDriver(d, uidMaps, gidMaps), nil +} + +func parseOptions(opt []string) (zfsOptions, error) { + var options zfsOptions + options.fsName = "" + for _, option := range opt { + key, val, err := parsers.ParseKeyValueOpt(option) + if err != nil { + return options, err + } + key = strings.ToLower(key) + switch key { + case "zfs.fsname": + options.fsName = val + default: + return options, fmt.Errorf("Unknown option %s", key) + } + } + return options, nil +} + +func lookupZfsDataset(rootdir string) (string, error) { + var stat syscall.Stat_t + if err := syscall.Stat(rootdir, &stat); err != nil { + return "", fmt.Errorf("Failed to access '%s': %s", rootdir, err) + } + wantedDev := stat.Dev + + mounts, err := mount.GetMounts() + if err != nil { + return "", err + } + for _, m := range mounts { + if err := syscall.Stat(m.Mountpoint, &stat); err != nil { + logrus.Debugf("[zfs] failed to stat '%s' while scanning for zfs mount: %v", m.Mountpoint, err) + continue // may fail on fuse file systems + } + + if stat.Dev == wantedDev && m.Fstype == "zfs" { + return m.Source, nil + } + } + + return "", fmt.Errorf("Failed to find zfs dataset mounted on '%s' in /proc/mounts", rootdir) +} + +// Driver holds information about the driver, such as zfs dataset, options and cache. 
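+// The filesystemsCache is populated once at Init time from the output of
+// zfs.Filesystems and kept in sync by create/Remove under the embedded
+// Mutex, so Exists can answer without shelling out to the zfs binary.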
+type Driver struct { + dataset *zfs.Dataset + options zfsOptions + sync.Mutex // protects filesystem cache against concurrent access + filesystemsCache map[string]bool + uidMaps []idtools.IDMap + gidMaps []idtools.IDMap + ctr *graphdriver.RefCounter +} + +func (d *Driver) String() string { + return "zfs" +} + +// Cleanup is used to implement graphdriver.ProtoDriver. There is no cleanup required for this driver. +func (d *Driver) Cleanup() error { + return nil +} + +// Status returns information about the ZFS filesystem. It returns a two dimensional array of information +// such as pool name, dataset name, disk usage, parent quota and compression used. +// Currently it return 'Zpool', 'Zpool Health', 'Parent Dataset', 'Space Used By Parent', +// 'Space Available', 'Parent Quota' and 'Compression'. +func (d *Driver) Status() [][2]string { + parts := strings.Split(d.dataset.Name, "/") + pool, err := zfs.GetZpool(parts[0]) + + var poolName, poolHealth string + if err == nil { + poolName = pool.Name + poolHealth = pool.Health + } else { + poolName = fmt.Sprintf("error while getting pool information %v", err) + poolHealth = "not available" + } + + quota := "no" + if d.dataset.Quota != 0 { + quota = strconv.FormatUint(d.dataset.Quota, 10) + } + + return [][2]string{ + {"Zpool", poolName}, + {"Zpool Health", poolHealth}, + {"Parent Dataset", d.dataset.Name}, + {"Space Used By Parent", strconv.FormatUint(d.dataset.Used, 10)}, + {"Space Available", strconv.FormatUint(d.dataset.Avail, 10)}, + {"Parent Quota", quota}, + {"Compression", d.dataset.Compression}, + } +} + +// GetMetadata returns image/container metadata related to graph driver +func (d *Driver) GetMetadata(id string) (map[string]string, error) { + return map[string]string{ + "Mountpoint": d.mountPath(id), + "Dataset": d.zfsPath(id), + }, nil +} + +func (d *Driver) cloneFilesystem(name, parentName string) error { + snapshotName := fmt.Sprintf("%d", time.Now().Nanosecond()) + parentDataset := zfs.Dataset{Name: parentName} + snapshot, err := parentDataset.Snapshot(snapshotName /*recursive */, false) + if err != nil { + return err + } + + _, err = snapshot.Clone(name, map[string]string{"mountpoint": "legacy"}) + if err == nil { + d.Lock() + d.filesystemsCache[name] = true + d.Unlock() + } + + if err != nil { + snapshot.Destroy(zfs.DestroyDeferDeletion) + return err + } + return snapshot.Destroy(zfs.DestroyDeferDeletion) +} + +func (d *Driver) zfsPath(id string) string { + return d.options.fsName + "/" + id +} + +func (d *Driver) mountPath(id string) string { + return path.Join(d.options.mountPath, "graph", getMountpoint(id)) +} + +// CreateReadWrite creates a layer that is writable for use as a container +// file system. +func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error { + return d.Create(id, parent, opts) +} + +// Create prepares the dataset and filesystem for the ZFS driver for the given id under the parent. 
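+// Under the hood a child layer is a clone of a snapshot of its parent
+// dataset (see cloneFilesystem), roughly equivalent to (names assumed):
+//
+//	zfs snapshot zroot/docker/<parent>@<nanos>
+//	zfs clone -o mountpoint=legacy zroot/docker/<parent>@<nanos> zroot/docker/<id>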
+func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error { + var storageOpt map[string]string + if opts != nil { + storageOpt = opts.StorageOpt + } + + err := d.create(id, parent, storageOpt) + if err == nil { + return nil + } + if zfsError, ok := err.(*zfs.Error); ok { + if !strings.HasSuffix(zfsError.Stderr, "dataset already exists\n") { + return err + } + // aborted build -> cleanup + } else { + return err + } + + dataset := zfs.Dataset{Name: d.zfsPath(id)} + if err := dataset.Destroy(zfs.DestroyRecursiveClones); err != nil { + return err + } + + // retry + return d.create(id, parent, storageOpt) +} + +func (d *Driver) create(id, parent string, storageOpt map[string]string) error { + name := d.zfsPath(id) + quota, err := parseStorageOpt(storageOpt) + if err != nil { + return err + } + if parent == "" { + mountoptions := map[string]string{"mountpoint": "legacy"} + fs, err := zfs.CreateFilesystem(name, mountoptions) + if err == nil { + err = setQuota(name, quota) + if err == nil { + d.Lock() + d.filesystemsCache[fs.Name] = true + d.Unlock() + } + } + return err + } + err = d.cloneFilesystem(name, d.zfsPath(parent)) + if err == nil { + err = setQuota(name, quota) + } + return err +} + +func parseStorageOpt(storageOpt map[string]string) (string, error) { + // Read size to change the disk quota per container + for k, v := range storageOpt { + key := strings.ToLower(k) + switch key { + case "size": + return v, nil + default: + return "0", fmt.Errorf("Unknown option %s", key) + } + } + return "0", nil +} + +func setQuota(name string, quota string) error { + if quota == "0" { + return nil + } + fs, err := zfs.GetDataset(name) + if err != nil { + return err + } + return fs.SetProperty("quota", quota) +} + +// Remove deletes the dataset, filesystem and the cache for the given id. +func (d *Driver) Remove(id string) error { + name := d.zfsPath(id) + dataset := zfs.Dataset{Name: name} + err := dataset.Destroy(zfs.DestroyRecursive) + if err == nil { + d.Lock() + delete(d.filesystemsCache, name) + d.Unlock() + } + return err +} + +// Get returns the mountpoint for the given id after creating the target directories if necessary. 
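+// Mounts are reference-counted: only the first Get for an id performs the
+// mount(2); subsequent calls just bump the counter that Put later decrements.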
+func (d *Driver) Get(id, mountLabel string) (string, error) { + mountpoint := d.mountPath(id) + if count := d.ctr.Increment(mountpoint); count > 1 { + return mountpoint, nil + } + + filesystem := d.zfsPath(id) + options := label.FormatMountLabel("", mountLabel) + logrus.Debugf(`[zfs] mount("%s", "%s", "%s")`, filesystem, mountpoint, options) + + rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) + if err != nil { + d.ctr.Decrement(mountpoint) + return "", err + } + // Create the target directories if they don't exist + if err := idtools.MkdirAllAs(mountpoint, 0755, rootUID, rootGID); err != nil { + d.ctr.Decrement(mountpoint) + return "", err + } + + if err := mount.Mount(filesystem, mountpoint, "zfs", options); err != nil { + d.ctr.Decrement(mountpoint) + return "", fmt.Errorf("error creating zfs mount of %s to %s: %v", filesystem, mountpoint, err) + } + + // this could be our first mount after creation of the filesystem, and the root dir may still have root + // permissions instead of the remapped root uid:gid (if user namespaces are enabled): + if err := os.Chown(mountpoint, rootUID, rootGID); err != nil { + mount.Unmount(mountpoint) + d.ctr.Decrement(mountpoint) + return "", fmt.Errorf("error modifying zfs mountpoint (%s) directory ownership: %v", mountpoint, err) + } + + return mountpoint, nil +} + +// Put removes the existing mountpoint for the given id if it exists. +func (d *Driver) Put(id string) error { + mountpoint := d.mountPath(id) + if count := d.ctr.Decrement(mountpoint); count > 0 { + return nil + } + mounted, err := graphdriver.Mounted(graphdriver.FsMagicZfs, mountpoint) + if err != nil || !mounted { + return err + } + + logrus.Debugf(`[zfs] unmount("%s")`, mountpoint) + + if err := mount.Unmount(mountpoint); err != nil { + return fmt.Errorf("error unmounting to %s: %v", mountpoint, err) + } + return nil +} + +// Exists checks to see if the cache entry exists for the given id. +func (d *Driver) Exists(id string) bool { + d.Lock() + defer d.Unlock() + return d.filesystemsCache[d.zfsPath(id)] == true +} diff --git a/components/engine/daemon/graphdriver/zfs/zfs_freebsd.go b/components/engine/daemon/graphdriver/zfs/zfs_freebsd.go new file mode 100644 index 0000000000..1c05fa794c --- /dev/null +++ b/components/engine/daemon/graphdriver/zfs/zfs_freebsd.go @@ -0,0 +1,38 @@ +package zfs + +import ( + "fmt" + "strings" + "syscall" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/daemon/graphdriver" +) + +func checkRootdirFs(rootdir string) error { + var buf syscall.Statfs_t + if err := syscall.Statfs(rootdir, &buf); err != nil { + return fmt.Errorf("Failed to access '%s': %s", rootdir, err) + } + + // on FreeBSD buf.Fstypename contains ['z', 'f', 's', 0 ... 
]
+	if (buf.Fstypename[0] != 122) || (buf.Fstypename[1] != 102) || (buf.Fstypename[2] != 115) || (buf.Fstypename[3] != 0) {
+		logrus.Debugf("[zfs] no zfs dataset found for rootdir '%s'", rootdir)
+		return graphdriver.ErrPrerequisites
+	}
+
+	return nil
+}
+
+func getMountpoint(id string) string {
+	maxlen := 12
+
+	// we need to preserve filesystem suffix
+	suffix := strings.SplitN(id, "-", 2)
+
+	if len(suffix) > 1 {
+		return id[:maxlen] + "-" + suffix[1]
+	}
+
+	return id[:maxlen]
+}
diff --git a/components/engine/daemon/graphdriver/zfs/zfs_linux.go b/components/engine/daemon/graphdriver/zfs/zfs_linux.go
new file mode 100644
index 0000000000..52ed516049
--- /dev/null
+++ b/components/engine/daemon/graphdriver/zfs/zfs_linux.go
@@ -0,0 +1,27 @@
+package zfs
+
+import (
+	"fmt"
+	"syscall"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/docker/daemon/graphdriver"
+)
+
+func checkRootdirFs(rootdir string) error {
+	var buf syscall.Statfs_t
+	if err := syscall.Statfs(rootdir, &buf); err != nil {
+		return fmt.Errorf("Failed to access '%s': %s", rootdir, err)
+	}
+
+	if graphdriver.FsMagic(buf.Type) != graphdriver.FsMagicZfs {
+		logrus.Debugf("[zfs] no zfs dataset found for rootdir '%s'", rootdir)
+		return graphdriver.ErrPrerequisites
+	}
+
+	return nil
+}
+
+func getMountpoint(id string) string {
+	return id
+}
diff --git a/components/engine/daemon/graphdriver/zfs/zfs_solaris.go b/components/engine/daemon/graphdriver/zfs/zfs_solaris.go
new file mode 100644
index 0000000000..bb4a85bd64
--- /dev/null
+++ b/components/engine/daemon/graphdriver/zfs/zfs_solaris.go
@@ -0,0 +1,59 @@
+// +build solaris,cgo
+
+package zfs
+
+/*
+#include <sys/statvfs.h>
+#include <stdlib.h>
+
+static inline struct statvfs *getstatfs(char *s) {
+	struct statvfs *buf;
+	int err;
+	buf = (struct statvfs *)malloc(sizeof(struct statvfs));
+	err = statvfs(s, buf);
+	return buf;
+}
+*/
+import "C"
+import (
+	"path/filepath"
+	"strings"
+	"unsafe"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/docker/daemon/graphdriver"
+)
+
+func checkRootdirFs(rootdir string) error {
+
+	cs := C.CString(filepath.Dir(rootdir))
+	buf := C.getstatfs(cs)
+
+	// on Solaris buf.f_basetype contains ['z', 'f', 's', 0 ... ]
+	if (buf.f_basetype[0] != 122) || (buf.f_basetype[1] != 102) || (buf.f_basetype[2] != 115) ||
+		(buf.f_basetype[3] != 0) {
+		logrus.Debugf("[zfs] no zfs dataset found for rootdir '%s'", rootdir)
+		C.free(unsafe.Pointer(buf))
+		C.free(unsafe.Pointer(cs))
+		return graphdriver.ErrPrerequisites
+	}
+
+	C.free(unsafe.Pointer(buf))
+	C.free(unsafe.Pointer(cs))
+	return nil
+}
+
+/* rootfs is introduced to comply with the OCI spec,
+which states that the root filesystem must be mounted at /rootfs/ instead of /
+*/
+func getMountpoint(id string) string {
+	maxlen := 12
+
+	// we need to preserve filesystem suffix
+	suffix := strings.SplitN(id, "-", 2)
+
+	if len(suffix) > 1 {
+		return filepath.Join(id[:maxlen]+"-"+suffix[1], "rootfs", "root")
+	}
+
+	return filepath.Join(id[:maxlen], "rootfs", "root")
+}
diff --git a/components/engine/daemon/graphdriver/zfs/zfs_test.go b/components/engine/daemon/graphdriver/zfs/zfs_test.go
new file mode 100644
index 0000000000..3e22928438
--- /dev/null
+++ b/components/engine/daemon/graphdriver/zfs/zfs_test.go
@@ -0,0 +1,35 @@
+// +build linux
+
+package zfs
+
+import (
+	"testing"
+
+	"github.com/docker/docker/daemon/graphdriver/graphtest"
+)
+
+// This avoids creating a new driver for each test if all tests are run
+// Make sure to put new tests between TestZfsSetup and TestZfsTeardown
+func TestZfsSetup(t *testing.T) {
+	graphtest.GetDriver(t, "zfs")
+}
+
+func TestZfsCreateEmpty(t *testing.T) {
+	graphtest.DriverTestCreateEmpty(t, "zfs")
+}
+
+func TestZfsCreateBase(t *testing.T) {
+	graphtest.DriverTestCreateBase(t, "zfs")
+}
+
+func TestZfsCreateSnap(t *testing.T) {
+	graphtest.DriverTestCreateSnap(t, "zfs")
+}
+
+func TestZfsSetQuota(t *testing.T) {
+	graphtest.DriverTestSetQuota(t, "zfs")
+}
+
+func TestZfsTeardown(t *testing.T) {
+	graphtest.PutDriver(t)
+}
diff --git a/components/engine/daemon/graphdriver/zfs/zfs_unsupported.go b/components/engine/daemon/graphdriver/zfs/zfs_unsupported.go
new file mode 100644
index 0000000000..ce8daadaf6
--- /dev/null
+++ b/components/engine/daemon/graphdriver/zfs/zfs_unsupported.go
@@ -0,0 +1,11 @@
+// +build !linux,!freebsd,!solaris
+
+package zfs
+
+func checkRootdirFs(rootdir string) error {
+	return nil
+}
+
+func getMountpoint(id string) string {
+	return id
+}
diff --git a/components/engine/daemon/health.go b/components/engine/daemon/health.go
new file mode 100644
index 0000000000..caa8db8447
--- /dev/null
+++ b/components/engine/daemon/health.go
@@ -0,0 +1,363 @@
+package daemon
+
+import (
+	"bytes"
+	"fmt"
+	"runtime"
+	"strings"
+	"sync"
+	"time"
+
+	"golang.org/x/net/context"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/docker/api/types"
+	containertypes "github.com/docker/docker/api/types/container"
+	"github.com/docker/docker/api/types/strslice"
+	"github.com/docker/docker/container"
+	"github.com/docker/docker/daemon/exec"
+)
+
+const (
+	// Longest healthcheck probe output message to store. Longer messages will be truncated.
+	maxOutputLen = 4096
+
+	// Default interval between probe runs (from the end of one run to the start of the next).
+	// Also the time before the first probe.
+	defaultProbeInterval = 30 * time.Second
+
+	// The maximum length of time a single probe run should take. If the probe takes longer
+	// than this, the check is considered to have failed.
+	defaultProbeTimeout = 30 * time.Second
+
+	// The time given for the container to start before the health check starts considering
+	// the container unstable. Defaults to none.
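+	// (A Dockerfile can raise this via HEALTHCHECK --start-period, which is
+	// what populates Healthcheck.StartPeriod below.)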
+ defaultStartPeriod = 0 * time.Second + + // Default number of consecutive failures of the health check + // for the container to be considered unhealthy. + defaultProbeRetries = 3 + + // Maximum number of entries to record + maxLogEntries = 5 +) + +const ( + // Exit status codes that can be returned by the probe command. + + exitStatusHealthy = 0 // Container is healthy + exitStatusUnhealthy = 1 // Container is unhealthy +) + +// probe implementations know how to run a particular type of probe. +type probe interface { + // Perform one run of the check. Returns the exit code and an optional + // short diagnostic string. + run(context.Context, *Daemon, *container.Container) (*types.HealthcheckResult, error) +} + +// cmdProbe implements the "CMD" probe type. +type cmdProbe struct { + // Run the command with the system's default shell instead of execing it directly. + shell bool +} + +// exec the healthcheck command in the container. +// Returns the exit code and probe output (if any) +func (p *cmdProbe) run(ctx context.Context, d *Daemon, container *container.Container) (*types.HealthcheckResult, error) { + + cmdSlice := strslice.StrSlice(container.Config.Healthcheck.Test)[1:] + if p.shell { + cmdSlice = append(getShell(container.Config), cmdSlice...) + } + entrypoint, args := d.getEntrypointAndArgs(strslice.StrSlice{}, cmdSlice) + execConfig := exec.NewConfig() + execConfig.OpenStdin = false + execConfig.OpenStdout = true + execConfig.OpenStderr = true + execConfig.ContainerID = container.ID + execConfig.DetachKeys = []byte{} + execConfig.Entrypoint = entrypoint + execConfig.Args = args + execConfig.Tty = false + execConfig.Privileged = false + execConfig.User = container.Config.User + execConfig.Env = container.Config.Env + + d.registerExecCommand(container, execConfig) + d.LogContainerEvent(container, "exec_create: "+execConfig.Entrypoint+" "+strings.Join(execConfig.Args, " ")) + + output := &limitedBuffer{} + err := d.ContainerExecStart(ctx, execConfig.ID, nil, output, output) + if err != nil { + return nil, err + } + info, err := d.getExecConfig(execConfig.ID) + if err != nil { + return nil, err + } + if info.ExitCode == nil { + return nil, fmt.Errorf("Healthcheck for container %s has no exit code!", container.ID) + } + // Note: Go's json package will handle invalid UTF-8 for us + out := output.String() + return &types.HealthcheckResult{ + End: time.Now(), + ExitCode: *info.ExitCode, + Output: out, + }, nil +} + +// Update the container's Status.Health struct based on the latest probe's result. +func handleProbeResult(d *Daemon, c *container.Container, result *types.HealthcheckResult, done chan struct{}) { + c.Lock() + defer c.Unlock() + + // probe may have been cancelled while waiting on lock. Ignore result then + select { + case <-done: + return + default: + } + + retries := c.Config.Healthcheck.Retries + if retries <= 0 { + retries = defaultProbeRetries + } + + h := c.State.Health + oldStatus := h.Status + + if len(h.Log) >= maxLogEntries { + h.Log = append(h.Log[len(h.Log)+1-maxLogEntries:], result) + } else { + h.Log = append(h.Log, result) + } + + if result.ExitCode == exitStatusHealthy { + h.FailingStreak = 0 + h.Status = types.Healthy + } else { // Failure (including invalid exit code) + shouldIncrementStreak := true + + // If the container is starting (i.e. we never had a successful health check) + // then we check if we are within the start period of the container in which + // case we do not increment the failure streak. 
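+		// Example (mirroring the unit test): with StartPeriod=30s, a probe
+		// failing 20s after the container started leaves FailingStreak at 0,
+		// while one failing at 50s counts as a normal failure.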
+ if h.Status == types.Starting { + startPeriod := timeoutWithDefault(c.Config.Healthcheck.StartPeriod, defaultStartPeriod) + timeSinceStart := result.Start.Sub(c.State.StartedAt) + + // If still within the start period, then don't increment failing streak. + if timeSinceStart < startPeriod { + shouldIncrementStreak = false + } + } + + if shouldIncrementStreak { + h.FailingStreak++ + + if h.FailingStreak >= retries { + h.Status = types.Unhealthy + } + } + // Else we're starting or healthy. Stay in that state. + } + + if oldStatus != h.Status { + d.LogContainerEvent(c, "health_status: "+h.Status) + } +} + +// Run the container's monitoring thread until notified via "stop". +// There is never more than one monitor thread running per container at a time. +func monitor(d *Daemon, c *container.Container, stop chan struct{}, probe probe) { + probeTimeout := timeoutWithDefault(c.Config.Healthcheck.Timeout, defaultProbeTimeout) + probeInterval := timeoutWithDefault(c.Config.Healthcheck.Interval, defaultProbeInterval) + for { + select { + case <-stop: + logrus.Debugf("Stop healthcheck monitoring for container %s (received while idle)", c.ID) + return + case <-time.After(probeInterval): + logrus.Debugf("Running health check for container %s ...", c.ID) + startTime := time.Now() + ctx, cancelProbe := context.WithTimeout(context.Background(), probeTimeout) + results := make(chan *types.HealthcheckResult) + go func() { + healthChecksCounter.Inc() + result, err := probe.run(ctx, d, c) + if err != nil { + healthChecksFailedCounter.Inc() + logrus.Warnf("Health check for container %s error: %v", c.ID, err) + results <- &types.HealthcheckResult{ + ExitCode: -1, + Output: err.Error(), + Start: startTime, + End: time.Now(), + } + } else { + result.Start = startTime + logrus.Debugf("Health check for container %s done (exitCode=%d)", c.ID, result.ExitCode) + results <- result + } + close(results) + }() + select { + case <-stop: + logrus.Debugf("Stop healthcheck monitoring for container %s (received while probing)", c.ID) + // Stop timeout and kill probe, but don't wait for probe to exit. + cancelProbe() + return + case result := <-results: + handleProbeResult(d, c, result, stop) + // Stop timeout + cancelProbe() + case <-ctx.Done(): + logrus.Debugf("Health check for container %s taking too long", c.ID) + handleProbeResult(d, c, &types.HealthcheckResult{ + ExitCode: -1, + Output: fmt.Sprintf("Health check exceeded timeout (%v)", probeTimeout), + Start: startTime, + End: time.Now(), + }, stop) + cancelProbe() + // Wait for probe to exit (it might take a while to respond to the TERM + // signal and we don't want dying probes to pile up). + <-results + } + } + } +} + +// Get a suitable probe implementation for the container's healthcheck configuration. +// Nil will be returned if no healthcheck was configured or NONE was set. +func getProbe(c *container.Container) probe { + config := c.Config.Healthcheck + if config == nil || len(config.Test) == 0 { + return nil + } + switch config.Test[0] { + case "CMD": + return &cmdProbe{shell: false} + case "CMD-SHELL": + return &cmdProbe{shell: true} + default: + logrus.Warnf("Unknown healthcheck type '%s' (expected 'CMD') in container %s", config.Test[0], c.ID) + return nil + } +} + +// Ensure the health-check monitor is running or not, depending on the current +// state of the container. +// Called from monitor.go, with c locked. 
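+// Note that a paused container keeps its last health status, but its
+// monitor goroutine is shut down until it is unpaused (wantRunning below
+// excludes Paused).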
+func (d *Daemon) updateHealthMonitor(c *container.Container) { + h := c.State.Health + if h == nil { + return // No healthcheck configured + } + + probe := getProbe(c) + wantRunning := c.Running && !c.Paused && probe != nil + if wantRunning { + if stop := h.OpenMonitorChannel(); stop != nil { + go monitor(d, c, stop, probe) + } + } else { + h.CloseMonitorChannel() + } +} + +// Reset the health state for a newly-started, restarted or restored container. +// initHealthMonitor is called from monitor.go and we should never be running +// two instances at once. +// Called with c locked. +func (d *Daemon) initHealthMonitor(c *container.Container) { + // If no healthcheck is setup then don't init the monitor + if getProbe(c) == nil { + return + } + + // This is needed in case we're auto-restarting + d.stopHealthchecks(c) + + if h := c.State.Health; h != nil { + h.Status = types.Starting + h.FailingStreak = 0 + } else { + h := &container.Health{} + h.Status = types.Starting + c.State.Health = h + } + + d.updateHealthMonitor(c) +} + +// Called when the container is being stopped (whether because the health check is +// failing or for any other reason). +func (d *Daemon) stopHealthchecks(c *container.Container) { + h := c.State.Health + if h != nil { + h.CloseMonitorChannel() + } +} + +// Buffer up to maxOutputLen bytes. Further data is discarded. +type limitedBuffer struct { + buf bytes.Buffer + mu sync.Mutex + truncated bool // indicates that data has been lost +} + +// Append to limitedBuffer while there is room. +func (b *limitedBuffer) Write(data []byte) (int, error) { + b.mu.Lock() + defer b.mu.Unlock() + + bufLen := b.buf.Len() + dataLen := len(data) + keep := min(maxOutputLen-bufLen, dataLen) + if keep > 0 { + b.buf.Write(data[:keep]) + } + if keep < dataLen { + b.truncated = true + } + return dataLen, nil +} + +// The contents of the buffer, with "..." appended if it overflowed. +func (b *limitedBuffer) String() string { + b.mu.Lock() + defer b.mu.Unlock() + + out := b.buf.String() + if b.truncated { + out = out + "..." + } + return out +} + +// If configuredValue is zero, use defaultValue instead. 
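+// This is how the defaultProbe* constants above are applied when a
+// HEALTHCHECK omits an option, e.g. in monitor():
+//
+//	probeTimeout := timeoutWithDefault(c.Config.Healthcheck.Timeout, defaultProbeTimeout)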
+func timeoutWithDefault(configuredValue time.Duration, defaultValue time.Duration) time.Duration { + if configuredValue == 0 { + return defaultValue + } + return configuredValue +} + +func min(x, y int) int { + if x < y { + return x + } + return y +} + +func getShell(config *containertypes.Config) []string { + if len(config.Shell) != 0 { + return config.Shell + } + if runtime.GOOS != "windows" { + return []string{"/bin/sh", "-c"} + } + return []string{"cmd", "/S", "/C"} +} diff --git a/components/engine/daemon/health_test.go b/components/engine/daemon/health_test.go new file mode 100644 index 0000000000..fbbdf06c52 --- /dev/null +++ b/components/engine/daemon/health_test.go @@ -0,0 +1,145 @@ +package daemon + +import ( + "testing" + "time" + + "github.com/docker/docker/api/types" + containertypes "github.com/docker/docker/api/types/container" + eventtypes "github.com/docker/docker/api/types/events" + "github.com/docker/docker/container" + "github.com/docker/docker/daemon/events" +) + +func reset(c *container.Container) { + c.State = &container.State{} + c.State.Health = &container.Health{} + c.State.Health.Status = types.Starting +} + +func TestNoneHealthcheck(t *testing.T) { + c := &container.Container{ + CommonContainer: container.CommonContainer{ + ID: "container_id", + Name: "container_name", + Config: &containertypes.Config{ + Image: "image_name", + Healthcheck: &containertypes.HealthConfig{ + Test: []string{"NONE"}, + }, + }, + State: &container.State{}, + }, + } + daemon := &Daemon{} + + daemon.initHealthMonitor(c) + if c.State.Health != nil { + t.Error("Expecting Health to be nil, but was not") + } +} + +// FIXME(vdemeester) This takes around 3s… This is *way* too long +func TestHealthStates(t *testing.T) { + e := events.New() + _, l, _ := e.Subscribe() + defer e.Evict(l) + + expect := func(expected string) { + select { + case event := <-l: + ev := event.(eventtypes.Message) + if ev.Status != expected { + t.Errorf("Expecting event %#v, but got %#v\n", expected, ev.Status) + } + case <-time.After(1 * time.Second): + t.Errorf("Expecting event %#v, but got nothing\n", expected) + } + } + + c := &container.Container{ + CommonContainer: container.CommonContainer{ + ID: "container_id", + Name: "container_name", + Config: &containertypes.Config{ + Image: "image_name", + }, + }, + } + daemon := &Daemon{ + EventsService: e, + } + + c.Config.Healthcheck = &containertypes.HealthConfig{ + Retries: 1, + } + + reset(c) + + handleResult := func(startTime time.Time, exitCode int) { + handleProbeResult(daemon, c, &types.HealthcheckResult{ + Start: startTime, + End: startTime, + ExitCode: exitCode, + }, nil) + } + + // starting -> failed -> success -> failed + + handleResult(c.State.StartedAt.Add(1*time.Second), 1) + expect("health_status: unhealthy") + + handleResult(c.State.StartedAt.Add(2*time.Second), 0) + expect("health_status: healthy") + + handleResult(c.State.StartedAt.Add(3*time.Second), 1) + expect("health_status: unhealthy") + + // Test retries + + reset(c) + c.Config.Healthcheck.Retries = 3 + + handleResult(c.State.StartedAt.Add(20*time.Second), 1) + handleResult(c.State.StartedAt.Add(40*time.Second), 1) + if c.State.Health.Status != types.Starting { + t.Errorf("Expecting starting, but got %#v\n", c.State.Health.Status) + } + if c.State.Health.FailingStreak != 2 { + t.Errorf("Expecting FailingStreak=2, but got %d\n", c.State.Health.FailingStreak) + } + handleResult(c.State.StartedAt.Add(60*time.Second), 1) + expect("health_status: unhealthy") + + 
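+	// A subsequent success must reset the streak and flip the state back.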
handleResult(c.State.StartedAt.Add(80*time.Second), 0) + expect("health_status: healthy") + if c.State.Health.FailingStreak != 0 { + t.Errorf("Expecting FailingStreak=0, but got %d\n", c.State.Health.FailingStreak) + } + + // Test start period + + reset(c) + c.Config.Healthcheck.Retries = 2 + c.Config.Healthcheck.StartPeriod = 30 * time.Second + + handleResult(c.State.StartedAt.Add(20*time.Second), 1) + if c.State.Health.Status != types.Starting { + t.Errorf("Expecting starting, but got %#v\n", c.State.Health.Status) + } + if c.State.Health.FailingStreak != 0 { + t.Errorf("Expecting FailingStreak=0, but got %d\n", c.State.Health.FailingStreak) + } + handleResult(c.State.StartedAt.Add(50*time.Second), 1) + if c.State.Health.Status != types.Starting { + t.Errorf("Expecting starting, but got %#v\n", c.State.Health.Status) + } + if c.State.Health.FailingStreak != 1 { + t.Errorf("Expecting FailingStreak=1, but got %d\n", c.State.Health.FailingStreak) + } + handleResult(c.State.StartedAt.Add(80*time.Second), 0) + expect("health_status: healthy") + if c.State.Health.FailingStreak != 0 { + t.Errorf("Expecting FailingStreak=0, but got %d\n", c.State.Health.FailingStreak) + } +} diff --git a/components/engine/daemon/image.go b/components/engine/daemon/image.go new file mode 100644 index 0000000000..d10457118f --- /dev/null +++ b/components/engine/daemon/image.go @@ -0,0 +1,76 @@ +package daemon + +import ( + "fmt" + + "github.com/docker/distribution/reference" + "github.com/docker/docker/image" + "github.com/docker/docker/pkg/stringid" +) + +// ErrImageDoesNotExist is error returned when no image can be found for a reference. +type ErrImageDoesNotExist struct { + ref reference.Reference +} + +func (e ErrImageDoesNotExist) Error() string { + ref := e.ref + if named, ok := ref.(reference.Named); ok { + ref = reference.TagNameOnly(named) + } + return fmt.Sprintf("No such image: %s", reference.FamiliarString(ref)) +} + +// GetImageID returns an image ID corresponding to the image referred to by +// refOrID. +func (daemon *Daemon) GetImageID(refOrID string) (image.ID, error) { + ref, err := reference.ParseAnyReference(refOrID) + if err != nil { + return "", err + } + namedRef, ok := ref.(reference.Named) + if !ok { + digested, ok := ref.(reference.Digested) + if !ok { + return "", ErrImageDoesNotExist{ref} + } + id := image.IDFromDigest(digested.Digest()) + if _, err := daemon.imageStore.Get(id); err != nil { + return "", ErrImageDoesNotExist{ref} + } + return id, nil + } + + if id, err := daemon.referenceStore.Get(namedRef); err == nil { + return image.IDFromDigest(id), nil + } + + // deprecated: repo:shortid https://github.com/docker/docker/pull/799 + if tagged, ok := namedRef.(reference.Tagged); ok { + if tag := tagged.Tag(); stringid.IsShortID(stringid.TruncateID(tag)) { + if id, err := daemon.imageStore.Search(tag); err == nil { + for _, storeRef := range daemon.referenceStore.References(id.Digest()) { + if storeRef.Name() == namedRef.Name() { + return id, nil + } + } + } + } + } + + // Search based on ID + if id, err := daemon.imageStore.Search(refOrID); err == nil { + return id, nil + } + + return "", ErrImageDoesNotExist{ref} +} + +// GetImage returns an image corresponding to the image referred to by refOrID. 
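+// refOrID may be any form GetImageID accepts: a full or truncated image ID,
+// a name:tag, or a name@digest reference, e.g. (image name illustrative):
+//
+//	img, err := daemon.GetImage("busybox:latest")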
+func (daemon *Daemon) GetImage(refOrID string) (*image.Image, error) { + imgID, err := daemon.GetImageID(refOrID) + if err != nil { + return nil, err + } + return daemon.imageStore.Get(imgID) +} diff --git a/components/engine/daemon/image_delete.go b/components/engine/daemon/image_delete.go new file mode 100644 index 0000000000..b7dbd249eb --- /dev/null +++ b/components/engine/daemon/image_delete.go @@ -0,0 +1,413 @@ +package daemon + +import ( + "fmt" + "strings" + "time" + + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/errors" + "github.com/docker/docker/api/types" + "github.com/docker/docker/container" + "github.com/docker/docker/image" + "github.com/docker/docker/pkg/stringid" +) + +type conflictType int + +const ( + conflictDependentChild conflictType = (1 << iota) + conflictRunningContainer + conflictActiveReference + conflictStoppedContainer + conflictHard = conflictDependentChild | conflictRunningContainer + conflictSoft = conflictActiveReference | conflictStoppedContainer +) + +// ImageDelete deletes the image referenced by the given imageRef from this +// daemon. The given imageRef can be an image ID, ID prefix, or a repository +// reference (with an optional tag or digest, defaulting to the tag name +// "latest"). There is differing behavior depending on whether the given +// imageRef is a repository reference or not. +// +// If the given imageRef is a repository reference then that repository +// reference will be removed. However, if there exists any containers which +// were created using the same image reference then the repository reference +// cannot be removed unless either there are other repository references to the +// same image or force is true. Following removal of the repository reference, +// the referenced image itself will attempt to be deleted as described below +// but quietly, meaning any image delete conflicts will cause the image to not +// be deleted and the conflict will not be reported. +// +// There may be conflicts preventing deletion of an image and these conflicts +// are divided into two categories grouped by their severity: +// +// Hard Conflict: +// - a pull or build using the image. +// - any descendant image. +// - any running container using the image. +// +// Soft Conflict: +// - any stopped container using the image. +// - any repository tag or digest references to the image. +// +// The image cannot be removed if there are any hard conflicts and can be +// removed if there are soft conflicts only if force is true. +// +// If prune is true, ancestor images will each attempt to be deleted quietly, +// meaning any delete conflicts will cause the image to not be deleted and the +// conflict will not be reported. +// +// FIXME: remove ImageDelete's dependency on Daemon, then move to the graph +// package. This would require that we no longer need the daemon to determine +// whether images are being used by a stopped or running container. +func (daemon *Daemon) ImageDelete(imageRef string, force, prune bool) ([]types.ImageDeleteResponseItem, error) { + start := time.Now() + records := []types.ImageDeleteResponseItem{} + + imgID, err := daemon.GetImageID(imageRef) + if err != nil { + return nil, daemon.imageNotExistToErrcode(err) + } + + repoRefs := daemon.referenceStore.References(imgID.Digest()) + + var removedRepositoryRef bool + if !isImageIDPrefix(imgID.String(), imageRef) { + // A repository reference was given and should be removed + // first. 
We can only remove this reference if either force is + // true, there are multiple repository references to this + // image, or there are no containers using the given reference. + if !force && isSingleReference(repoRefs) { + if container := daemon.getContainerUsingImage(imgID); container != nil { + // If we removed the repository reference then + // this image would remain "dangling" and since + // we really want to avoid that the client must + // explicitly force its removal. + err := fmt.Errorf("conflict: unable to remove repository reference %q (must force) - container %s is using its referenced image %s", imageRef, stringid.TruncateID(container.ID), stringid.TruncateID(imgID.String())) + return nil, errors.NewRequestConflictError(err) + } + } + + parsedRef, err := reference.ParseNormalizedNamed(imageRef) + if err != nil { + return nil, err + } + + parsedRef, err = daemon.removeImageRef(parsedRef) + if err != nil { + return nil, err + } + + untaggedRecord := types.ImageDeleteResponseItem{Untagged: reference.FamiliarString(parsedRef)} + + daemon.LogImageEvent(imgID.String(), imgID.String(), "untag") + records = append(records, untaggedRecord) + + repoRefs = daemon.referenceStore.References(imgID.Digest()) + + // If a tag reference was removed and the only remaining + // references to the same repository are digest references, + // then clean up those digest references. + if _, isCanonical := parsedRef.(reference.Canonical); !isCanonical { + foundRepoTagRef := false + for _, repoRef := range repoRefs { + if _, repoRefIsCanonical := repoRef.(reference.Canonical); !repoRefIsCanonical && parsedRef.Name() == repoRef.Name() { + foundRepoTagRef = true + break + } + } + if !foundRepoTagRef { + // Remove canonical references from same repository + remainingRefs := []reference.Named{} + for _, repoRef := range repoRefs { + if _, repoRefIsCanonical := repoRef.(reference.Canonical); repoRefIsCanonical && parsedRef.Name() == repoRef.Name() { + if _, err := daemon.removeImageRef(repoRef); err != nil { + return records, err + } + + untaggedRecord := types.ImageDeleteResponseItem{Untagged: reference.FamiliarString(repoRef)} + records = append(records, untaggedRecord) + } else { + remainingRefs = append(remainingRefs, repoRef) + + } + } + repoRefs = remainingRefs + } + } + + // If it has remaining references then the untag finished the remove + if len(repoRefs) > 0 { + return records, nil + } + + removedRepositoryRef = true + } else { + // If an ID reference was given AND there is at most one tag + // reference to the image AND all references are within one + // repository, then remove all references. + if isSingleReference(repoRefs) { + c := conflictHard + if !force { + c |= conflictSoft &^ conflictActiveReference + } + if conflict := daemon.checkImageDeleteConflict(imgID, c); conflict != nil { + return nil, conflict + } + + for _, repoRef := range repoRefs { + parsedRef, err := daemon.removeImageRef(repoRef) + if err != nil { + return nil, err + } + + untaggedRecord := types.ImageDeleteResponseItem{Untagged: reference.FamiliarString(parsedRef)} + + daemon.LogImageEvent(imgID.String(), imgID.String(), "untag") + records = append(records, untaggedRecord) + } + } + } + + if err := daemon.imageDeleteHelper(imgID, &records, force, prune, removedRepositoryRef); err != nil { + return nil, err + } + + imageActions.WithValues("delete").UpdateSince(start) + + return records, nil +} + +// isSingleReference returns true when all references are from one repository +// and there is at most one tag. 
Returns false for empty input. +func isSingleReference(repoRefs []reference.Named) bool { + if len(repoRefs) <= 1 { + return len(repoRefs) == 1 + } + var singleRef reference.Named + canonicalRefs := map[string]struct{}{} + for _, repoRef := range repoRefs { + if _, isCanonical := repoRef.(reference.Canonical); isCanonical { + canonicalRefs[repoRef.Name()] = struct{}{} + } else if singleRef == nil { + singleRef = repoRef + } else { + return false + } + } + if singleRef == nil { + // Just use first canonical ref + singleRef = repoRefs[0] + } + _, ok := canonicalRefs[singleRef.Name()] + return len(canonicalRefs) == 1 && ok +} + +// isImageIDPrefix returns whether the given possiblePrefix is a prefix of the +// given imageID. +func isImageIDPrefix(imageID, possiblePrefix string) bool { + if strings.HasPrefix(imageID, possiblePrefix) { + return true + } + + if i := strings.IndexRune(imageID, ':'); i >= 0 { + return strings.HasPrefix(imageID[i+1:], possiblePrefix) + } + + return false +} + +// getContainerUsingImage returns a container that was created using the given +// imageID. Returns nil if there is no such container. +func (daemon *Daemon) getContainerUsingImage(imageID image.ID) *container.Container { + return daemon.containers.First(func(c *container.Container) bool { + return c.ImageID == imageID + }) +} + +// removeImageRef attempts to parse and remove the given image reference from +// this daemon's store of repository tag/digest references. The given +// repositoryRef must not be an image ID but a repository name followed by an +// optional tag or digest reference. If tag or digest is omitted, the default +// tag is used. Returns the resolved image reference and an error. +func (daemon *Daemon) removeImageRef(ref reference.Named) (reference.Named, error) { + ref = reference.TagNameOnly(ref) + + // Ignore the boolean value returned, as far as we're concerned, this + // is an idempotent operation and it's okay if the reference didn't + // exist in the first place. + _, err := daemon.referenceStore.Delete(ref) + + return ref, err +} + +// removeAllReferencesToImageID attempts to remove every reference to the given +// imgID from this daemon's store of repository tag/digest references. Returns +// on the first encountered error. Removed references are logged to this +// daemon's event service. An "Untagged" types.ImageDeleteResponseItem is added to the +// given list of records. +func (daemon *Daemon) removeAllReferencesToImageID(imgID image.ID, records *[]types.ImageDeleteResponseItem) error { + imageRefs := daemon.referenceStore.References(imgID.Digest()) + + for _, imageRef := range imageRefs { + parsedRef, err := daemon.removeImageRef(imageRef) + if err != nil { + return err + } + + untaggedRecord := types.ImageDeleteResponseItem{Untagged: reference.FamiliarString(parsedRef)} + + daemon.LogImageEvent(imgID.String(), imgID.String(), "untag") + *records = append(*records, untaggedRecord) + } + + return nil +} + +// ImageDeleteConflict holds a soft or hard conflict and an associated error. +// Implements the error interface. 
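+// For example (IDs are illustrative), a soft conflict renders as:
+//
+//	conflict: unable to delete 1234567890ab (must be forced) - image is being used by stopped container deadbeef0123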
+type imageDeleteConflict struct { + hard bool + used bool + imgID image.ID + message string +} + +func (idc *imageDeleteConflict) Error() string { + var forceMsg string + if idc.hard { + forceMsg = "cannot be forced" + } else { + forceMsg = "must be forced" + } + + return fmt.Sprintf("conflict: unable to delete %s (%s) - %s", stringid.TruncateID(idc.imgID.String()), forceMsg, idc.message) +} + +// imageDeleteHelper attempts to delete the given image from this daemon. If +// the image has any hard delete conflicts (child images or running containers +// using the image) then it cannot be deleted. If the image has any soft delete +// conflicts (any tags/digests referencing the image or any stopped container +// using the image) then it can only be deleted if force is true. If the delete +// succeeds and prune is true, the parent images are also deleted if they do +// not have any soft or hard delete conflicts themselves. Any deleted images +// and untagged references are appended to the given records. If any error or +// conflict is encountered, it will be returned immediately without deleting +// the image. If quiet is true, any encountered conflicts will be ignored and +// the function will return nil immediately without deleting the image. +func (daemon *Daemon) imageDeleteHelper(imgID image.ID, records *[]types.ImageDeleteResponseItem, force, prune, quiet bool) error { + // First, determine if this image has any conflicts. Ignore soft conflicts + // if force is true. + c := conflictHard + if !force { + c |= conflictSoft + } + if conflict := daemon.checkImageDeleteConflict(imgID, c); conflict != nil { + if quiet && (!daemon.imageIsDangling(imgID) || conflict.used) { + // Ignore conflicts UNLESS the image is "dangling" or not being used in + // which case we want the user to know. + return nil + } + + // There was a conflict and it's either a hard conflict OR we are not + // forcing deletion on soft conflicts. + return conflict + } + + parent, err := daemon.imageStore.GetParent(imgID) + if err != nil { + // There may be no parent + parent = "" + } + + // Delete all repository tag/digest references to this image. + if err := daemon.removeAllReferencesToImageID(imgID, records); err != nil { + return err + } + + removedLayers, err := daemon.imageStore.Delete(imgID) + if err != nil { + return err + } + + daemon.LogImageEvent(imgID.String(), imgID.String(), "delete") + *records = append(*records, types.ImageDeleteResponseItem{Deleted: imgID.String()}) + for _, removedLayer := range removedLayers { + *records = append(*records, types.ImageDeleteResponseItem{Deleted: removedLayer.ChainID.String()}) + } + + if !prune || parent == "" { + return nil + } + + // We need to prune the parent image. This means delete it if there are + // no tags/digests referencing it and there are no containers using it ( + // either running or stopped). + // Do not force prunings, but do so quietly (stopping on any encountered + // conflicts). + return daemon.imageDeleteHelper(parent, records, false, true, true) +} + +// checkImageDeleteConflict determines whether there are any conflicts +// preventing deletion of the given image from this daemon. A hard conflict is +// any image which has the given image as a parent or any running container +// using the image. A soft conflict is any tags/digest referencing the given +// image or any stopped container using the image. If ignoreSoftConflicts is +// true, this function will not check for soft conflict conditions. 
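+//
+// Typical mask construction, as used by imageDeleteHelper above:
+//
+//	c := conflictHard
+//	if !force {
+//		c |= conflictSoft
+//	}
+//	conflict := daemon.checkImageDeleteConflict(imgID, c)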
+func (daemon *Daemon) checkImageDeleteConflict(imgID image.ID, mask conflictType) *imageDeleteConflict { + // Check if the image has any descendant images. + if mask&conflictDependentChild != 0 && len(daemon.imageStore.Children(imgID)) > 0 { + return &imageDeleteConflict{ + hard: true, + imgID: imgID, + message: "image has dependent child images", + } + } + + if mask&conflictRunningContainer != 0 { + // Check if any running container is using the image. + running := func(c *container.Container) bool { + return c.IsRunning() && c.ImageID == imgID + } + if container := daemon.containers.First(running); container != nil { + return &imageDeleteConflict{ + imgID: imgID, + hard: true, + used: true, + message: fmt.Sprintf("image is being used by running container %s", stringid.TruncateID(container.ID)), + } + } + } + + // Check if any repository tags/digest reference this image. + if mask&conflictActiveReference != 0 && len(daemon.referenceStore.References(imgID.Digest())) > 0 { + return &imageDeleteConflict{ + imgID: imgID, + message: "image is referenced in multiple repositories", + } + } + + if mask&conflictStoppedContainer != 0 { + // Check if any stopped containers reference this image. + stopped := func(c *container.Container) bool { + return !c.IsRunning() && c.ImageID == imgID + } + if container := daemon.containers.First(stopped); container != nil { + return &imageDeleteConflict{ + imgID: imgID, + used: true, + message: fmt.Sprintf("image is being used by stopped container %s", stringid.TruncateID(container.ID)), + } + } + } + + return nil +} + +// imageIsDangling returns whether the given image is "dangling" which means +// that there are no repository references to the given image and it has no +// child images. +func (daemon *Daemon) imageIsDangling(imgID image.ID) bool { + return !(len(daemon.referenceStore.References(imgID.Digest())) > 0 || len(daemon.imageStore.Children(imgID)) > 0) +} diff --git a/components/engine/daemon/image_exporter.go b/components/engine/daemon/image_exporter.go new file mode 100644 index 0000000000..95d1d3dcdb --- /dev/null +++ b/components/engine/daemon/image_exporter.go @@ -0,0 +1,25 @@ +package daemon + +import ( + "io" + + "github.com/docker/docker/image/tarexport" +) + +// ExportImage exports a list of images to the given output stream. The +// exported images are archived into a tar when written to the output +// stream. All images with the given tag and all versions containing +// the same tag are exported. names is the set of tags to export, and +// outStream is the writer which the images are written to. +func (daemon *Daemon) ExportImage(names []string, outStream io.Writer) error { + imageExporter := tarexport.NewTarExporter(daemon.imageStore, daemon.layerStore, daemon.referenceStore, daemon) + return imageExporter.Save(names, outStream) +} + +// LoadImage uploads a set of images into the repository. This is the +// complement of ImageExport. The input stream is an uncompressed tar +// ball containing images and metadata. 
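+//
+// Illustrative only (the file name is hypothetical):
+//
+//	f, err := os.Open("images.tar") // e.g. produced by ExportImage
+//	if err == nil {
+//		err = daemon.LoadImage(f, os.Stdout, true) // quiet load
+//	}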
+func (daemon *Daemon) LoadImage(inTar io.ReadCloser, outStream io.Writer, quiet bool) error { + imageExporter := tarexport.NewTarExporter(daemon.imageStore, daemon.layerStore, daemon.referenceStore, daemon) + return imageExporter.Load(inTar, outStream, quiet) +} diff --git a/components/engine/daemon/image_history.go b/components/engine/daemon/image_history.go new file mode 100644 index 0000000000..b763c86c03 --- /dev/null +++ b/components/engine/daemon/image_history.go @@ -0,0 +1,84 @@ +package daemon + +import ( + "fmt" + "time" + + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types/image" + "github.com/docker/docker/layer" +) + +// ImageHistory returns a slice of ImageHistory structures for the specified image +// name by walking the image lineage. +func (daemon *Daemon) ImageHistory(name string) ([]*image.HistoryResponseItem, error) { + start := time.Now() + img, err := daemon.GetImage(name) + if err != nil { + return nil, err + } + + history := []*image.HistoryResponseItem{} + + layerCounter := 0 + rootFS := *img.RootFS + rootFS.DiffIDs = nil + + for _, h := range img.History { + var layerSize int64 + + if !h.EmptyLayer { + if len(img.RootFS.DiffIDs) <= layerCounter { + return nil, fmt.Errorf("too many non-empty layers in History section") + } + + rootFS.Append(img.RootFS.DiffIDs[layerCounter]) + l, err := daemon.layerStore.Get(rootFS.ChainID()) + if err != nil { + return nil, err + } + layerSize, err = l.DiffSize() + layer.ReleaseAndLog(daemon.layerStore, l) + if err != nil { + return nil, err + } + + layerCounter++ + } + + history = append([]*image.HistoryResponseItem{{ + ID: "", + Created: h.Created.Unix(), + CreatedBy: h.CreatedBy, + Comment: h.Comment, + Size: layerSize, + }}, history...) + } + + // Fill in image IDs and tags + histImg := img + id := img.ID() + for _, h := range history { + h.ID = id.String() + + var tags []string + for _, r := range daemon.referenceStore.References(id.Digest()) { + if _, ok := r.(reference.NamedTagged); ok { + tags = append(tags, reference.FamiliarString(r)) + } + } + + h.Tags = tags + + id = histImg.Parent + if id == "" { + break + } + histImg, err = daemon.GetImage(id.String()) + if err != nil { + break + } + } + imageActions.WithValues("history").UpdateSince(start) + return history, nil +} diff --git a/components/engine/daemon/image_inspect.go b/components/engine/daemon/image_inspect.go new file mode 100644 index 0000000000..267a41946a --- /dev/null +++ b/components/engine/daemon/image_inspect.go @@ -0,0 +1,82 @@ +package daemon + +import ( + "time" + + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" + "github.com/docker/docker/layer" + "github.com/pkg/errors" +) + +// LookupImage looks up an image by name and returns it as an ImageInspect +// structure. 
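+//
+// Illustrative only (the reference is hypothetical):
+//
+//	inspect, err := daemon.LookupImage("alpine:3.6")
+//	// on success, inspect.RepoTags, inspect.Size, etc. are populated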
+func (daemon *Daemon) LookupImage(name string) (*types.ImageInspect, error) { + img, err := daemon.GetImage(name) + if err != nil { + return nil, errors.Wrapf(err, "no such image: %s", name) + } + + refs := daemon.referenceStore.References(img.ID().Digest()) + repoTags := []string{} + repoDigests := []string{} + for _, ref := range refs { + switch ref.(type) { + case reference.NamedTagged: + repoTags = append(repoTags, reference.FamiliarString(ref)) + case reference.Canonical: + repoDigests = append(repoDigests, reference.FamiliarString(ref)) + } + } + + var size int64 + var layerMetadata map[string]string + layerID := img.RootFS.ChainID() + if layerID != "" { + l, err := daemon.layerStore.Get(layerID) + if err != nil { + return nil, err + } + defer layer.ReleaseAndLog(daemon.layerStore, l) + size, err = l.Size() + if err != nil { + return nil, err + } + + layerMetadata, err = l.Metadata() + if err != nil { + return nil, err + } + } + + comment := img.Comment + if len(comment) == 0 && len(img.History) > 0 { + comment = img.History[len(img.History)-1].Comment + } + + imageInspect := &types.ImageInspect{ + ID: img.ID().String(), + RepoTags: repoTags, + RepoDigests: repoDigests, + Parent: img.Parent.String(), + Comment: comment, + Created: img.Created.Format(time.RFC3339Nano), + Container: img.Container, + ContainerConfig: &img.ContainerConfig, + DockerVersion: img.DockerVersion, + Author: img.Author, + Config: img.Config, + Architecture: img.Architecture, + Os: img.OS, + OsVersion: img.OSVersion, + Size: size, + VirtualSize: size, // TODO: field unused, deprecate + RootFS: rootFSToAPIType(img.RootFS), + } + + imageInspect.GraphDriver.Name = daemon.GraphDriverName() + + imageInspect.GraphDriver.Data = layerMetadata + + return imageInspect, nil +} diff --git a/components/engine/daemon/image_pull.go b/components/engine/daemon/image_pull.go new file mode 100644 index 0000000000..304fd9f024 --- /dev/null +++ b/components/engine/daemon/image_pull.go @@ -0,0 +1,119 @@ +package daemon + +import ( + "io" + "strings" + + dist "github.com/docker/distribution" + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" + "github.com/docker/docker/distribution" + progressutils "github.com/docker/docker/distribution/utils" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/registry" + "github.com/opencontainers/go-digest" + "golang.org/x/net/context" +) + +// PullImage initiates a pull operation. image is the repository name to pull, and +// tag may be either empty, or indicate a specific tag to pull. +func (daemon *Daemon) PullImage(ctx context.Context, image, tag string, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error { + // Special case: "pull -a" may send an image name with a + // trailing :. This is ugly, but let's not break API + // compatibility. + image = strings.TrimSuffix(image, ":") + + ref, err := reference.ParseNormalizedNamed(image) + if err != nil { + return err + } + + if tag != "" { + // The "tag" could actually be a digest. 
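+		// e.g. "sha256:<hex>": try to parse the value as a digest first,
+		// and fall back to treating it as a tag only if parsing fails.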
+ var dgst digest.Digest + dgst, err = digest.Parse(tag) + if err == nil { + ref, err = reference.WithDigest(reference.TrimNamed(ref), dgst) + } else { + ref, err = reference.WithTag(ref, tag) + } + if err != nil { + return err + } + } + + return daemon.pullImageWithReference(ctx, ref, metaHeaders, authConfig, outStream) +} + +func (daemon *Daemon) pullImageWithReference(ctx context.Context, ref reference.Named, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error { + // Include a buffer so that slow client connections don't affect + // transfer performance. + progressChan := make(chan progress.Progress, 100) + + writesDone := make(chan struct{}) + + ctx, cancelFunc := context.WithCancel(ctx) + + go func() { + progressutils.WriteDistributionProgress(cancelFunc, outStream, progressChan) + close(writesDone) + }() + + imagePullConfig := &distribution.ImagePullConfig{ + Config: distribution.Config{ + MetaHeaders: metaHeaders, + AuthConfig: authConfig, + ProgressOutput: progress.ChanOutput(progressChan), + RegistryService: daemon.RegistryService, + ImageEventLogger: daemon.LogImageEvent, + MetadataStore: daemon.distributionMetadataStore, + ImageStore: distribution.NewImageConfigStoreFromStore(daemon.imageStore), + ReferenceStore: daemon.referenceStore, + }, + DownloadManager: daemon.downloadManager, + Schema2Types: distribution.ImageTypes, + } + + err := distribution.Pull(ctx, ref, imagePullConfig) + close(progressChan) + <-writesDone + return err +} + +// GetRepository returns a repository from the registry. +func (daemon *Daemon) GetRepository(ctx context.Context, ref reference.Named, authConfig *types.AuthConfig) (dist.Repository, bool, error) { + // get repository info + repoInfo, err := daemon.RegistryService.ResolveRepository(ref) + if err != nil { + return nil, false, err + } + // makes sure name is not empty or `scratch` + if err := distribution.ValidateRepoName(repoInfo.Name); err != nil { + return nil, false, err + } + + // get endpoints + endpoints, err := daemon.RegistryService.LookupPullEndpoints(reference.Domain(repoInfo.Name)) + if err != nil { + return nil, false, err + } + + // retrieve repository + var ( + confirmedV2 bool + repository dist.Repository + lastError error + ) + + for _, endpoint := range endpoints { + if endpoint.Version == registry.APIVersion1 { + continue + } + + repository, confirmedV2, lastError = distribution.NewV2Repository(ctx, repoInfo, endpoint, nil, authConfig, "pull") + if lastError == nil && confirmedV2 { + break + } + } + return repository, confirmedV2, lastError +} diff --git a/components/engine/daemon/image_push.go b/components/engine/daemon/image_push.go new file mode 100644 index 0000000000..0f060d117f --- /dev/null +++ b/components/engine/daemon/image_push.go @@ -0,0 +1,63 @@ +package daemon + +import ( + "io" + + "github.com/docker/distribution/manifest/schema2" + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" + "github.com/docker/docker/distribution" + progressutils "github.com/docker/docker/distribution/utils" + "github.com/docker/docker/pkg/progress" + "golang.org/x/net/context" +) + +// PushImage initiates a push operation on the repository named localName. 
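+// Here image names the repository (the "localName" above) and tag, when
+// non-empty, selects the single tag to push; pushing by digest is not
+// supported.
+//
+// Illustrative only (authConfig is hypothetical):
+//
+//	err := daemon.PushImage(ctx, "example.com/app", "v1", nil, authConfig, os.Stdout)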
+func (daemon *Daemon) PushImage(ctx context.Context, image, tag string, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error { + ref, err := reference.ParseNormalizedNamed(image) + if err != nil { + return err + } + if tag != "" { + // Push by digest is not supported, so only tags are supported. + ref, err = reference.WithTag(ref, tag) + if err != nil { + return err + } + } + + // Include a buffer so that slow client connections don't affect + // transfer performance. + progressChan := make(chan progress.Progress, 100) + + writesDone := make(chan struct{}) + + ctx, cancelFunc := context.WithCancel(ctx) + + go func() { + progressutils.WriteDistributionProgress(cancelFunc, outStream, progressChan) + close(writesDone) + }() + + imagePushConfig := &distribution.ImagePushConfig{ + Config: distribution.Config{ + MetaHeaders: metaHeaders, + AuthConfig: authConfig, + ProgressOutput: progress.ChanOutput(progressChan), + RegistryService: daemon.RegistryService, + ImageEventLogger: daemon.LogImageEvent, + MetadataStore: daemon.distributionMetadataStore, + ImageStore: distribution.NewImageConfigStoreFromStore(daemon.imageStore), + ReferenceStore: daemon.referenceStore, + }, + ConfigMediaType: schema2.MediaTypeImageConfig, + LayerStore: distribution.NewLayerProviderFromStore(daemon.layerStore), + TrustKey: daemon.trustKey, + UploadManager: daemon.uploadManager, + } + + err = distribution.Push(ctx, ref, imagePushConfig) + close(progressChan) + <-writesDone + return err +} diff --git a/components/engine/daemon/image_tag.go b/components/engine/daemon/image_tag.go new file mode 100644 index 0000000000..10a584b361 --- /dev/null +++ b/components/engine/daemon/image_tag.go @@ -0,0 +1,37 @@ +package daemon + +import ( + "github.com/docker/distribution/reference" + "github.com/docker/docker/image" +) + +// TagImage creates the tag specified by newTag, pointing to the image named +// imageName (alternatively, imageName can also be an image ID). +func (daemon *Daemon) TagImage(imageName, repository, tag string) error { + imageID, err := daemon.GetImageID(imageName) + if err != nil { + return err + } + + newTag, err := reference.ParseNormalizedNamed(repository) + if err != nil { + return err + } + if tag != "" { + if newTag, err = reference.WithTag(reference.TrimNamed(newTag), tag); err != nil { + return err + } + } + + return daemon.TagImageWithReference(imageID, newTag) +} + +// TagImageWithReference adds the given reference to the image ID provided. 
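+//
+// Illustrative only (imgID is hypothetical); TagImage above resolves the
+// name and tag before calling this:
+//
+//	ref, err := reference.ParseNormalizedNamed("example/app:stable")
+//	if err == nil {
+//		err = daemon.TagImageWithReference(imgID, ref)
+//	}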
+func (daemon *Daemon) TagImageWithReference(imageID image.ID, newTag reference.Named) error { + if err := daemon.referenceStore.AddTag(newTag, imageID.Digest(), true); err != nil { + return err + } + + daemon.LogImageEvent(imageID.String(), reference.FamiliarString(newTag), "tag") + return nil +} diff --git a/components/engine/daemon/images.go b/components/engine/daemon/images.go new file mode 100644 index 0000000000..b014fd4887 --- /dev/null +++ b/components/engine/daemon/images.go @@ -0,0 +1,336 @@ +package daemon + +import ( + "encoding/json" + "fmt" + "sort" + "time" + + "github.com/pkg/errors" + + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/container" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" +) + +var acceptedImageFilterTags = map[string]bool{ + "dangling": true, + "label": true, + "before": true, + "since": true, + "reference": true, +} + +// byCreated is a temporary type used to sort a list of images by creation +// time. +type byCreated []*types.ImageSummary + +func (r byCreated) Len() int { return len(r) } +func (r byCreated) Swap(i, j int) { r[i], r[j] = r[j], r[i] } +func (r byCreated) Less(i, j int) bool { return r[i].Created < r[j].Created } + +// Map returns a map of all images in the ImageStore +func (daemon *Daemon) Map() map[image.ID]*image.Image { + return daemon.imageStore.Map() +} + +// Images returns a filtered list of images. filterArgs is a JSON-encoded set +// of filter arguments which will be interpreted by api/types/filters. +// filter is a shell glob string applied to repository names. The argument +// named all controls whether all images in the graph are filtered, or just +// the heads. 
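+//
+// Illustrative only:
+//
+//	f := filters.NewArgs()
+//	f.Add("dangling", "true")
+//	summaries, err := daemon.Images(f, false, false)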
+func (daemon *Daemon) Images(imageFilters filters.Args, all bool, withExtraAttrs bool) ([]*types.ImageSummary, error) { + var ( + allImages map[image.ID]*image.Image + err error + danglingOnly = false + ) + + if err := imageFilters.Validate(acceptedImageFilterTags); err != nil { + return nil, err + } + + if imageFilters.Include("dangling") { + if imageFilters.ExactMatch("dangling", "true") { + danglingOnly = true + } else if !imageFilters.ExactMatch("dangling", "false") { + return nil, fmt.Errorf("Invalid filter 'dangling=%s'", imageFilters.Get("dangling")) + } + } + if danglingOnly { + allImages = daemon.imageStore.Heads() + } else { + allImages = daemon.imageStore.Map() + } + + var beforeFilter, sinceFilter *image.Image + err = imageFilters.WalkValues("before", func(value string) error { + beforeFilter, err = daemon.GetImage(value) + return err + }) + if err != nil { + return nil, err + } + + err = imageFilters.WalkValues("since", func(value string) error { + sinceFilter, err = daemon.GetImage(value) + return err + }) + if err != nil { + return nil, err + } + + images := []*types.ImageSummary{} + var imagesMap map[*image.Image]*types.ImageSummary + var layerRefs map[layer.ChainID]int + var allLayers map[layer.ChainID]layer.Layer + var allContainers []*container.Container + + for id, img := range allImages { + if beforeFilter != nil { + if img.Created.Equal(beforeFilter.Created) || img.Created.After(beforeFilter.Created) { + continue + } + } + + if sinceFilter != nil { + if img.Created.Equal(sinceFilter.Created) || img.Created.Before(sinceFilter.Created) { + continue + } + } + + if imageFilters.Include("label") { + // Very old image that do not have image.Config (or even labels) + if img.Config == nil { + continue + } + // We are now sure image.Config is not nil + if !imageFilters.MatchKVList("label", img.Config.Labels) { + continue + } + } + + layerID := img.RootFS.ChainID() + var size int64 + if layerID != "" { + l, err := daemon.layerStore.Get(layerID) + if err != nil { + // The layer may have been deleted between the call to `Map()` or + // `Heads()` and the call to `Get()`, so we just ignore this error + if err == layer.ErrLayerDoesNotExist { + continue + } + return nil, err + } + + size, err = l.Size() + layer.ReleaseAndLog(daemon.layerStore, l) + if err != nil { + return nil, err + } + } + + newImage := newImage(img, size) + + for _, ref := range daemon.referenceStore.References(id.Digest()) { + if imageFilters.Include("reference") { + var found bool + var matchErr error + for _, pattern := range imageFilters.Get("reference") { + found, matchErr = reference.FamiliarMatch(pattern, ref) + if matchErr != nil { + return nil, matchErr + } + } + if !found { + continue + } + } + if _, ok := ref.(reference.Canonical); ok { + newImage.RepoDigests = append(newImage.RepoDigests, reference.FamiliarString(ref)) + } + if _, ok := ref.(reference.NamedTagged); ok { + newImage.RepoTags = append(newImage.RepoTags, reference.FamiliarString(ref)) + } + } + if newImage.RepoDigests == nil && newImage.RepoTags == nil { + if all || len(daemon.imageStore.Children(id)) == 0 { + + if imageFilters.Include("dangling") && !danglingOnly { + //dangling=false case, so dangling image is not needed + continue + } + if imageFilters.Include("reference") { // skip images with no references if filtering by reference + continue + } + newImage.RepoDigests = []string{"@"} + newImage.RepoTags = []string{":"} + } else { + continue + } + } else if danglingOnly && len(newImage.RepoTags) > 0 { + continue + } + + if 
withExtraAttrs { + // lazily init variables + if imagesMap == nil { + allContainers = daemon.List() + allLayers = daemon.layerStore.Map() + imagesMap = make(map[*image.Image]*types.ImageSummary) + layerRefs = make(map[layer.ChainID]int) + } + + // Get container count + newImage.Containers = 0 + for _, c := range allContainers { + if c.ImageID == id { + newImage.Containers++ + } + } + + // count layer references + rootFS := *img.RootFS + rootFS.DiffIDs = nil + for _, id := range img.RootFS.DiffIDs { + rootFS.Append(id) + chid := rootFS.ChainID() + layerRefs[chid]++ + if _, ok := allLayers[chid]; !ok { + return nil, fmt.Errorf("layer %v was not found (corruption?)", chid) + } + } + imagesMap[img] = newImage + } + + images = append(images, newImage) + } + + if withExtraAttrs { + // Get Shared sizes + for img, newImage := range imagesMap { + rootFS := *img.RootFS + rootFS.DiffIDs = nil + + newImage.SharedSize = 0 + for _, id := range img.RootFS.DiffIDs { + rootFS.Append(id) + chid := rootFS.ChainID() + + diffSize, err := allLayers[chid].DiffSize() + if err != nil { + return nil, err + } + + if layerRefs[chid] > 1 { + newImage.SharedSize += diffSize + } + } + } + } + + sort.Sort(sort.Reverse(byCreated(images))) + + return images, nil +} + +// SquashImage creates a new image with the diff of the specified image and the specified parent. +// This new image contains only the layers from it's parent + 1 extra layer which contains the diff of all the layers in between. +// The existing image(s) is not destroyed. +// If no parent is specified, a new image with the diff of all the specified image's layers merged into a new layer that has no parents. +func (daemon *Daemon) SquashImage(id, parent string) (string, error) { + img, err := daemon.imageStore.Get(image.ID(id)) + if err != nil { + return "", err + } + + var parentImg *image.Image + var parentChainID layer.ChainID + if len(parent) != 0 { + parentImg, err = daemon.imageStore.Get(image.ID(parent)) + if err != nil { + return "", errors.Wrap(err, "error getting specified parent layer") + } + parentChainID = parentImg.RootFS.ChainID() + } else { + rootFS := image.NewRootFS() + parentImg = &image.Image{RootFS: rootFS} + } + + l, err := daemon.layerStore.Get(img.RootFS.ChainID()) + if err != nil { + return "", errors.Wrap(err, "error getting image layer") + } + defer daemon.layerStore.Release(l) + + ts, err := l.TarStreamFrom(parentChainID) + if err != nil { + return "", errors.Wrapf(err, "error getting tar stream to parent") + } + defer ts.Close() + + newL, err := daemon.layerStore.Register(ts, parentChainID) + if err != nil { + return "", errors.Wrap(err, "error registering layer") + } + defer daemon.layerStore.Release(newL) + + var newImage image.Image + newImage = *img + newImage.RootFS = nil + + var rootFS image.RootFS + rootFS = *parentImg.RootFS + rootFS.DiffIDs = append(rootFS.DiffIDs, newL.DiffID()) + newImage.RootFS = &rootFS + + for i, hi := range newImage.History { + if i >= len(parentImg.History) { + hi.EmptyLayer = true + } + newImage.History[i] = hi + } + + now := time.Now() + var historyComment string + if len(parent) > 0 { + historyComment = fmt.Sprintf("merge %s to %s", id, parent) + } else { + historyComment = fmt.Sprintf("create new from %s", id) + } + + newImage.History = append(newImage.History, image.History{ + Created: now, + Comment: historyComment, + }) + newImage.Created = now + + b, err := json.Marshal(&newImage) + if err != nil { + return "", errors.Wrap(err, "error marshalling image config") + } + + newImgID, err := 
daemon.imageStore.Create(b) + if err != nil { + return "", errors.Wrap(err, "error creating new image after squash") + } + return string(newImgID), nil +} + +func newImage(image *image.Image, size int64) *types.ImageSummary { + newImage := new(types.ImageSummary) + newImage.ParentID = image.Parent.String() + newImage.ID = image.ID().String() + newImage.Created = image.Created.Unix() + newImage.Size = size + newImage.VirtualSize = size + newImage.SharedSize = -1 + newImage.Containers = -1 + if image.Config != nil { + newImage.Labels = image.Config.Labels + } + return newImage +} diff --git a/components/engine/daemon/import.go b/components/engine/daemon/import.go new file mode 100644 index 0000000000..17b9d870a3 --- /dev/null +++ b/components/engine/daemon/import.go @@ -0,0 +1,133 @@ +package daemon + +import ( + "encoding/json" + "io" + "net/http" + "net/url" + "runtime" + "strings" + "time" + + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/builder/dockerfile" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/httputils" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/pkg/streamformatter" + "github.com/pkg/errors" +) + +// ImportImage imports an image, getting the archived layer data either from +// inConfig (if src is "-"), or from a URI specified in src. Progress output is +// written to outStream. Repository and tag names can optionally be given in +// the repo and tag arguments, respectively. +func (daemon *Daemon) ImportImage(src string, repository, tag string, msg string, inConfig io.ReadCloser, outStream io.Writer, changes []string) error { + var ( + rc io.ReadCloser + resp *http.Response + newRef reference.Named + ) + + if repository != "" { + var err error + newRef, err = reference.ParseNormalizedNamed(repository) + if err != nil { + return err + } + if _, isCanonical := newRef.(reference.Canonical); isCanonical { + return errors.New("cannot import digest reference") + } + + if tag != "" { + newRef, err = reference.WithTag(newRef, tag) + if err != nil { + return err + } + } + } + + config, err := dockerfile.BuildFromConfig(&container.Config{}, changes) + if err != nil { + return err + } + if src == "-" { + rc = inConfig + } else { + inConfig.Close() + if len(strings.Split(src, "://")) == 1 { + src = "http://" + src + } + u, err := url.Parse(src) + if err != nil { + return err + } + + resp, err = httputils.Download(u.String()) + if err != nil { + return err + } + outStream.Write(streamformatter.FormatStatus("", "Downloading from %s", u)) + progressOutput := streamformatter.NewJSONProgressOutput(outStream, true) + rc = progress.NewProgressReader(resp.Body, progressOutput, resp.ContentLength, "", "Importing") + } + + defer rc.Close() + if len(msg) == 0 { + msg = "Imported from " + src + } + + inflatedLayerData, err := archive.DecompressStream(rc) + if err != nil { + return err + } + // TODO: support windows baselayer? 
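+	// An empty parent ChainID below registers the imported filesystem as a
+	// new base layer with no parent.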
+ l, err := daemon.layerStore.Register(inflatedLayerData, "") + if err != nil { + return err + } + defer layer.ReleaseAndLog(daemon.layerStore, l) + + created := time.Now().UTC() + imgConfig, err := json.Marshal(&image.Image{ + V1Image: image.V1Image{ + DockerVersion: dockerversion.Version, + Config: config, + Architecture: runtime.GOARCH, + OS: runtime.GOOS, + Created: created, + Comment: msg, + }, + RootFS: &image.RootFS{ + Type: "layers", + DiffIDs: []layer.DiffID{l.DiffID()}, + }, + History: []image.History{{ + Created: created, + Comment: msg, + }}, + }) + if err != nil { + return err + } + + id, err := daemon.imageStore.Create(imgConfig) + if err != nil { + return err + } + + // FIXME: connect with commit code and call refstore directly + if newRef != nil { + if err := daemon.TagImageWithReference(id, newRef); err != nil { + return err + } + } + + daemon.LogImageEvent(id.String(), id.String(), "import") + outStream.Write(streamformatter.FormatStatus("", id.String())) + return nil +} diff --git a/components/engine/daemon/info.go b/components/engine/daemon/info.go new file mode 100644 index 0000000000..2be4b395fa --- /dev/null +++ b/components/engine/daemon/info.go @@ -0,0 +1,171 @@ +package daemon + +import ( + "fmt" + "os" + "runtime" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api" + "github.com/docker/docker/api/types" + "github.com/docker/docker/cli/debug" + "github.com/docker/docker/daemon/logger" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/pkg/fileutils" + "github.com/docker/docker/pkg/parsers/kernel" + "github.com/docker/docker/pkg/parsers/operatingsystem" + "github.com/docker/docker/pkg/platform" + "github.com/docker/docker/pkg/sysinfo" + "github.com/docker/docker/pkg/system" + "github.com/docker/docker/registry" + "github.com/docker/docker/volume/drivers" + "github.com/docker/go-connections/sockets" +) + +// SystemInfo returns information about the host server the daemon is running on. 
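+//
+// Illustrative only:
+//
+//	info, err := daemon.SystemInfo()
+//	if err == nil {
+//		logrus.Infof("%d containers on driver %s", info.Containers, info.Driver)
+//	}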
+func (daemon *Daemon) SystemInfo() (*types.Info, error) { + kernelVersion := "" + if kv, err := kernel.GetKernelVersion(); err != nil { + logrus.Warnf("Could not get kernel version: %v", err) + } else { + kernelVersion = kv.String() + } + + operatingSystem := "" + if s, err := operatingsystem.GetOperatingSystem(); err != nil { + logrus.Warnf("Could not get operating system name: %v", err) + } else { + operatingSystem = s + } + + // Don't do containerized check on Windows + if runtime.GOOS != "windows" { + if inContainer, err := operatingsystem.IsContainerized(); err != nil { + logrus.Errorf("Could not determine if daemon is containerized: %v", err) + operatingSystem += " (error determining if containerized)" + } else if inContainer { + operatingSystem += " (containerized)" + } + } + + meminfo, err := system.ReadMemInfo() + if err != nil { + logrus.Errorf("Could not read system memory info: %v", err) + meminfo = &system.MemInfo{} + } + + sysInfo := sysinfo.New(true) + cRunning, cPaused, cStopped := stateCtr.get() + + securityOptions := []string{} + if sysInfo.AppArmor { + securityOptions = append(securityOptions, "name=apparmor") + } + if sysInfo.Seccomp && supportsSeccomp { + profile := daemon.seccompProfilePath + if profile == "" { + profile = "default" + } + securityOptions = append(securityOptions, fmt.Sprintf("name=seccomp,profile=%s", profile)) + } + if selinuxEnabled() { + securityOptions = append(securityOptions, "name=selinux") + } + uid, gid := daemon.GetRemappedUIDGID() + if uid != 0 || gid != 0 { + securityOptions = append(securityOptions, "name=userns") + } + + v := &types.Info{ + ID: daemon.ID, + Containers: int(cRunning + cPaused + cStopped), + ContainersRunning: int(cRunning), + ContainersPaused: int(cPaused), + ContainersStopped: int(cStopped), + Images: len(daemon.imageStore.Map()), + Driver: daemon.GraphDriverName(), + DriverStatus: daemon.layerStore.DriverStatus(), + Plugins: daemon.showPluginsInfo(), + IPv4Forwarding: !sysInfo.IPv4ForwardingDisabled, + BridgeNfIptables: !sysInfo.BridgeNFCallIPTablesDisabled, + BridgeNfIP6tables: !sysInfo.BridgeNFCallIP6TablesDisabled, + Debug: debug.IsEnabled(), + NFd: fileutils.GetTotalUsedFds(), + NGoroutines: runtime.NumGoroutine(), + SystemTime: time.Now().Format(time.RFC3339Nano), + LoggingDriver: daemon.defaultLogConfig.Type, + CgroupDriver: daemon.getCgroupDriver(), + NEventsListener: daemon.EventsService.SubscribersCount(), + KernelVersion: kernelVersion, + OperatingSystem: operatingSystem, + IndexServerAddress: registry.IndexServer, + OSType: platform.OSType, + Architecture: platform.Architecture, + RegistryConfig: daemon.RegistryService.ServiceConfig(), + NCPU: sysinfo.NumCPU(), + MemTotal: meminfo.MemTotal, + DockerRootDir: daemon.configStore.Root, + Labels: daemon.configStore.Labels, + ExperimentalBuild: daemon.configStore.Experimental, + ServerVersion: dockerversion.Version, + ClusterStore: daemon.configStore.ClusterStore, + ClusterAdvertise: daemon.configStore.ClusterAdvertise, + HTTPProxy: sockets.GetProxyEnv("http_proxy"), + HTTPSProxy: sockets.GetProxyEnv("https_proxy"), + NoProxy: sockets.GetProxyEnv("no_proxy"), + LiveRestoreEnabled: daemon.configStore.LiveRestoreEnabled, + SecurityOptions: securityOptions, + Isolation: daemon.defaultIsolation, + } + + // Retrieve platform specific info + daemon.FillPlatformInfo(v, sysInfo) + + hostname := "" + if hn, err := os.Hostname(); err != nil { + logrus.Warnf("Could not get hostname: %v", err) + } else { + hostname = hn + } + v.Name = hostname + + return v, nil +} + +// 
SystemVersion returns version information about the daemon. +func (daemon *Daemon) SystemVersion() types.Version { + v := types.Version{ + Version: dockerversion.Version, + GitCommit: dockerversion.GitCommit, + MinAPIVersion: api.MinVersion, + GoVersion: runtime.Version(), + Os: runtime.GOOS, + Arch: runtime.GOARCH, + BuildTime: dockerversion.BuildTime, + Experimental: daemon.configStore.Experimental, + } + + kernelVersion := "" + if kv, err := kernel.GetKernelVersion(); err != nil { + logrus.Warnf("Could not get kernel version: %v", err) + } else { + kernelVersion = kv.String() + } + v.KernelVersion = kernelVersion + + return v +} + +func (daemon *Daemon) showPluginsInfo() types.PluginsInfo { + var pluginsInfo types.PluginsInfo + + pluginsInfo.Volume = volumedrivers.GetDriverList() + pluginsInfo.Network = daemon.GetNetworkDriverList() + // The authorization plugins are returned in the order they are + // used as they constitute a request/response modification chain. + pluginsInfo.Authorization = daemon.configStore.AuthorizationPlugins + pluginsInfo.Log = logger.ListDrivers() + + return pluginsInfo +} diff --git a/components/engine/daemon/info_unix.go b/components/engine/daemon/info_unix.go new file mode 100644 index 0000000000..5c387541b3 --- /dev/null +++ b/components/engine/daemon/info_unix.go @@ -0,0 +1,92 @@ +// +build !windows + +package daemon + +import ( + "context" + "os/exec" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types" + daemonconfig "github.com/docker/docker/daemon/config" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/pkg/sysinfo" + "github.com/pkg/errors" +) + +// FillPlatformInfo fills the platform related info. +func (daemon *Daemon) FillPlatformInfo(v *types.Info, sysInfo *sysinfo.SysInfo) { + v.MemoryLimit = sysInfo.MemoryLimit + v.SwapLimit = sysInfo.SwapLimit + v.KernelMemory = sysInfo.KernelMemory + v.OomKillDisable = sysInfo.OomKillDisable + v.CPUCfsPeriod = sysInfo.CPUCfsPeriod + v.CPUCfsQuota = sysInfo.CPUCfsQuota + v.CPUShares = sysInfo.CPUShares + v.CPUSet = sysInfo.Cpuset + v.Runtimes = daemon.configStore.GetAllRuntimes() + v.DefaultRuntime = daemon.configStore.GetDefaultRuntimeName() + v.InitBinary = daemon.configStore.GetInitPath() + + v.ContainerdCommit.Expected = dockerversion.ContainerdCommitID + if sv, err := daemon.containerd.GetServerVersion(context.Background()); err == nil { + v.ContainerdCommit.ID = sv.Revision + } else { + logrus.Warnf("failed to retrieve containerd version: %v", err) + v.ContainerdCommit.ID = "N/A" + } + + v.RuncCommit.Expected = dockerversion.RuncCommitID + if rv, err := exec.Command(DefaultRuntimeBinary, "--version").Output(); err == nil { + parts := strings.Split(strings.TrimSpace(string(rv)), "\n") + if len(parts) == 3 { + parts = strings.Split(parts[1], ": ") + if len(parts) == 2 { + v.RuncCommit.ID = strings.TrimSpace(parts[1]) + } + } + + if v.RuncCommit.ID == "" { + logrus.Warnf("failed to retrieve %s version: unknown output format: %s", DefaultRuntimeBinary, string(rv)) + v.RuncCommit.ID = "N/A" + } + } else { + logrus.Warnf("failed to retrieve %s version: %v", DefaultRuntimeBinary, err) + v.RuncCommit.ID = "N/A" + } + + if rv, err := exec.Command(daemonconfig.DefaultInitBinary, "--version").Output(); err == nil { + ver, err := parseInitVersion(string(rv)) + + if err != nil { + logrus.Warnf("failed to retrieve %s version: %s", daemonconfig.DefaultInitBinary, err) + } + v.InitCommit = ver + } else { + logrus.Warnf("failed to retrieve %s version: %s", 
daemonconfig.DefaultInitBinary, err) + v.InitCommit.ID = "N/A" + } +} + +// parseInitVersion parses a Tini version string, and extracts the version. +func parseInitVersion(v string) (types.Commit, error) { + version := types.Commit{ID: "", Expected: dockerversion.InitCommitID} + parts := strings.Split(strings.TrimSpace(v), " - ") + + if len(parts) >= 2 { + gitParts := strings.Split(parts[1], ".") + if len(gitParts) == 2 && gitParts[0] == "git" { + version.ID = gitParts[1] + version.Expected = dockerversion.InitCommitID[0:len(version.ID)] + } + } + if version.ID == "" && strings.HasPrefix(parts[0], "tini version ") { + version.ID = "v" + strings.TrimPrefix(parts[0], "tini version ") + } + if version.ID == "" { + version.ID = "N/A" + return version, errors.Errorf("unknown output format: %s", v) + } + return version, nil +} diff --git a/components/engine/daemon/info_unix_test.go b/components/engine/daemon/info_unix_test.go new file mode 100644 index 0000000000..ef36c40e39 --- /dev/null +++ b/components/engine/daemon/info_unix_test.go @@ -0,0 +1,52 @@ +// +build !windows + +package daemon + +import ( + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/dockerversion" + "github.com/stretchr/testify/assert" +) + +func TestParseInitVersion(t *testing.T) { + tests := []struct { + version string + result types.Commit + invalid bool + }{ + { + version: "tini version 0.13.0 - git.949e6fa", + result: types.Commit{ID: "949e6fa", Expected: dockerversion.InitCommitID[0:7]}, + }, { + version: "tini version 0.13.0\n", + result: types.Commit{ID: "v0.13.0", Expected: dockerversion.InitCommitID}, + }, { + version: "tini version 0.13.2", + result: types.Commit{ID: "v0.13.2", Expected: dockerversion.InitCommitID}, + }, { + version: "tini version0.13.2", + result: types.Commit{ID: "N/A", Expected: dockerversion.InitCommitID}, + invalid: true, + }, { + version: "", + result: types.Commit{ID: "N/A", Expected: dockerversion.InitCommitID}, + invalid: true, + }, { + version: "hello world", + result: types.Commit{ID: "N/A", Expected: dockerversion.InitCommitID}, + invalid: true, + }, + } + + for _, test := range tests { + ver, err := parseInitVersion(string(test.version)) + if test.invalid { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + assert.Equal(t, test.result, ver) + } +} diff --git a/components/engine/daemon/info_windows.go b/components/engine/daemon/info_windows.go new file mode 100644 index 0000000000..c700911eb0 --- /dev/null +++ b/components/engine/daemon/info_windows.go @@ -0,0 +1,10 @@ +package daemon + +import ( + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/sysinfo" +) + +// FillPlatformInfo fills the platform related info. +func (daemon *Daemon) FillPlatformInfo(v *types.Info, sysInfo *sysinfo.SysInfo) { +} diff --git a/components/engine/daemon/initlayer/setup_solaris.go b/components/engine/daemon/initlayer/setup_solaris.go new file mode 100644 index 0000000000..66d53f0eef --- /dev/null +++ b/components/engine/daemon/initlayer/setup_solaris.go @@ -0,0 +1,13 @@ +// +build solaris,cgo + +package initlayer + +// Setup populates a directory with mountpoints suitable +// for bind-mounting dockerinit into the container. The mountpoint is simply an +// empty file at /.dockerinit +// +// This extra layer is used by all containers as the top-most ro layer. It protects +// the container from unwanted side-effects on the rw layer. 
+func Setup(initLayer string, rootUID, rootGID int) error { + return nil +} diff --git a/components/engine/daemon/initlayer/setup_unix.go b/components/engine/daemon/initlayer/setup_unix.go new file mode 100644 index 0000000000..e83c2751ed --- /dev/null +++ b/components/engine/daemon/initlayer/setup_unix.go @@ -0,0 +1,69 @@ +// +build linux freebsd + +package initlayer + +import ( + "os" + "path/filepath" + "strings" + "syscall" + + "github.com/docker/docker/pkg/idtools" +) + +// Setup populates a directory with mountpoints suitable +// for bind-mounting things into the container. +// +// This extra layer is used by all containers as the top-most ro layer. It protects +// the container from unwanted side-effects on the rw layer. +func Setup(initLayer string, rootUID, rootGID int) error { + for pth, typ := range map[string]string{ + "/dev/pts": "dir", + "/dev/shm": "dir", + "/proc": "dir", + "/sys": "dir", + "/.dockerenv": "file", + "/etc/resolv.conf": "file", + "/etc/hosts": "file", + "/etc/hostname": "file", + "/dev/console": "file", + "/etc/mtab": "/proc/mounts", + } { + parts := strings.Split(pth, "/") + prev := "/" + for _, p := range parts[1:] { + prev = filepath.Join(prev, p) + syscall.Unlink(filepath.Join(initLayer, prev)) + } + + if _, err := os.Stat(filepath.Join(initLayer, pth)); err != nil { + if os.IsNotExist(err) { + if err := idtools.MkdirAllNewAs(filepath.Join(initLayer, filepath.Dir(pth)), 0755, rootUID, rootGID); err != nil { + return err + } + switch typ { + case "dir": + if err := idtools.MkdirAllNewAs(filepath.Join(initLayer, pth), 0755, rootUID, rootGID); err != nil { + return err + } + case "file": + f, err := os.OpenFile(filepath.Join(initLayer, pth), os.O_CREATE, 0755) + if err != nil { + return err + } + f.Chown(rootUID, rootGID) + f.Close() + default: + if err := os.Symlink(typ, filepath.Join(initLayer, pth)); err != nil { + return err + } + } + } else { + return err + } + } + } + + // Layer is ready to use, if it wasn't before. + return nil +} diff --git a/components/engine/daemon/initlayer/setup_windows.go b/components/engine/daemon/initlayer/setup_windows.go new file mode 100644 index 0000000000..48a9d71aa5 --- /dev/null +++ b/components/engine/daemon/initlayer/setup_windows.go @@ -0,0 +1,13 @@ +// +build windows + +package initlayer + +// Setup populates a directory with mountpoints suitable +// for bind-mounting dockerinit into the container. The mountpoint is simply an +// empty file at /.dockerinit +// +// This extra layer is used by all containers as the top-most ro layer. It protects +// the container from unwanted side-effects on the rw layer. +func Setup(initLayer string, rootUID, rootGID int) error { + return nil +} diff --git a/components/engine/daemon/inspect.go b/components/engine/daemon/inspect.go new file mode 100644 index 0000000000..4eb1d091d3 --- /dev/null +++ b/components/engine/daemon/inspect.go @@ -0,0 +1,269 @@ +package daemon + +import ( + "fmt" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/backend" + networktypes "github.com/docker/docker/api/types/network" + "github.com/docker/docker/api/types/versions" + "github.com/docker/docker/api/types/versions/v1p20" + "github.com/docker/docker/container" + "github.com/docker/docker/daemon/network" + "github.com/docker/go-connections/nat" +) + +// ContainerInspect returns low-level information about a +// container. Returns an error if the container cannot be found, or if +// there is an error getting the data. 
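+//
+// The concrete payload depends on the requested API version (the name and
+// version here are illustrative):
+//
+//	v, err := daemon.ContainerInspect("web", false, "1.19")
+//	// pre-1.20 payload; "1.20" yields the v1p20 form, and newer versions
+//	// the current *types.ContainerJSON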
+func (daemon *Daemon) ContainerInspect(name string, size bool, version string) (interface{}, error) {
+	switch {
+	case versions.LessThan(version, "1.20"):
+		return daemon.containerInspectPre120(name)
+	case versions.Equal(version, "1.20"):
+		return daemon.containerInspect120(name)
+	}
+	return daemon.ContainerInspectCurrent(name, size)
+}
+
+// ContainerInspectCurrent returns low-level information about a
+// container for the most recent API version.
+func (daemon *Daemon) ContainerInspectCurrent(name string, size bool) (*types.ContainerJSON, error) {
+	container, err := daemon.GetContainer(name)
+	if err != nil {
+		return nil, err
+	}
+
+	container.Lock()
+
+	base, err := daemon.getInspectData(container)
+	if err != nil {
+		container.Unlock()
+		return nil, err
+	}
+
+	apiNetworks := make(map[string]*networktypes.EndpointSettings)
+	for name, epConf := range container.NetworkSettings.Networks {
+		if epConf.EndpointSettings != nil {
+			// We must make a copy of this pointer object otherwise it can race with other operations
+			apiNetworks[name] = epConf.EndpointSettings.Copy()
+		}
+	}
+
+	mountPoints := addMountPoints(container)
+	networkSettings := &types.NetworkSettings{
+		NetworkSettingsBase: types.NetworkSettingsBase{
+			Bridge:                 container.NetworkSettings.Bridge,
+			SandboxID:              container.NetworkSettings.SandboxID,
+			HairpinMode:            container.NetworkSettings.HairpinMode,
+			LinkLocalIPv6Address:   container.NetworkSettings.LinkLocalIPv6Address,
+			LinkLocalIPv6PrefixLen: container.NetworkSettings.LinkLocalIPv6PrefixLen,
+			SandboxKey:             container.NetworkSettings.SandboxKey,
+			SecondaryIPAddresses:   container.NetworkSettings.SecondaryIPAddresses,
+			SecondaryIPv6Addresses: container.NetworkSettings.SecondaryIPv6Addresses,
+		},
+		DefaultNetworkSettings: daemon.getDefaultNetworkSettings(container.NetworkSettings.Networks),
+		Networks:               apiNetworks,
+	}
+
+	ports := make(nat.PortMap, len(container.NetworkSettings.Ports))
+	for k, pm := range container.NetworkSettings.Ports {
+		ports[k] = pm
+	}
+	networkSettings.NetworkSettingsBase.Ports = ports
+
+	container.Unlock()
+
+	if size {
+		sizeRw, sizeRootFs := daemon.getSize(base.ID)
+		base.SizeRw = &sizeRw
+		base.SizeRootFs = &sizeRootFs
+	}
+
+	return &types.ContainerJSON{
+		ContainerJSONBase: base,
+		Mounts:            mountPoints,
+		Config:            container.Config,
+		NetworkSettings:   networkSettings,
+	}, nil
+}
+
+// containerInspect120 serializes a container into the JSON payload served
+// to API version 1.20 clients (built from the v1p20 types).
+func (daemon *Daemon) containerInspect120(name string) (*v1p20.ContainerJSON, error) { + container, err := daemon.GetContainer(name) + if err != nil { + return nil, err + } + + container.Lock() + defer container.Unlock() + + base, err := daemon.getInspectData(container) + if err != nil { + return nil, err + } + + mountPoints := addMountPoints(container) + config := &v1p20.ContainerConfig{ + Config: container.Config, + MacAddress: container.Config.MacAddress, + NetworkDisabled: container.Config.NetworkDisabled, + ExposedPorts: container.Config.ExposedPorts, + VolumeDriver: container.HostConfig.VolumeDriver, + } + networkSettings := daemon.getBackwardsCompatibleNetworkSettings(container.NetworkSettings) + + return &v1p20.ContainerJSON{ + ContainerJSONBase: base, + Mounts: mountPoints, + Config: config, + NetworkSettings: networkSettings, + }, nil +} + +func (daemon *Daemon) getInspectData(container *container.Container) (*types.ContainerJSONBase, error) { + // make a copy to play with + hostConfig := *container.HostConfig + + children := daemon.children(container) + hostConfig.Links = nil // do not expose the internal structure + for linkAlias, child := range children { + hostConfig.Links = append(hostConfig.Links, fmt.Sprintf("%s:%s", child.Name, linkAlias)) + } + + // We merge the Ulimits from hostConfig with daemon default + daemon.mergeUlimits(&hostConfig) + + var containerHealth *types.Health + if container.State.Health != nil { + containerHealth = &types.Health{ + Status: container.State.Health.Status, + FailingStreak: container.State.Health.FailingStreak, + Log: append([]*types.HealthcheckResult{}, container.State.Health.Log...), + } + } + + containerState := &types.ContainerState{ + Status: container.State.StateString(), + Running: container.State.Running, + Paused: container.State.Paused, + Restarting: container.State.Restarting, + OOMKilled: container.State.OOMKilled, + Dead: container.State.Dead, + Pid: container.State.Pid, + ExitCode: container.State.ExitCode(), + Error: container.State.ErrorMsg, + StartedAt: container.State.StartedAt.Format(time.RFC3339Nano), + FinishedAt: container.State.FinishedAt.Format(time.RFC3339Nano), + Health: containerHealth, + } + + contJSONBase := &types.ContainerJSONBase{ + ID: container.ID, + Created: container.Created.Format(time.RFC3339Nano), + Path: container.Path, + Args: container.Args, + State: containerState, + Image: container.ImageID.String(), + LogPath: container.LogPath, + Name: container.Name, + RestartCount: container.RestartCount, + Driver: container.Driver, + MountLabel: container.MountLabel, + ProcessLabel: container.ProcessLabel, + ExecIDs: container.GetExecIDs(), + HostConfig: &hostConfig, + } + + // Now set any platform-specific fields + contJSONBase = setPlatformSpecificContainerFields(container, contJSONBase) + + contJSONBase.GraphDriver.Name = container.Driver + + graphDriverData, err := container.RWLayer.Metadata() + // If container is marked as Dead, the container's graphdriver metadata + // could have been removed, it will cause error if we try to get the metadata, + // we can ignore the error if the container is dead. + if err != nil && !container.Dead { + return nil, err + } + contJSONBase.GraphDriver.Data = graphDriverData + + return contJSONBase, nil +} + +// ContainerExecInspect returns low-level information about the exec +// command. An error is returned if the exec cannot be found. 
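Note how `getInspectData` deliberately copies `HostConfig` ("make a copy to play with") before rewriting `Links`, so the serialized response can be mutated without touching the live container state. The same snapshot-under-lock idea in isolation; the types here are illustrative, not engine code:

```go
package main

import (
	"fmt"
	"sync"
)

// state stands in for a container whose fields are shared across goroutines.
type state struct {
	mu    sync.Mutex
	links []string
}

// view returns a private copy under the lock; callers can freely
// mutate the result without racing against the original.
func (s *state) view() []string {
	s.mu.Lock()
	defer s.mu.Unlock()
	out := make([]string, len(s.links))
	copy(out, s.links)
	return out
}

func main() {
	s := &state{links: []string{"/db:db"}}
	v := s.view()
	v[0] = "mutated"
	fmt.Println(s.links[0]) // still "/db:db"
}
```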
+func (daemon *Daemon) ContainerExecInspect(id string) (*backend.ExecInspect, error) { + e, err := daemon.getExecConfig(id) + if err != nil { + return nil, err + } + + pc := inspectExecProcessConfig(e) + + return &backend.ExecInspect{ + ID: e.ID, + Running: e.Running, + ExitCode: e.ExitCode, + ProcessConfig: pc, + OpenStdin: e.OpenStdin, + OpenStdout: e.OpenStdout, + OpenStderr: e.OpenStderr, + CanRemove: e.CanRemove, + ContainerID: e.ContainerID, + DetachKeys: e.DetachKeys, + Pid: e.Pid, + }, nil +} + +// VolumeInspect looks up a volume by name. An error is returned if +// the volume cannot be found. +func (daemon *Daemon) VolumeInspect(name string) (*types.Volume, error) { + v, err := daemon.volumes.Get(name) + if err != nil { + return nil, err + } + apiV := volumeToAPIType(v) + apiV.Mountpoint = v.Path() + apiV.Status = v.Status() + return apiV, nil +} + +func (daemon *Daemon) getBackwardsCompatibleNetworkSettings(settings *network.Settings) *v1p20.NetworkSettings { + result := &v1p20.NetworkSettings{ + NetworkSettingsBase: types.NetworkSettingsBase{ + Bridge: settings.Bridge, + SandboxID: settings.SandboxID, + HairpinMode: settings.HairpinMode, + LinkLocalIPv6Address: settings.LinkLocalIPv6Address, + LinkLocalIPv6PrefixLen: settings.LinkLocalIPv6PrefixLen, + Ports: settings.Ports, + SandboxKey: settings.SandboxKey, + SecondaryIPAddresses: settings.SecondaryIPAddresses, + SecondaryIPv6Addresses: settings.SecondaryIPv6Addresses, + }, + DefaultNetworkSettings: daemon.getDefaultNetworkSettings(settings.Networks), + } + + return result +} + +// getDefaultNetworkSettings creates the deprecated structure that holds the information +// about the bridge network for a container. +func (daemon *Daemon) getDefaultNetworkSettings(networks map[string]*network.EndpointSettings) types.DefaultNetworkSettings { + var settings types.DefaultNetworkSettings + + if defaultNetwork, ok := networks["bridge"]; ok && defaultNetwork.EndpointSettings != nil { + settings.EndpointID = defaultNetwork.EndpointID + settings.Gateway = defaultNetwork.Gateway + settings.GlobalIPv6Address = defaultNetwork.GlobalIPv6Address + settings.GlobalIPv6PrefixLen = defaultNetwork.GlobalIPv6PrefixLen + settings.IPAddress = defaultNetwork.IPAddress + settings.IPPrefixLen = defaultNetwork.IPPrefixLen + settings.IPv6Gateway = defaultNetwork.IPv6Gateway + settings.MacAddress = defaultNetwork.MacAddress + } + return settings +} diff --git a/components/engine/daemon/inspect_solaris.go b/components/engine/daemon/inspect_solaris.go new file mode 100644 index 0000000000..0e3dcc1119 --- /dev/null +++ b/components/engine/daemon/inspect_solaris.go @@ -0,0 +1,41 @@ +package daemon + +import ( + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/api/types/versions/v1p19" + "github.com/docker/docker/container" + "github.com/docker/docker/daemon/exec" +) + +// This sets platform-specific fields +func setPlatformSpecificContainerFields(container *container.Container, contJSONBase *types.ContainerJSONBase) *types.ContainerJSONBase { + return contJSONBase +} + +// containerInspectPre120 gets containers for pre 1.20 APIs.
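`getDefaultNetworkSettings` above only populates the deprecated top-level fields when a `"bridge"` endpoint actually exists; otherwise the zero value is returned. A toy version of that flattening with stand-in types (the real structs carry many more fields):

```go
package main

import "fmt"

// endpoint and defaultSettings are illustrative stand-ins for the
// engine's per-network endpoint and deprecated top-level settings.
type endpoint struct{ IPAddress, Gateway string }

type defaultSettings struct{ IPAddress, Gateway string }

// flatten copies the bridge endpoint, if any, into the legacy shape.
func flatten(networks map[string]*endpoint) defaultSettings {
	var s defaultSettings
	if ep, ok := networks["bridge"]; ok && ep != nil {
		s.IPAddress = ep.IPAddress
		s.Gateway = ep.Gateway
	}
	return s
}

func main() {
	nets := map[string]*endpoint{
		"bridge": {IPAddress: "172.17.0.2", Gateway: "172.17.0.1"},
	}
	fmt.Printf("%+v\n", flatten(nets)) // {IPAddress:172.17.0.2 Gateway:172.17.0.1}
	fmt.Printf("%+v\n", flatten(nil))  // zero value when no bridge endpoint
}
```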
+func (daemon *Daemon) containerInspectPre120(name string) (*v1p19.ContainerJSON, error) { + return &v1p19.ContainerJSON{}, nil +} + +func addMountPoints(container *container.Container) []types.MountPoint { + mountPoints := make([]types.MountPoint, 0, len(container.MountPoints)) + for _, m := range container.MountPoints { + mountPoints = append(mountPoints, types.MountPoint{ + Name: m.Name, + Source: m.Path(), + Destination: m.Destination, + Driver: m.Driver, + RW: m.RW, + }) + } + return mountPoints +} + +func inspectExecProcessConfig(e *exec.Config) *backend.ExecProcessConfig { + return &backend.ExecProcessConfig{ + Tty: e.Tty, + Entrypoint: e.Entrypoint, + Arguments: e.Args, + } +} diff --git a/components/engine/daemon/inspect_unix.go b/components/engine/daemon/inspect_unix.go new file mode 100644 index 0000000000..8342f7cf98 --- /dev/null +++ b/components/engine/daemon/inspect_unix.go @@ -0,0 +1,92 @@ +// +build !windows,!solaris + +package daemon + +import ( + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/api/types/versions/v1p19" + "github.com/docker/docker/container" + "github.com/docker/docker/daemon/exec" +) + +// This sets platform-specific fields +func setPlatformSpecificContainerFields(container *container.Container, contJSONBase *types.ContainerJSONBase) *types.ContainerJSONBase { + contJSONBase.AppArmorProfile = container.AppArmorProfile + contJSONBase.ResolvConfPath = container.ResolvConfPath + contJSONBase.HostnamePath = container.HostnamePath + contJSONBase.HostsPath = container.HostsPath + + return contJSONBase +} + +// containerInspectPre120 gets containers for pre 1.20 APIs. +func (daemon *Daemon) containerInspectPre120(name string) (*v1p19.ContainerJSON, error) { + container, err := daemon.GetContainer(name) + if err != nil { + return nil, err + } + + container.Lock() + defer container.Unlock() + + base, err := daemon.getInspectData(container) + if err != nil { + return nil, err + } + + volumes := make(map[string]string) + volumesRW := make(map[string]bool) + for _, m := range container.MountPoints { + volumes[m.Destination] = m.Path() + volumesRW[m.Destination] = m.RW + } + + config := &v1p19.ContainerConfig{ + Config: container.Config, + MacAddress: container.Config.MacAddress, + NetworkDisabled: container.Config.NetworkDisabled, + ExposedPorts: container.Config.ExposedPorts, + VolumeDriver: container.HostConfig.VolumeDriver, + Memory: container.HostConfig.Memory, + MemorySwap: container.HostConfig.MemorySwap, + CPUShares: container.HostConfig.CPUShares, + CPUSet: container.HostConfig.CpusetCpus, + } + networkSettings := daemon.getBackwardsCompatibleNetworkSettings(container.NetworkSettings) + + return &v1p19.ContainerJSON{ + ContainerJSONBase: base, + Volumes: volumes, + VolumesRW: volumesRW, + Config: config, + NetworkSettings: networkSettings, + }, nil +} + +func addMountPoints(container *container.Container) []types.MountPoint { + mountPoints := make([]types.MountPoint, 0, len(container.MountPoints)) + for _, m := range container.MountPoints { + mountPoints = append(mountPoints, types.MountPoint{ + Type: m.Type, + Name: m.Name, + Source: m.Path(), + Destination: m.Destination, + Driver: m.Driver, + Mode: m.Mode, + RW: m.RW, + Propagation: m.Propagation, + }) + } + return mountPoints +} + +func inspectExecProcessConfig(e *exec.Config) *backend.ExecProcessConfig { + return &backend.ExecProcessConfig{ + Tty: e.Tty, + Entrypoint: e.Entrypoint, + Arguments: e.Args, + Privileged: &e.Privileged, + User: 
e.User, + } +} diff --git a/components/engine/daemon/inspect_windows.go b/components/engine/daemon/inspect_windows.go new file mode 100644 index 0000000000..b331c83ca3 --- /dev/null +++ b/components/engine/daemon/inspect_windows.go @@ -0,0 +1,41 @@ +package daemon + +import ( + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/container" + "github.com/docker/docker/daemon/exec" +) + +// This sets platform-specific fields +func setPlatformSpecificContainerFields(container *container.Container, contJSONBase *types.ContainerJSONBase) *types.ContainerJSONBase { + return contJSONBase +} + +func addMountPoints(container *container.Container) []types.MountPoint { + mountPoints := make([]types.MountPoint, 0, len(container.MountPoints)) + for _, m := range container.MountPoints { + mountPoints = append(mountPoints, types.MountPoint{ + Type: m.Type, + Name: m.Name, + Source: m.Path(), + Destination: m.Destination, + Driver: m.Driver, + RW: m.RW, + }) + } + return mountPoints +} + +// containerInspectPre120 gets containers for pre 1.20 APIs. +func (daemon *Daemon) containerInspectPre120(name string) (*types.ContainerJSON, error) { + return daemon.ContainerInspectCurrent(name, false) +} + +func inspectExecProcessConfig(e *exec.Config) *backend.ExecProcessConfig { + return &backend.ExecProcessConfig{ + Tty: e.Tty, + Entrypoint: e.Entrypoint, + Arguments: e.Args, + } +} diff --git a/components/engine/daemon/keys.go b/components/engine/daemon/keys.go new file mode 100644 index 0000000000..055d488a5d --- /dev/null +++ b/components/engine/daemon/keys.go @@ -0,0 +1,59 @@ +// +build linux + +package daemon + +import ( + "fmt" + "io/ioutil" + "os" + "strconv" + "strings" +) + +const ( + rootKeyFile = "/proc/sys/kernel/keys/root_maxkeys" + rootBytesFile = "/proc/sys/kernel/keys/root_maxbytes" + rootKeyLimit = 1000000 + // it is standard configuration to allocate 25 bytes per key + rootKeyByteMultiplier = 25 +) + +// ModifyRootKeyLimit checks to see if the root key limit is set to +// at least 1000000 and changes it to that limit along with the maxbytes +// allocated to the keys at a 25 to 1 multiplier. +func ModifyRootKeyLimit() error { + value, err := readRootKeyLimit(rootKeyFile) + if err != nil { + return err + } + if value < rootKeyLimit { + return setRootKeyLimit(rootKeyLimit) + } + return nil +} + +func setRootKeyLimit(limit int) error { + keys, err := os.OpenFile(rootKeyFile, os.O_WRONLY, 0) + if err != nil { + return err + } + defer keys.Close() + if _, err := fmt.Fprintf(keys, "%d", limit); err != nil { + return err + } + bytes, err := os.OpenFile(rootBytesFile, os.O_WRONLY, 0) + if err != nil { + return err + } + defer bytes.Close() + _, err = fmt.Fprintf(bytes, "%d", limit*rootKeyByteMultiplier) + return err +} + +func readRootKeyLimit(path string) (int, error) { + data, err := ioutil.ReadFile(path) + if err != nil { + return -1, err + } + return strconv.Atoi(strings.Trim(string(data), "\n")) +} diff --git a/components/engine/daemon/keys_unsupported.go b/components/engine/daemon/keys_unsupported.go new file mode 100644 index 0000000000..e49baf9454 --- /dev/null +++ b/components/engine/daemon/keys_unsupported.go @@ -0,0 +1,8 @@ +// +build !linux + +package daemon + +// ModifyRootKeyLimit is a no-op on unsupported platforms.
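`ModifyRootKeyLimit` above is a read-compare-write over two kernel sysctl files: raise `root_maxkeys` to 1,000,000 only if the current value is lower, and scale `root_maxbytes` with it at 25 bytes per key. The same flow, pointed at temp files instead of `/proc/sys/kernel/keys/*` so the sketch runs unprivileged (file names and starting values are illustrative):

```go
package main

import (
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
	"strconv"
	"strings"
)

const (
	keyLimit    = 1000000
	bytesPerKey = 25
)

// raise lifts the key limit to keyLimit if it is currently lower,
// and scales the byte budget at 25 bytes per key.
func raise(keysPath, bytesPath string) error {
	data, err := ioutil.ReadFile(keysPath)
	if err != nil {
		return err
	}
	cur, err := strconv.Atoi(strings.TrimSpace(string(data)))
	if err != nil {
		return err
	}
	if cur >= keyLimit {
		return nil // already high enough, leave it alone
	}
	if err := ioutil.WriteFile(keysPath, []byte(strconv.Itoa(keyLimit)), 0644); err != nil {
		return err
	}
	// maxbytes tracks the key limit at the 25-to-1 multiplier
	return ioutil.WriteFile(bytesPath, []byte(strconv.Itoa(keyLimit*bytesPerKey)), 0644)
}

func main() {
	dir, err := ioutil.TempDir("", "keys")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(dir)
	keys := filepath.Join(dir, "root_maxkeys")
	bytes := filepath.Join(dir, "root_maxbytes")
	ioutil.WriteFile(keys, []byte("200\n"), 0644)
	ioutil.WriteFile(bytes, []byte("20000\n"), 0644)
	fmt.Println(raise(keys, bytes)) // <nil>
	out, _ := ioutil.ReadFile(bytes)
	fmt.Println(string(out)) // 25000000
}
```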
+func ModifyRootKeyLimit() error { + return nil +} diff --git a/components/engine/daemon/kill.go b/components/engine/daemon/kill.go new file mode 100644 index 0000000000..72e716d098 --- /dev/null +++ b/components/engine/daemon/kill.go @@ -0,0 +1,171 @@ +package daemon + +import ( + "context" + "fmt" + "runtime" + "strings" + "syscall" + "time" + + "github.com/Sirupsen/logrus" + containerpkg "github.com/docker/docker/container" + "github.com/docker/docker/pkg/signal" +) + +type errNoSuchProcess struct { + pid int + signal int +} + +func (e errNoSuchProcess) Error() string { + return fmt.Sprintf("Cannot kill process (pid=%d) with signal %d: no such process.", e.pid, e.signal) +} + +// isErrNoSuchProcess returns true if the error +// is an instance of errNoSuchProcess. +func isErrNoSuchProcess(err error) bool { + _, ok := err.(errNoSuchProcess) + return ok +} + +// ContainerKill sends signal to the container +// If no signal is given (sig 0), then Kill with SIGKILL and wait +// for the container to exit. +// If a signal is given, then just send it to the container and return. +func (daemon *Daemon) ContainerKill(name string, sig uint64) error { + container, err := daemon.GetContainer(name) + if err != nil { + return err + } + + if sig != 0 && !signal.ValidSignalForPlatform(syscall.Signal(sig)) { + return fmt.Errorf("The %s daemon does not support signal %d", runtime.GOOS, sig) + } + + // If no signal is passed, or SIGKILL, perform regular Kill (SIGKILL + wait()) + if sig == 0 || syscall.Signal(sig) == syscall.SIGKILL { + return daemon.Kill(container) + } + return daemon.killWithSignal(container, int(sig)) +} + +// killWithSignal sends the container the given signal. This wrapper for the +// host specific kill command prepares the container before attempting +// to send the signal. An error is returned if the container is paused +// or not running, or if there is a problem returned from the +// underlying kill command. +func (daemon *Daemon) killWithSignal(container *containerpkg.Container, sig int) error { + logrus.Debugf("Sending kill signal %d to container %s", sig, container.ID) + container.Lock() + defer container.Unlock() + + // We could unpause the container for them rather than returning this error + if container.Paused { + return fmt.Errorf("Container %s is paused. Unpause the container before stopping or killing", container.ID) + } + + if !container.Running { + return errNotRunning{container.ID} + } + + if container.Config.StopSignal != "" { + containerStopSignal, err := signal.ParseSignal(container.Config.StopSignal) + if err != nil { + return err + } + if containerStopSignal == syscall.Signal(sig) { + container.ExitOnNext() + } + } else { + container.ExitOnNext() + } + + if !daemon.IsShuttingDown() { + container.HasBeenManuallyStopped = true + } + + // if the container is currently restarting we do not need to send the signal + // to the process. 
Telling the monitor that it should exit on its next event + // loop is enough + if container.Restarting { + return nil + } + + if err := daemon.kill(container, sig); err != nil { + err = fmt.Errorf("Cannot kill container %s: %s", container.ID, err) + // if the container or process no longer exists, ignore the error + if strings.Contains(err.Error(), "container not found") || + strings.Contains(err.Error(), "no such process") { + logrus.Warnf("container kill failed because of 'container not found' or 'no such process': %s", err.Error()) + } else { + return err + } + } + + attributes := map[string]string{ + "signal": fmt.Sprintf("%d", sig), + } + daemon.LogContainerEventWithAttributes(container, "kill", attributes) + return nil +} + +// Kill forcefully terminates a container. +func (daemon *Daemon) Kill(container *containerpkg.Container) error { + if !container.IsRunning() { + return errNotRunning{container.ID} + } + + // 1. Send SIGKILL + if err := daemon.killPossiblyDeadProcess(container, int(syscall.SIGKILL)); err != nil { + // While normally we might "return err" here we're not going to + // because if we can't stop the container by this point then + // it's probably because it's already stopped. Meaning, between + // the time of the IsRunning() call above and now it stopped. + // Also, since the err return will be environment specific we can't + // look for any particular (common) error that would indicate + // that the process is already dead vs something else going wrong. + // So, instead we'll give it up to 2 more seconds to complete and if + // by that time the container is still running, then the error + // we got is probably valid and so we return it to the caller. + if isErrNoSuchProcess(err) { + return nil + } + + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) + defer cancel() + + if status := <-container.Wait(ctx, containerpkg.WaitConditionNotRunning); status.Err() != nil { + return err + } + } + + // 2. Wait for the process to die; as a last resort, try to kill the process directly + if err := killProcessDirectly(container); err != nil { + if isErrNoSuchProcess(err) { + return nil + } + return err + } + + // Wait for exit with no timeout. + // Ignore returned status. + _ = <-container.Wait(context.Background(), containerpkg.WaitConditionNotRunning) + + return nil +} + +// killPossiblyDeadProcess is a wrapper around killWithSignal() suppressing "no such process" errors.
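The long comment in `Kill` above boils down to a bounded wait: after a SIGKILL that reported an error, give the container up to two seconds to turn out to be dead before trusting that error. The pattern in isolation, with a plain channel standing in for `container.Wait`:

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// waitOrErr gives a possibly-stale kill error a grace period: if the
// process exits within two seconds the error is discarded, otherwise
// it is returned as probably genuine.
func waitOrErr(done <-chan struct{}, killErr error) error {
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	select {
	case <-done:
		return nil // it exited after all; the kill error was stale
	case <-ctx.Done():
		return killErr // still running: the kill error is probably real
	}
}

func main() {
	done := make(chan struct{})
	go func() {
		time.Sleep(100 * time.Millisecond)
		close(done) // the "container" stops shortly after the kill
	}()
	fmt.Println(waitOrErr(done, errors.New("kill failed"))) // <nil>
}
```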
+func (daemon *Daemon) killPossiblyDeadProcess(container *containerpkg.Container, sig int) error { + err := daemon.killWithSignal(container, sig) + if err == syscall.ESRCH { + e := errNoSuchProcess{container.GetPID(), sig} + logrus.Debug(e) + return e + } + return err +} + +func (daemon *Daemon) kill(c *containerpkg.Container, sig int) error { + return daemon.containerd.Signal(c.ID, sig) +} diff --git a/components/engine/daemon/links.go b/components/engine/daemon/links.go new file mode 100644 index 0000000000..7f691d4f16 --- /dev/null +++ b/components/engine/daemon/links.go @@ -0,0 +1,87 @@ +package daemon + +import ( + "sync" + + "github.com/docker/docker/container" +) + +// linkIndex stores link relationships between containers, including their specified alias +// The alias is the name the parent uses to reference the child +type linkIndex struct { + // idx maps a parent->alias->child relationship + idx map[*container.Container]map[string]*container.Container + // childIdx maps child->parent->aliases + childIdx map[*container.Container]map[*container.Container]map[string]struct{} + mu sync.Mutex +} + +func newLinkIndex() *linkIndex { + return &linkIndex{ + idx: make(map[*container.Container]map[string]*container.Container), + childIdx: make(map[*container.Container]map[*container.Container]map[string]struct{}), + } +} + +// link adds indexes for the passed in parent/child/alias relationships +func (l *linkIndex) link(parent, child *container.Container, alias string) { + l.mu.Lock() + + if l.idx[parent] == nil { + l.idx[parent] = make(map[string]*container.Container) + } + l.idx[parent][alias] = child + if l.childIdx[child] == nil { + l.childIdx[child] = make(map[*container.Container]map[string]struct{}) + } + if l.childIdx[child][parent] == nil { + l.childIdx[child][parent] = make(map[string]struct{}) + } + l.childIdx[child][parent][alias] = struct{}{} + + l.mu.Unlock() +} + +// unlink removes the requested alias for the given parent/child +func (l *linkIndex) unlink(alias string, child, parent *container.Container) { + l.mu.Lock() + delete(l.idx[parent], alias) + delete(l.childIdx[child], parent) + l.mu.Unlock() +} + +// children maps all the aliases-> children for the passed in parent +// aliases here are the aliases the parent uses to refer to the child +func (l *linkIndex) children(parent *container.Container) map[string]*container.Container { + l.mu.Lock() + children := l.idx[parent] + l.mu.Unlock() + return children +} + +// parents maps all the aliases->parent for the passed in child +// aliases here are the aliases the parents use to refer to the child +func (l *linkIndex) parents(child *container.Container) map[string]*container.Container { + l.mu.Lock() + + parents := make(map[string]*container.Container) + for parent, aliases := range l.childIdx[child] { + for alias := range aliases { + parents[alias] = parent + } + } + + l.mu.Unlock() + return parents +} + +// delete deletes all link relationships referencing this container +func (l *linkIndex) delete(container *container.Container) { + l.mu.Lock() + for _, child := range l.idx[container] { + delete(l.childIdx[child], container) + } + delete(l.idx, container) + delete(l.childIdx, container) + l.mu.Unlock() +} diff --git a/components/engine/daemon/links/links.go b/components/engine/daemon/links/links.go new file mode 100644 index 0000000000..af15de046d --- /dev/null +++ b/components/engine/daemon/links/links.go @@ -0,0 +1,141 @@ +package links + +import ( + "fmt" + "path" + "strings" + + "github.com/docker/go-connections/nat" 
+) + +// Link struct holds information about parent/child linked containers +type Link struct { + // Parent container IP address + ParentIP string + // Child container IP address + ChildIP string + // Link name + Name string + // Child environment variables + ChildEnvironment []string + // Child exposed ports + Ports []nat.Port +} + +// NewLink initializes a new Link struct with the provided options. +func NewLink(parentIP, childIP, name string, env []string, exposedPorts map[nat.Port]struct{}) *Link { + var ( + i int + ports = make([]nat.Port, len(exposedPorts)) + ) + + for p := range exposedPorts { + ports[i] = p + i++ + } + + return &Link{ + Name: name, + ChildIP: childIP, + ParentIP: parentIP, + ChildEnvironment: env, + Ports: ports, + } +} + +// ToEnv creates a slice of strings containing child container information in +// the form of environment variables which will be later exported on container +// startup. +func (l *Link) ToEnv() []string { + env := []string{} + + _, n := path.Split(l.Name) + alias := strings.Replace(strings.ToUpper(n), "-", "_", -1) + + if p := l.getDefaultPort(); p != nil { + env = append(env, fmt.Sprintf("%s_PORT=%s://%s:%s", alias, p.Proto(), l.ChildIP, p.Port())) + } + + // sort the ports so that we can group the contiguous ports together + nat.Sort(l.Ports, func(ip, jp nat.Port) bool { + // If the two ports have the same number, tcp takes priority + // Sort in ascending order + return ip.Int() < jp.Int() || (ip.Int() == jp.Int() && strings.ToLower(ip.Proto()) == "tcp") + }) + + for i := 0; i < len(l.Ports); { + p := l.Ports[i] + j := nextContiguous(l.Ports, p.Int(), i) + if j > i+1 { + env = append(env, fmt.Sprintf("%s_PORT_%s_%s_START=%s://%s:%s", alias, p.Port(), strings.ToUpper(p.Proto()), p.Proto(), l.ChildIP, p.Port())) + env = append(env, fmt.Sprintf("%s_PORT_%s_%s_ADDR=%s", alias, p.Port(), strings.ToUpper(p.Proto()), l.ChildIP)) + env = append(env, fmt.Sprintf("%s_PORT_%s_%s_PROTO=%s", alias, p.Port(), strings.ToUpper(p.Proto()), p.Proto())) + env = append(env, fmt.Sprintf("%s_PORT_%s_%s_PORT_START=%s", alias, p.Port(), strings.ToUpper(p.Proto()), p.Port())) + + q := l.Ports[j] + env = append(env, fmt.Sprintf("%s_PORT_%s_%s_END=%s://%s:%s", alias, p.Port(), strings.ToUpper(q.Proto()), q.Proto(), l.ChildIP, q.Port())) + env = append(env, fmt.Sprintf("%s_PORT_%s_%s_PORT_END=%s", alias, p.Port(), strings.ToUpper(q.Proto()), q.Port())) + + i = j + 1 + continue + } else { + i++ + } + } + for _, p := range l.Ports { + env = append(env, fmt.Sprintf("%s_PORT_%s_%s=%s://%s:%s", alias, p.Port(), strings.ToUpper(p.Proto()), p.Proto(), l.ChildIP, p.Port())) + env = append(env, fmt.Sprintf("%s_PORT_%s_%s_ADDR=%s", alias, p.Port(), strings.ToUpper(p.Proto()), l.ChildIP)) + env = append(env, fmt.Sprintf("%s_PORT_%s_%s_PORT=%s", alias, p.Port(), strings.ToUpper(p.Proto()), p.Port())) + env = append(env, fmt.Sprintf("%s_PORT_%s_%s_PROTO=%s", alias, p.Port(), strings.ToUpper(p.Proto()), p.Proto())) + } + + // Load the linked container's name into the environment + env = append(env, fmt.Sprintf("%s_NAME=%s", alias, l.Name)) + + if l.ChildEnvironment != nil { + for _, v := range l.ChildEnvironment { + parts := strings.SplitN(v, "=", 2) + if len(parts) < 2 { + continue + } + // Ignore a few variables that are added during docker build (and not really relevant to linked containers) + if parts[0] == "HOME" || parts[0] == "PATH" { + continue + } + env = append(env, fmt.Sprintf("%s_ENV_%s=%s", alias, parts[0], parts[1])) + } + } + return env +} + +func nextContiguous(ports
[]nat.Port, value int, index int) int { + if index+1 == len(ports) { + return index + } + for i := index + 1; i < len(ports); i++ { + if ports[i].Int() > value+1 { + return i - 1 + } + + value++ + } + return len(ports) - 1 +} + +// Default port rules +func (l *Link) getDefaultPort() *nat.Port { + var p nat.Port + i := len(l.Ports) + + if i == 0 { + return nil + } else if i > 1 { + nat.Sort(l.Ports, func(ip, jp nat.Port) bool { + // If the two ports have the same number, tcp takes priority + // Sort in ascending order + return ip.Int() < jp.Int() || (ip.Int() == jp.Int() && strings.ToLower(ip.Proto()) == "tcp") + }) + } + p = l.Ports[0] + return &p +} diff --git a/components/engine/daemon/links/links_test.go b/components/engine/daemon/links/links_test.go new file mode 100644 index 0000000000..b852c44435 --- /dev/null +++ b/components/engine/daemon/links/links_test.go @@ -0,0 +1,213 @@ +package links + +import ( + "fmt" + "strings" + "testing" + + "github.com/docker/go-connections/nat" +) + +// Just to make life easier +func newPortNoError(proto, port string) nat.Port { + p, _ := nat.NewPort(proto, port) + return p +} + +func TestLinkNaming(t *testing.T) { + ports := make(nat.PortSet) + ports[newPortNoError("tcp", "6379")] = struct{}{} + + link := NewLink("172.0.17.3", "172.0.17.2", "/db/docker-1", nil, ports) + + rawEnv := link.ToEnv() + env := make(map[string]string, len(rawEnv)) + for _, e := range rawEnv { + parts := strings.Split(e, "=") + if len(parts) != 2 { + t.FailNow() + } + env[parts[0]] = parts[1] + } + + value, ok := env["DOCKER_1_PORT"] + + if !ok { + t.Fatal("DOCKER_1_PORT not found in env") + } + + if value != "tcp://172.0.17.2:6379" { + t.Fatalf("Expected 172.0.17.2:6379, got %s", env["DOCKER_1_PORT"]) + } +} + +func TestLinkNew(t *testing.T) { + ports := make(nat.PortSet) + ports[newPortNoError("tcp", "6379")] = struct{}{} + + link := NewLink("172.0.17.3", "172.0.17.2", "/db/docker", nil, ports) + + if link.Name != "/db/docker" { + t.Fail() + } + if link.ParentIP != "172.0.17.3" { + t.Fail() + } + if link.ChildIP != "172.0.17.2" { + t.Fail() + } + for _, p := range link.Ports { + if p != newPortNoError("tcp", "6379") { + t.Fail() + } + } +} + +func TestLinkEnv(t *testing.T) { + ports := make(nat.PortSet) + ports[newPortNoError("tcp", "6379")] = struct{}{} + + link := NewLink("172.0.17.3", "172.0.17.2", "/db/docker", []string{"PASSWORD=gordon"}, ports) + + rawEnv := link.ToEnv() + env := make(map[string]string, len(rawEnv)) + for _, e := range rawEnv { + parts := strings.Split(e, "=") + if len(parts) != 2 { + t.FailNow() + } + env[parts[0]] = parts[1] + } + if env["DOCKER_PORT"] != "tcp://172.0.17.2:6379" { + t.Fatalf("Expected 172.0.17.2:6379, got %s", env["DOCKER_PORT"]) + } + if env["DOCKER_PORT_6379_TCP"] != "tcp://172.0.17.2:6379" { + t.Fatalf("Expected tcp://172.0.17.2:6379, got %s", env["DOCKER_PORT_6379_TCP"]) + } + if env["DOCKER_PORT_6379_TCP_PROTO"] != "tcp" { + t.Fatalf("Expected tcp, got %s", env["DOCKER_PORT_6379_TCP_PROTO"]) + } + if env["DOCKER_PORT_6379_TCP_ADDR"] != "172.0.17.2" { + t.Fatalf("Expected 172.0.17.2, got %s", env["DOCKER_PORT_6379_TCP_ADDR"]) + } + if env["DOCKER_PORT_6379_TCP_PORT"] != "6379" { + t.Fatalf("Expected 6379, got %s", env["DOCKER_PORT_6379_TCP_PORT"]) + } + if env["DOCKER_NAME"] != "/db/docker" { + t.Fatalf("Expected /db/docker, got %s", env["DOCKER_NAME"]) + } + if env["DOCKER_ENV_PASSWORD"] != "gordon" { + t.Fatalf("Expected gordon, got %s", env["DOCKER_ENV_PASSWORD"]) + } +} + +func TestLinkMultipleEnv(t *testing.T) { + ports =
make(nat.PortSet) + ports[newPortNoError("tcp", "6379")] = struct{}{} + ports[newPortNoError("tcp", "6380")] = struct{}{} + ports[newPortNoError("tcp", "6381")] = struct{}{} + + link := NewLink("172.0.17.3", "172.0.17.2", "/db/docker", []string{"PASSWORD=gordon"}, ports) + + rawEnv := link.ToEnv() + env := make(map[string]string, len(rawEnv)) + for _, e := range rawEnv { + parts := strings.Split(e, "=") + if len(parts) != 2 { + t.FailNow() + } + env[parts[0]] = parts[1] + } + if env["DOCKER_PORT"] != "tcp://172.0.17.2:6379" { + t.Fatalf("Expected 172.0.17.2:6379, got %s", env["DOCKER_PORT"]) + } + if env["DOCKER_PORT_6379_TCP_START"] != "tcp://172.0.17.2:6379" { + t.Fatalf("Expected tcp://172.0.17.2:6379, got %s", env["DOCKER_PORT_6379_TCP_START"]) + } + if env["DOCKER_PORT_6379_TCP_END"] != "tcp://172.0.17.2:6381" { + t.Fatalf("Expected tcp://172.0.17.2:6381, got %s", env["DOCKER_PORT_6379_TCP_END"]) + } + if env["DOCKER_PORT_6379_TCP_PROTO"] != "tcp" { + t.Fatalf("Expected tcp, got %s", env["DOCKER_PORT_6379_TCP_PROTO"]) + } + if env["DOCKER_PORT_6379_TCP_ADDR"] != "172.0.17.2" { + t.Fatalf("Expected 172.0.17.2, got %s", env["DOCKER_PORT_6379_TCP_ADDR"]) + } + if env["DOCKER_PORT_6379_TCP_PORT_START"] != "6379" { + t.Fatalf("Expected 6379, got %s", env["DOCKER_PORT_6379_TCP_PORT_START"]) + } + if env["DOCKER_PORT_6379_TCP_PORT_END"] != "6381" { + t.Fatalf("Expected 6381, got %s", env["DOCKER_PORT_6379_TCP_PORT_END"]) + } + if env["DOCKER_NAME"] != "/db/docker" { + t.Fatalf("Expected /db/docker, got %s", env["DOCKER_NAME"]) + } + if env["DOCKER_ENV_PASSWORD"] != "gordon" { + t.Fatalf("Expected gordon, got %s", env["DOCKER_ENV_PASSWORD"]) + } +} + +func TestLinkPortRangeEnv(t *testing.T) { + ports := make(nat.PortSet) + ports[newPortNoError("tcp", "6379")] = struct{}{} + ports[newPortNoError("tcp", "6380")] = struct{}{} + ports[newPortNoError("tcp", "6381")] = struct{}{} + + link := NewLink("172.0.17.3", "172.0.17.2", "/db/docker", []string{"PASSWORD=gordon"}, ports) + + rawEnv := link.ToEnv() + env := make(map[string]string, len(rawEnv)) + for _, e := range rawEnv { + parts := strings.Split(e, "=") + if len(parts) != 2 { + t.FailNow() + } + env[parts[0]] = parts[1] + } + + if env["DOCKER_PORT"] != "tcp://172.0.17.2:6379" { + t.Fatalf("Expected 172.0.17.2:6379, got %s", env["DOCKER_PORT"]) + } + if env["DOCKER_PORT_6379_TCP_START"] != "tcp://172.0.17.2:6379" { + t.Fatalf("Expected tcp://172.0.17.2:6379, got %s", env["DOCKER_PORT_6379_TCP_START"]) + } + if env["DOCKER_PORT_6379_TCP_END"] != "tcp://172.0.17.2:6381" { + t.Fatalf("Expected tcp://172.0.17.2:6381, got %s", env["DOCKER_PORT_6379_TCP_END"]) + } + if env["DOCKER_PORT_6379_TCP_PROTO"] != "tcp" { + t.Fatalf("Expected tcp, got %s", env["DOCKER_PORT_6379_TCP_PROTO"]) + } + if env["DOCKER_PORT_6379_TCP_ADDR"] != "172.0.17.2" { + t.Fatalf("Expected 172.0.17.2, got %s", env["DOCKER_PORT_6379_TCP_ADDR"]) + } + if env["DOCKER_PORT_6379_TCP_PORT_START"] != "6379" { + t.Fatalf("Expected 6379, got %s", env["DOCKER_PORT_6379_TCP_PORT_START"]) + } + if env["DOCKER_PORT_6379_TCP_PORT_END"] != "6381" { + t.Fatalf("Expected 6381, got %s", env["DOCKER_PORT_6379_TCP_PORT_END"]) + } + if env["DOCKER_NAME"] != "/db/docker" { + t.Fatalf("Expected /db/docker, got %s", env["DOCKER_NAME"]) + } + if env["DOCKER_ENV_PASSWORD"] != "gordon" { + t.Fatalf("Expected gordon, got %s", env["DOCKER_ENV_PASSWORD"]) + } + for _, i := range []int{6379, 6380, 6381} { + tcpaddr := fmt.Sprintf("DOCKER_PORT_%d_TCP_ADDR", i) + tcpport := 
fmt.Sprintf("DOCKER_PORT_%d_TCP_PORT", i) + tcpproto := fmt.Sprintf("DOCKER_PORT_%d_TCP_PROTO", i) + tcp := fmt.Sprintf("DOCKER_PORT_%d_TCP", i) + if env[tcpaddr] != "172.0.17.2" { + t.Fatalf("Expected env %s = 172.0.17.2, got %s", tcpaddr, env[tcpaddr]) + } + if env[tcpport] != fmt.Sprintf("%d", i) { + t.Fatalf("Expected env %s = %d, got %s", tcpport, i, env[tcpport]) + } + if env[tcpproto] != "tcp" { + t.Fatalf("Expected env %s = tcp, got %s", tcpproto, env[tcpproto]) + } + if env[tcp] != fmt.Sprintf("tcp://172.0.17.2:%d", i) { + t.Fatalf("Expected env %s = tcp://172.0.17.2:%d, got %s", tcp, i, env[tcp]) + } + } +} diff --git a/components/engine/daemon/list.go b/components/engine/daemon/list.go new file mode 100644 index 0000000000..4d831460f2 --- /dev/null +++ b/components/engine/daemon/list.go @@ -0,0 +1,737 @@ +package daemon + +import ( + "errors" + "fmt" + "sort" + "strconv" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + networktypes "github.com/docker/docker/api/types/network" + "github.com/docker/docker/container" + "github.com/docker/docker/image" + "github.com/docker/docker/volume" + "github.com/docker/go-connections/nat" +) + +var acceptedVolumeFilterTags = map[string]bool{ + "dangling": true, + "name": true, + "driver": true, + "label": true, +} + +var acceptedPsFilterTags = map[string]bool{ + "ancestor": true, + "before": true, + "exited": true, + "id": true, + "isolation": true, + "label": true, + "name": true, + "status": true, + "health": true, + "since": true, + "volume": true, + "network": true, + "is-task": true, + "publish": true, + "expose": true, +} + +// iterationAction represents possible outcomes happening during the container iteration. +type iterationAction int + +// containerReducer represents a reducer for a container. +// Returns the object to serialize by the api. +type containerReducer func(*container.Container, *listContext) (*types.Container, error) + +const ( + // includeContainer is the action to include a container in the reducer. + includeContainer iterationAction = iota + // excludeContainer is the action to exclude a container in the reducer. + excludeContainer + // stopIteration is the action to stop iterating over the list of containers. + stopIteration +) + +// errStopIteration makes the iterator to stop without returning an error. +var errStopIteration = errors.New("container list iteration stopped") + +// List returns an array of all containers registered in the daemon. +func (daemon *Daemon) List() []*container.Container { + return daemon.containers.List() +} + +// listContext is the daemon generated filtering to iterate over containers. +// This is created based on the user specification from types.ContainerListOptions. 
+type listContext struct { + // idx is the container iteration index for this context + idx int + // ancestorFilter tells whether it should check ancestors or not + ancestorFilter bool + // names is a list of container names to filter with + names map[string][]string + // images is a list of images to filter with + images map[image.ID]bool + // filters is a collection of arguments to filter with, specified by the user + filters filters.Args + // exitAllowed is a list of exit codes allowed to filter with + exitAllowed []int + + // beforeFilter is a filter to ignore containers that appear before the one given + beforeFilter *container.Container + // sinceFilter is a filter to stop the filtering when the iterator arrives at the given container + sinceFilter *container.Container + + // taskFilter tells if we should filter based on whether a container is part of a task + taskFilter bool + // isTask tells us whether we should match containers that are a task (true) or not (false) + isTask bool + + // publish is a list of published ports to filter with + publish map[nat.Port]bool + // expose is a list of exposed ports to filter with + expose map[nat.Port]bool + + // ContainerListOptions holds the listing and filtering options set by the user + *types.ContainerListOptions +} + +// byContainerCreated is a temporary type used to sort a list of containers by creation time. +type byContainerCreated []*container.Container + +func (r byContainerCreated) Len() int { return len(r) } +func (r byContainerCreated) Swap(i, j int) { r[i], r[j] = r[j], r[i] } +func (r byContainerCreated) Less(i, j int) bool { + return r[i].Created.UnixNano() < r[j].Created.UnixNano() +} + +// Containers returns the list of containers to show given the user's filtering. +func (daemon *Daemon) Containers(config *types.ContainerListOptions) ([]*types.Container, error) { + return daemon.reduceContainers(config, daemon.transformContainer) +} + +func (daemon *Daemon) filterByNameIDMatches(ctx *listContext) []*container.Container { + idSearch := false + names := ctx.filters.Get("name") + ids := ctx.filters.Get("id") + if len(names)+len(ids) == 0 { + // if name or ID filters are not in use, return to + // standard behavior of walking the entire container + // list from the daemon's in-memory store + return daemon.List() + } + + // idSearch will determine if we limit name matching to the IDs + // matched from any IDs which were specified as filters + if len(ids) > 0 { + idSearch = true + } + + matches := make(map[string]bool) + // find ID matches; errors represent "not found" and can be ignored + for _, id := range ids { + if fullID, err := daemon.idIndex.Get(id); err == nil { + matches[fullID] = true + } + } + + // look for name matches; if ID filtering was used, then limit the + // search space to the matches map only; errors represent "not found" + // and can be ignored + if len(names) > 0 { + for id, idNames := range ctx.names { + // if ID filters were used and no matches on that ID were + // found, continue to next ID in the list + if idSearch && !matches[id] { + continue + } + for _, eachName := range idNames { + if ctx.filters.Match("name", eachName) { + matches[id] = true + } + } + } + } + + cntrs := make([]*container.Container, 0, len(matches)) + for id := range matches { + if c := daemon.containers.Get(id); c != nil { + cntrs = append(cntrs, c) + } + } + + // Restore sort-order after filtering + // Created gives us nanosec resolution for sorting + sort.Sort(sort.Reverse(byContainerCreated(cntrs))) + + return cntrs +} + +// reduceContainers parses the
user's filtering options and generates the list of containers to return based on a reducer. +func (daemon *Daemon) reduceContainers(config *types.ContainerListOptions, reducer containerReducer) ([]*types.Container, error) { + var ( + containers = []*types.Container{} + ) + + ctx, err := daemon.foldFilter(config) + if err != nil { + return nil, err + } + + // fastpath to only look at a subset of containers if specific name + // or ID matches were provided by the user--otherwise we potentially + // end up locking and querying many more containers than intended + containerList := daemon.filterByNameIDMatches(ctx) + + for _, container := range containerList { + t, err := daemon.reducePsContainer(container, ctx, reducer) + if err != nil { + if err != errStopIteration { + return nil, err + } + break + } + if t != nil { + containers = append(containers, t) + ctx.idx++ + } + } + + return containers, nil +} + +// reducePsContainer is the basic representation for a container as expected by the ps command. +func (daemon *Daemon) reducePsContainer(container *container.Container, ctx *listContext, reducer containerReducer) (*types.Container, error) { + container.Lock() + + // filter containers to return + action := includeContainerInList(container, ctx) + switch action { + case excludeContainer: + container.Unlock() + return nil, nil + case stopIteration: + container.Unlock() + return nil, errStopIteration + } + + // transform internal container struct into api structs + newC, err := reducer(container, ctx) + container.Unlock() + if err != nil { + return nil, err + } + + // release lock because size calculation is slow + if ctx.Size { + sizeRw, sizeRootFs := daemon.getSize(newC.ID) + newC.SizeRw = sizeRw + newC.SizeRootFs = sizeRootFs + } + return newC, nil +} + +// foldFilter generates the container filter based on the user's filtering options. 
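`foldFilter` (next) leans on one idiom throughout: walk every value of a filter key, convert or validate it, and fail fast on the first bad entry. A stand-in sketch of that shape, where `filters.Args` is approximated by a plain map rather than the engine's real type:

```go
package main

import (
	"fmt"
	"strconv"
)

// walkValues applies op to each value stored under key, stopping at
// the first error — the same contract as filters.Args.WalkValues.
func walkValues(args map[string][]string, key string, op func(string) error) error {
	for _, v := range args[key] {
		if err := op(v); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	args := map[string][]string{"exited": {"0", "137"}}

	// Collect the "exited" filter values as ints, as foldFilter does.
	var filtExited []int
	err := walkValues(args, "exited", func(value string) error {
		code, err := strconv.Atoi(value)
		if err != nil {
			return err
		}
		filtExited = append(filtExited, code)
		return nil
	})
	fmt.Println(filtExited, err) // [0 137] <nil>
}
```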
+func (daemon *Daemon) foldFilter(config *types.ContainerListOptions) (*listContext, error) { + psFilters := config.Filters + + if err := psFilters.Validate(acceptedPsFilterTags); err != nil { + return nil, err + } + + var filtExited []int + + err := psFilters.WalkValues("exited", func(value string) error { + code, err := strconv.Atoi(value) + if err != nil { + return err + } + filtExited = append(filtExited, code) + return nil + }) + if err != nil { + return nil, err + } + + err = psFilters.WalkValues("status", func(value string) error { + if !container.IsValidStateString(value) { + return fmt.Errorf("Unrecognised filter value for status: %s", value) + } + + config.All = true + return nil + }) + if err != nil { + return nil, err + } + + var taskFilter, isTask bool + if psFilters.Include("is-task") { + if psFilters.ExactMatch("is-task", "true") { + taskFilter = true + isTask = true + } else if psFilters.ExactMatch("is-task", "false") { + taskFilter = true + isTask = false + } else { + return nil, fmt.Errorf("Invalid filter 'is-task=%s'", psFilters.Get("is-task")) + } + } + + err = psFilters.WalkValues("health", func(value string) error { + if !container.IsValidHealthString(value) { + return fmt.Errorf("Unrecognised filter value for health: %s", value) + } + + return nil + }) + if err != nil { + return nil, err + } + + var beforeContFilter, sinceContFilter *container.Container + + err = psFilters.WalkValues("before", func(value string) error { + beforeContFilter, err = daemon.GetContainer(value) + return err + }) + if err != nil { + return nil, err + } + + err = psFilters.WalkValues("since", func(value string) error { + sinceContFilter, err = daemon.GetContainer(value) + return err + }) + if err != nil { + return nil, err + } + + imagesFilter := map[image.ID]bool{} + var ancestorFilter bool + if psFilters.Include("ancestor") { + ancestorFilter = true + psFilters.WalkValues("ancestor", func(ancestor string) error { + id, err := daemon.GetImageID(ancestor) + if err != nil { + logrus.Warnf("Error while looking up for image %v", ancestor) + return nil + } + if imagesFilter[id] { + // Already seen this ancestor, skip it + return nil + } + // Then walk down the graph and put the imageIds in imagesFilter + populateImageFilterByParents(imagesFilter, id, daemon.imageStore.Children) + return nil + }) + } + + publishFilter := map[nat.Port]bool{} + err = psFilters.WalkValues("publish", portOp("publish", publishFilter)) + if err != nil { + return nil, err + } + + exposeFilter := map[nat.Port]bool{} + err = psFilters.WalkValues("expose", portOp("expose", exposeFilter)) + if err != nil { + return nil, err + } + + return &listContext{ + filters: psFilters, + ancestorFilter: ancestorFilter, + images: imagesFilter, + exitAllowed: filtExited, + beforeFilter: beforeContFilter, + sinceFilter: sinceContFilter, + taskFilter: taskFilter, + isTask: isTask, + publish: publishFilter, + expose: exposeFilter, + ContainerListOptions: config, + names: daemon.nameIndex.GetAll(), + }, nil +} + +func portOp(key string, filter map[nat.Port]bool) func(value string) error { + return func(value string) error { + if strings.Contains(value, ":") { + return fmt.Errorf("filter for '%s' should not contain ':': %s", key, value) + } + // support two formats: <portnum>/[<proto>] or <startport-endport>/[<proto>] + proto, port := nat.SplitProtoPort(value) + start, end, err := nat.ParsePortRange(port) + if err != nil { + return fmt.Errorf("error while looking up for %s %s: %s", key, value, err) + } + for i := start; i <= end; i++ { + p, err := nat.NewPort(proto,
strconv.FormatUint(i, 10)) + if err != nil { + return fmt.Errorf("error while looking up for %s %s: %s", key, value, err) + } + filter[p] = true + } + return nil + } +} + +// includeContainerInList decides whether a container should be included in the output or not based on the filters. +// It also decides if the iteration should be stopped or not. +func includeContainerInList(container *container.Container, ctx *listContext) iterationAction { + // Do not include container if it's in the list before the filter container. + // Set the filter container to nil to include the rest of containers after this one. + if ctx.beforeFilter != nil { + if container.ID == ctx.beforeFilter.ID { + ctx.beforeFilter = nil + } + return excludeContainer + } + + // Stop iteration when the container arrives at the filter container + if ctx.sinceFilter != nil { + if container.ID == ctx.sinceFilter.ID { + return stopIteration + } + } + + // Do not include container if it's stopped, unless we're listing all containers or a limit is set + if !container.Running && !ctx.All && ctx.Limit <= 0 { + return excludeContainer + } + + // Do not include container if the name doesn't match + if !ctx.filters.Match("name", container.Name) { + return excludeContainer + } + + // Do not include container if the id doesn't match + if !ctx.filters.Match("id", container.ID) { + return excludeContainer + } + + if ctx.taskFilter { + if ctx.isTask != container.Managed { + return excludeContainer + } + } + + // Do not include container if any of the labels don't match + if !ctx.filters.MatchKVList("label", container.Config.Labels) { + return excludeContainer + } + + // Do not include container if isolation doesn't match + if excludeContainer == excludeByIsolation(container, ctx) { + return excludeContainer + } + + // Stop iteration when the index is over the limit + if ctx.Limit > 0 && ctx.idx == ctx.Limit { + return stopIteration + } + + // Do not include container if its exit code is not in the filter + if len(ctx.exitAllowed) > 0 { + shouldSkip := true + for _, code := range ctx.exitAllowed { + if code == container.ExitCode() && !container.Running && !container.StartedAt.IsZero() { + shouldSkip = false + break + } + } + if shouldSkip { + return excludeContainer + } + } + + // Do not include container if its status doesn't match the filter + if !ctx.filters.Match("status", container.State.StateString()) { + return excludeContainer + } + + // Do not include container if its health doesn't match the filter + if !ctx.filters.ExactMatch("health", container.State.HealthString()) { + return excludeContainer + } + + if ctx.filters.Include("volume") { + volumesByName := make(map[string]*volume.MountPoint) + for _, m := range container.MountPoints { + if m.Name != "" { + volumesByName[m.Name] = m + } else { + volumesByName[m.Source] = m + } + } + + volumeExist := fmt.Errorf("volume mounted in container") + err := ctx.filters.WalkValues("volume", func(value string) error { + if _, exist := container.MountPoints[value]; exist { + return volumeExist + } + if _, exist := volumesByName[value]; exist { + return volumeExist + } + return nil + }) + if err != volumeExist { + return excludeContainer + } + } + + if ctx.ancestorFilter { + if len(ctx.images) == 0 { + return excludeContainer + } + if !ctx.images[container.ImageID] { + return excludeContainer + } + } + + networkExist := fmt.Errorf("container part of network") + if ctx.filters.Include("network") { + err := ctx.filters.WalkValues("network", func(value string) error { + if _, ok := container.NetworkSettings.Networks[value]; ok {
return networkExist + } + for _, nw := range container.NetworkSettings.Networks { + if nw.EndpointSettings == nil { + continue + } + if strings.HasPrefix(nw.NetworkID, value) { + return networkExist + } + } + return nil + }) + if err != networkExist { + return excludeContainer + } + } + + if len(ctx.publish) > 0 { + shouldSkip := true + for port := range ctx.publish { + if _, ok := container.HostConfig.PortBindings[port]; ok { + shouldSkip = false + break + } + } + if shouldSkip { + return excludeContainer + } + } + + if len(ctx.expose) > 0 { + shouldSkip := true + for port := range ctx.expose { + if _, ok := container.Config.ExposedPorts[port]; ok { + shouldSkip = false + break + } + } + if shouldSkip { + return excludeContainer + } + } + + return includeContainer +} + +// transformContainer generates the container type expected by the docker ps command. +func (daemon *Daemon) transformContainer(container *container.Container, ctx *listContext) (*types.Container, error) { + newC := &types.Container{ + ID: container.ID, + Names: ctx.names[container.ID], + ImageID: container.ImageID.String(), + } + if newC.Names == nil { + // Dead containers will often have no name, so make sure the response isn't null + newC.Names = []string{} + } + + image := container.Config.Image // if possible keep the original ref + if image != container.ImageID.String() { + id, err := daemon.GetImageID(image) + if _, isDNE := err.(ErrImageDoesNotExist); err != nil && !isDNE { + return nil, err + } + if err != nil || id != container.ImageID { + image = container.ImageID.String() + } + } + newC.Image = image + + if len(container.Args) > 0 { + args := []string{} + for _, arg := range container.Args { + if strings.Contains(arg, " ") { + args = append(args, fmt.Sprintf("'%s'", arg)) + } else { + args = append(args, arg) + } + } + argsAsString := strings.Join(args, " ") + + newC.Command = fmt.Sprintf("%s %s", container.Path, argsAsString) + } else { + newC.Command = container.Path + } + newC.Created = container.Created.Unix() + newC.State = container.State.StateString() + newC.Status = container.State.String() + newC.HostConfig.NetworkMode = string(container.HostConfig.NetworkMode) + // copy networks to avoid races + networks := make(map[string]*networktypes.EndpointSettings) + for name, network := range container.NetworkSettings.Networks { + if network == nil || network.EndpointSettings == nil { + continue + } + networks[name] = &networktypes.EndpointSettings{ + EndpointID: network.EndpointID, + Gateway: network.Gateway, + IPAddress: network.IPAddress, + IPPrefixLen: network.IPPrefixLen, + IPv6Gateway: network.IPv6Gateway, + GlobalIPv6Address: network.GlobalIPv6Address, + GlobalIPv6PrefixLen: network.GlobalIPv6PrefixLen, + MacAddress: network.MacAddress, + NetworkID: network.NetworkID, + } + if network.IPAMConfig != nil { + networks[name].IPAMConfig = &networktypes.EndpointIPAMConfig{ + IPv4Address: network.IPAMConfig.IPv4Address, + IPv6Address: network.IPAMConfig.IPv6Address, + } + } + } + newC.NetworkSettings = &types.SummaryNetworkSettings{Networks: networks} + + newC.Ports = []types.Port{} + for port, bindings := range container.NetworkSettings.Ports { + p, err := nat.ParsePort(port.Port()) + if err != nil { + return nil, err + } + if len(bindings) == 0 { + newC.Ports = append(newC.Ports, types.Port{ + PrivatePort: uint16(p), + Type: port.Proto(), + }) + continue + } + for _, binding := range bindings { + h, err := nat.ParsePort(binding.HostPort) + if err != nil { + return nil, err + } + newC.Ports = 
append(newC.Ports, types.Port{ + PrivatePort: uint16(p), + PublicPort: uint16(h), + Type: port.Proto(), + IP: binding.HostIP, + }) + } + } + + newC.Labels = container.Config.Labels + newC.Mounts = addMountPoints(container) + + return newC, nil +} + +// Volumes lists known volumes, using the filter to restrict the range +// of volumes returned. +func (daemon *Daemon) Volumes(filter string) ([]*types.Volume, []string, error) { + var ( + volumesOut []*types.Volume + ) + volFilters, err := filters.FromParam(filter) + if err != nil { + return nil, nil, err + } + + if err := volFilters.Validate(acceptedVolumeFilterTags); err != nil { + return nil, nil, err + } + + volumes, warnings, err := daemon.volumes.List() + if err != nil { + return nil, nil, err + } + + filterVolumes, err := daemon.filterVolumes(volumes, volFilters) + if err != nil { + return nil, nil, err + } + for _, v := range filterVolumes { + apiV := volumeToAPIType(v) + if vv, ok := v.(interface { + CachedPath() string + }); ok { + apiV.Mountpoint = vv.CachedPath() + } else { + apiV.Mountpoint = v.Path() + } + volumesOut = append(volumesOut, apiV) + } + return volumesOut, warnings, nil +} + +// filterVolumes filters volume list according to user specified filter +// and returns user chosen volumes +func (daemon *Daemon) filterVolumes(vols []volume.Volume, filter filters.Args) ([]volume.Volume, error) { + // if filter is empty, return original volume list + if filter.Len() == 0 { + return vols, nil + } + + var retVols []volume.Volume + for _, vol := range vols { + if filter.Include("name") { + if !filter.Match("name", vol.Name()) { + continue + } + } + if filter.Include("driver") { + if !filter.ExactMatch("driver", vol.DriverName()) { + continue + } + } + if filter.Include("label") { + v, ok := vol.(volume.DetailedVolume) + if !ok { + continue + } + if !filter.MatchKVList("label", v.Labels()) { + continue + } + } + retVols = append(retVols, vol) + } + danglingOnly := false + if filter.Include("dangling") { + if filter.ExactMatch("dangling", "true") || filter.ExactMatch("dangling", "1") { + danglingOnly = true + } else if !filter.ExactMatch("dangling", "false") && !filter.ExactMatch("dangling", "0") { + return nil, fmt.Errorf("Invalid filter 'dangling=%s'", filter.Get("dangling")) + } + retVols = daemon.volumes.FilterByUsed(retVols, !danglingOnly) + } + return retVols, nil +} + +func populateImageFilterByParents(ancestorMap map[image.ID]bool, imageID image.ID, getChildren func(image.ID) []image.ID) { + if !ancestorMap[imageID] { + for _, id := range getChildren(imageID) { + populateImageFilterByParents(ancestorMap, id, getChildren) + } + ancestorMap[imageID] = true + } +} diff --git a/components/engine/daemon/list_unix.go b/components/engine/daemon/list_unix.go new file mode 100644 index 0000000000..91c9caccf4 --- /dev/null +++ b/components/engine/daemon/list_unix.go @@ -0,0 +1,11 @@ +// +build linux freebsd solaris + +package daemon + +import "github.com/docker/docker/container" + +// excludeByIsolation is a platform specific helper function to support PS +// filtering by Isolation. This is a Windows-only concept, so is a no-op on Unix. 
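`populateImageFilterByParents` above turns a single `ancestor` filter value into the set containing that image plus every image built from it, by recursing through the image store's `Children` lookup. The same walk with string IDs and a stubbed children map:

```go
package main

import "fmt"

// populate marks id and, recursively, all of its descendants in seen.
// children stands in for the image store's Children lookup.
func populate(seen map[string]bool, id string, children map[string][]string) {
	if seen[id] {
		return // already visited this subtree
	}
	for _, child := range children[id] {
		populate(seen, child, children)
	}
	seen[id] = true
}

func main() {
	children := map[string][]string{
		"base": {"app", "tools"},
		"app":  {"app:v2"},
	}
	seen := map[string]bool{}
	populate(seen, "base", children)
	fmt.Println(len(seen), seen["app:v2"]) // 4 true
}
```

A container then matches the `ancestor` filter exactly when its `ImageID` lands in this set, which is the check `includeContainerInList` performs against `ctx.images`.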
+func excludeByIsolation(container *container.Container, ctx *listContext) iterationAction { + return includeContainer +} diff --git a/components/engine/daemon/list_windows.go b/components/engine/daemon/list_windows.go new file mode 100644 index 0000000000..7fbcd3af26 --- /dev/null +++ b/components/engine/daemon/list_windows.go @@ -0,0 +1,20 @@ +package daemon + +import ( + "strings" + + "github.com/docker/docker/container" +) + +// excludeByIsolation is a platform specific helper function to support PS +// filtering by Isolation. This is a Windows-only concept, so is a no-op on Unix. +func excludeByIsolation(container *container.Container, ctx *listContext) iterationAction { + i := strings.ToLower(string(container.HostConfig.Isolation)) + if i == "" { + i = "default" + } + if !ctx.filters.Match("isolation", i) { + return excludeContainer + } + return includeContainer +} diff --git a/components/engine/daemon/logdrivers_linux.go b/components/engine/daemon/logdrivers_linux.go new file mode 100644 index 0000000000..ad343c1e8e --- /dev/null +++ b/components/engine/daemon/logdrivers_linux.go @@ -0,0 +1,15 @@ +package daemon + +import ( + // Importing packages here only to make sure their init gets called and + // therefore they register themselves to the logdriver factory. + _ "github.com/docker/docker/daemon/logger/awslogs" + _ "github.com/docker/docker/daemon/logger/fluentd" + _ "github.com/docker/docker/daemon/logger/gcplogs" + _ "github.com/docker/docker/daemon/logger/gelf" + _ "github.com/docker/docker/daemon/logger/journald" + _ "github.com/docker/docker/daemon/logger/jsonfilelog" + _ "github.com/docker/docker/daemon/logger/logentries" + _ "github.com/docker/docker/daemon/logger/splunk" + _ "github.com/docker/docker/daemon/logger/syslog" +) diff --git a/components/engine/daemon/logdrivers_windows.go b/components/engine/daemon/logdrivers_windows.go new file mode 100644 index 0000000000..f3002b97e2 --- /dev/null +++ b/components/engine/daemon/logdrivers_windows.go @@ -0,0 +1,13 @@ +package daemon + +import ( + // Importing packages here only to make sure their init gets called and + // therefore they register themselves to the logdriver factory. + _ "github.com/docker/docker/daemon/logger/awslogs" + _ "github.com/docker/docker/daemon/logger/etwlogs" + _ "github.com/docker/docker/daemon/logger/fluentd" + _ "github.com/docker/docker/daemon/logger/jsonfilelog" + _ "github.com/docker/docker/daemon/logger/logentries" + _ "github.com/docker/docker/daemon/logger/splunk" + _ "github.com/docker/docker/daemon/logger/syslog" +) diff --git a/components/engine/daemon/logger/adapter.go b/components/engine/daemon/logger/adapter.go new file mode 100644 index 0000000000..e6d7598b44 --- /dev/null +++ b/components/engine/daemon/logger/adapter.go @@ -0,0 +1,135 @@ +package logger + +import ( + "io" + "os" + "sync" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types/plugins/logdriver" + "github.com/docker/docker/pkg/plugingetter" + "github.com/pkg/errors" +) + +// pluginAdapter takes a plugin and implements the Logger interface for logger +// instances +type pluginAdapter struct { + driverName string + id string + plugin logPlugin + fifoPath string + capabilities Capability + logInfo Info + + // synchronize access to the log stream and shared buffer + mu sync.Mutex + enc logdriver.LogEntryEncoder + stream io.WriteCloser + // buf is shared for each `Log()` call to reduce allocations. 
+ // buf must be protected by mutex + buf logdriver.LogEntry +} + +func (a *pluginAdapter) Log(msg *Message) error { + a.mu.Lock() + + a.buf.Line = msg.Line + a.buf.TimeNano = msg.Timestamp.UnixNano() + a.buf.Partial = msg.Partial + a.buf.Source = msg.Source + + err := a.enc.Encode(&a.buf) + a.buf.Reset() + + a.mu.Unlock() + + PutMessage(msg) + return err +} + +func (a *pluginAdapter) Name() string { + return a.driverName +} + +func (a *pluginAdapter) Close() error { + a.mu.Lock() + defer a.mu.Unlock() + + if err := a.plugin.StopLogging(a.fifoPath); err != nil { + return err + } + + if err := a.stream.Close(); err != nil { + logrus.WithError(err).Error("error closing plugin fifo") + } + if err := os.Remove(a.fifoPath); err != nil && !os.IsNotExist(err) { + logrus.WithError(err).Error("error cleaning up plugin fifo") + } + + // may be nil, especially for unit tests + if pluginGetter != nil { + pluginGetter.Get(a.Name(), extName, plugingetter.Release) + } + return nil +} + +type pluginAdapterWithRead struct { + *pluginAdapter +} + +func (a *pluginAdapterWithRead) ReadLogs(config ReadConfig) *LogWatcher { + watcher := NewLogWatcher() + + go func() { + defer close(watcher.Msg) + stream, err := a.plugin.ReadLogs(a.logInfo, config) + if err != nil { + watcher.Err <- errors.Wrap(err, "error getting log reader") + return + } + defer stream.Close() + + dec := logdriver.NewLogEntryDecoder(stream) + for { + select { + case <-watcher.WatchClose(): + return + default: + } + + var buf logdriver.LogEntry + if err := dec.Decode(&buf); err != nil { + if err == io.EOF { + return + } + select { + case watcher.Err <- errors.Wrap(err, "error decoding log message"): + case <-watcher.WatchClose(): + } + return + } + + msg := &Message{ + Timestamp: time.Unix(0, buf.TimeNano), + Line: buf.Line, + Source: buf.Source, + } + + // plugin should handle this, but check just in case + if !config.Since.IsZero() && msg.Timestamp.Before(config.Since) { + continue + } + + select { + case watcher.Msg <- msg: + case <-watcher.WatchClose(): + // make sure the message we consumed is sent + watcher.Msg <- msg + return + } + } + }() + + return watcher +} diff --git a/components/engine/daemon/logger/adapter_test.go b/components/engine/daemon/logger/adapter_test.go new file mode 100644 index 0000000000..707550e7e3 --- /dev/null +++ b/components/engine/daemon/logger/adapter_test.go @@ -0,0 +1,208 @@ +package logger + +import ( + "bytes" + "encoding/binary" + "io" + "io/ioutil" + "os" + "runtime" + "testing" + "time" + + "github.com/docker/docker/api/types/plugins/logdriver" + protoio "github.com/gogo/protobuf/io" +) + +// mockLoggingPlugin implements the loggingPlugin interface for testing purposes +// it only supports a single log stream +type mockLoggingPlugin struct { + inStream io.ReadCloser + f *os.File + closed chan struct{} + t *testing.T +} + +func (l *mockLoggingPlugin) StartLogging(file string, info Info) error { + go func() { + io.Copy(l.f, l.inStream) + close(l.closed) + }() + return nil +} + +func (l *mockLoggingPlugin) StopLogging(file string) error { + l.inStream.Close() + l.f.Close() + os.Remove(l.f.Name()) + return nil +} + +func (l *mockLoggingPlugin) Capabilities() (cap Capability, err error) { + return Capability{ReadLogs: true}, nil +} + +func (l *mockLoggingPlugin) ReadLogs(info Info, config ReadConfig) (io.ReadCloser, error) { + r, w := io.Pipe() + f, err := os.Open(l.f.Name()) + if err != nil { + return nil, err + } + go func() { + defer f.Close() + dec := protoio.NewUint32DelimitedReader(f, binary.BigEndian, 
1e6) + enc := logdriver.NewLogEntryEncoder(w) + + for { + select { + case <-l.closed: + w.Close() + return + default: + } + + var msg logdriver.LogEntry + if err := dec.ReadMsg(&msg); err != nil { + if err == io.EOF { + if !config.Follow { + w.Close() + return + } + dec = protoio.NewUint32DelimitedReader(f, binary.BigEndian, 1e6) + continue + } + + l.t.Fatal(err) + continue + } + + if err := enc.Encode(&msg); err != nil { + w.CloseWithError(err) + return + } + } + }() + + return r, nil +} + +func newMockPluginAdapter(t *testing.T) Logger { + r, w := io.Pipe() + f, err := ioutil.TempFile("", "mock-plugin-adapter") + if err != nil { + t.Fatal(err) + } + enc := logdriver.NewLogEntryEncoder(w) + a := &pluginAdapterWithRead{ + &pluginAdapter{ + plugin: &mockLoggingPlugin{ + inStream: r, + f: f, + closed: make(chan struct{}), + t: t, + }, + stream: w, + enc: enc, + }, + } + a.plugin.StartLogging("", Info{}) + return a +} + +func TestAdapterReadLogs(t *testing.T) { + l := newMockPluginAdapter(t) + + testMsg := []Message{ + {Line: []byte("Are you the keymaker?"), Timestamp: time.Now()}, + {Line: []byte("Follow the white rabbit"), Timestamp: time.Now()}, + } + for _, msg := range testMsg { + m := msg.copy() + if err := l.Log(m); err != nil { + t.Fatal(err) + } + } + + lr, ok := l.(LogReader) + if !ok { + t.Fatal("expected log reader") + } + + lw := lr.ReadLogs(ReadConfig{}) + + for _, x := range testMsg { + select { + case msg := <-lw.Msg: + testMessageEqual(t, &x, msg) + case <-time.After(10 * time.Millisecond): + t.Fatal("timeout reading logs") + } + } + + select { + case _, ok := <-lw.Msg: + if ok { + t.Fatal("expected message channel to be closed") + } + case <-time.After(10 * time.Second): + t.Fatal("timeout waiting for message channel to close") + + } + lw.Close() + + lw = lr.ReadLogs(ReadConfig{Follow: true}) + for _, x := range testMsg { + select { + case msg := <-lw.Msg: + testMessageEqual(t, &x, msg) + case <-time.After(10 * time.Second): + t.Fatal("timeout reading logs") + } + } + + x := Message{Line: []byte("To infinity and beyond!"), Timestamp: time.Now()} + + if err := l.Log(x.copy()); err != nil { + t.Fatal(err) + } + + select { + case msg, ok := <-lw.Msg: + if !ok { + t.Fatal("message channel unexpectedly closed") + } + testMessageEqual(t, &x, msg) + case <-time.After(10 * time.Second): + t.Fatal("timeout reading logs") + } + + l.Close() + select { + case msg, ok := <-lw.Msg: + if ok { + t.Fatal("expected message channel to be closed") + } + if msg != nil { + t.Fatal("expected nil message") + } + case <-time.After(10 * time.Second): + t.Fatal("timeout waiting for logger to close") + } +} + +func testMessageEqual(t *testing.T, a, b *Message) { + _, _, n, _ := runtime.Caller(1) + errFmt := "line %d: expected same messages:\nwant: %+v\nhave: %+v" + + if !bytes.Equal(a.Line, b.Line) { + t.Fatalf(errFmt, n, *a, *b) + } + + if a.Timestamp.UnixNano() != b.Timestamp.UnixNano() { + t.Fatalf(errFmt, n, *a, *b) + } + + if a.Source != b.Source { + t.Fatalf(errFmt, n, *a, *b) + } +} diff --git a/components/engine/daemon/logger/awslogs/cloudwatchlogs.go b/components/engine/daemon/logger/awslogs/cloudwatchlogs.go new file mode 100644 index 0000000000..d44f09d6a0 --- /dev/null +++ b/components/engine/daemon/logger/awslogs/cloudwatchlogs.go @@ -0,0 +1,598 @@ +// Package awslogs provides the logdriver for forwarding container logs to Amazon CloudWatch Logs +package awslogs + +import ( + "bytes" + "fmt" + "os" + "regexp" + "runtime" + "sort" + "strconv" + "strings" + "sync" + "time" + 
"github.com/Sirupsen/logrus" + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/ec2metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/cloudwatchlogs" + "github.com/docker/docker/daemon/logger" + "github.com/docker/docker/daemon/logger/loggerutils" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/pkg/templates" + "github.com/pkg/errors" +) + +const ( + name = "awslogs" + regionKey = "awslogs-region" + regionEnvKey = "AWS_REGION" + logGroupKey = "awslogs-group" + logStreamKey = "awslogs-stream" + logCreateGroupKey = "awslogs-create-group" + tagKey = "tag" + datetimeFormatKey = "awslogs-datetime-format" + multilinePatternKey = "awslogs-multiline-pattern" + batchPublishFrequency = 5 * time.Second + + // See: http://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutLogEvents.html + perEventBytes = 26 + maximumBytesPerPut = 1048576 + maximumLogEventsPerPut = 10000 + + // See: http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/cloudwatch_limits.html + maximumBytesPerEvent = 262144 - perEventBytes + + resourceAlreadyExistsCode = "ResourceAlreadyExistsException" + dataAlreadyAcceptedCode = "DataAlreadyAcceptedException" + invalidSequenceTokenCode = "InvalidSequenceTokenException" + resourceNotFoundCode = "ResourceNotFoundException" + + userAgentHeader = "User-Agent" +) + +type logStream struct { + logStreamName string + logGroupName string + logCreateGroup bool + multilinePattern *regexp.Regexp + client api + messages chan *logger.Message + lock sync.RWMutex + closed bool + sequenceToken *string +} + +type api interface { + CreateLogGroup(*cloudwatchlogs.CreateLogGroupInput) (*cloudwatchlogs.CreateLogGroupOutput, error) + CreateLogStream(*cloudwatchlogs.CreateLogStreamInput) (*cloudwatchlogs.CreateLogStreamOutput, error) + PutLogEvents(*cloudwatchlogs.PutLogEventsInput) (*cloudwatchlogs.PutLogEventsOutput, error) +} + +type regionFinder interface { + Region() (string, error) +} + +type wrappedEvent struct { + inputLogEvent *cloudwatchlogs.InputLogEvent + insertOrder int +} +type byTimestamp []wrappedEvent + +// init registers the awslogs driver +func init() { + if err := logger.RegisterLogDriver(name, New); err != nil { + logrus.Fatal(err) + } + if err := logger.RegisterLogOptValidator(name, ValidateLogOpt); err != nil { + logrus.Fatal(err) + } +} + +// New creates an awslogs logger using the configuration passed in on the +// context. Supported context configuration variables are awslogs-region, +// awslogs-group, awslogs-stream, awslogs-create-group, awslogs-multiline-pattern +// and awslogs-datetime-format. When available, configuration is +// also taken from environment variables AWS_REGION, AWS_ACCESS_KEY_ID, +// AWS_SECRET_ACCESS_KEY, the shared credentials file (~/.aws/credentials), and +// the EC2 Instance Metadata Service. 
+func New(info logger.Info) (logger.Logger, error) { + logGroupName := info.Config[logGroupKey] + logStreamName, err := loggerutils.ParseLogTag(info, "{{.FullID}}") + if err != nil { + return nil, err + } + logCreateGroup := false + if info.Config[logCreateGroupKey] != "" { + logCreateGroup, err = strconv.ParseBool(info.Config[logCreateGroupKey]) + if err != nil { + return nil, err + } + } + + if info.Config[logStreamKey] != "" { + logStreamName = info.Config[logStreamKey] + } + + multilinePattern, err := parseMultilineOptions(info) + if err != nil { + return nil, err + } + + client, err := newAWSLogsClient(info) + if err != nil { + return nil, err + } + containerStream := &logStream{ + logStreamName: logStreamName, + logGroupName: logGroupName, + logCreateGroup: logCreateGroup, + multilinePattern: multilinePattern, + client: client, + messages: make(chan *logger.Message, 4096), + } + err = containerStream.create() + if err != nil { + return nil, err + } + go containerStream.collectBatch() + + return containerStream, nil +} + +// parseMultilineOptions parses the awslogs-multiline-pattern and awslogs-datetime-format options. +// If awslogs-datetime-format is present, it converts the format from strftime +// to a regexp and returns it. +// If awslogs-multiline-pattern is present, it compiles the regexp and returns it. +func parseMultilineOptions(info logger.Info) (*regexp.Regexp, error) { + dateTimeFormat := info.Config[datetimeFormatKey] + multilinePatternValue := info.Config[multilinePatternKey] + // strftime input is parsed into a regular expression + if dateTimeFormat != "" { + // %. matches each strftime format sequence and ReplaceAllStringFunc + // looks up each format sequence in the conversion table strftimeToRegex + // to replace with a defined regular expression + r := regexp.MustCompile("%.") + multilinePatternValue = r.ReplaceAllStringFunc(dateTimeFormat, func(s string) string { + return strftimeToRegex[s] + }) + } + if multilinePatternValue != "" { + multilinePattern, err := regexp.Compile(multilinePatternValue) + if err != nil { + return nil, errors.Wrapf(err, "awslogs could not parse multiline pattern key %q", multilinePatternValue) + } + return multilinePattern, nil + } + return nil, nil +} + +// strftimeToRegex maps strftime format strings to regex +var strftimeToRegex = map[string]string{ + /*weekdayShort */ `%a`: `(?:Mon|Tue|Wed|Thu|Fri|Sat|Sun)`, + /*weekdayFull */ `%A`: `(?:Monday|Tuesday|Wednesday|Thursday|Friday|Saturday|Sunday)`, + /*weekdayZeroIndex */ `%w`: `[0-6]`, + /*dayZeroPadded */ `%d`: `(?:0[1-9]|[12][0-9]|3[01])`, + /*monthShort */ `%b`: `(?:Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec)`, + /*monthFull */ `%B`: `(?:January|February|March|April|May|June|July|August|September|October|November|December)`, + /*monthZeroPadded */ `%m`: `(?:0[1-9]|1[0-2])`, + /*yearCentury */ `%Y`: `\d{4}`, + /*yearZeroPadded */ `%y`: `\d{2}`, + /*hour24ZeroPadded */ `%H`: `(?:[01][0-9]|2[0-3])`, + /*hour12ZeroPadded */ `%I`: `(?:0[0-9]|1[0-2])`, + /*AM or PM */ `%p`: "[AP]M", + /*minuteZeroPadded */ `%M`: `[0-5][0-9]`, + /*secondZeroPadded */ `%S`: `[0-5][0-9]`, + /*microsecondZeroPadded */ `%f`: `\d{6}`, + /*utcOffset */ `%z`: `[+-]\d{4}`, + /*tzName */ `%Z`: `[A-Z]{1,4}T`, + /*dayOfYearZeroPadded */ `%j`: `(?:0[0-9][1-9]|[12][0-9][0-9]|3[0-5][0-9]|36[0-6])`, + /*milliseconds */ `%L`: `\.\d{3}`, +} + +func parseLogGroup(info logger.Info, groupTemplate string) (string, error) { + tmpl, err := templates.NewParse("log-group", groupTemplate) + if err != nil { + return "", err + } + buf := new(bytes.Buffer) + if err := tmpl.Execute(buf, &info); err
!= nil { + return "", err + } + + return buf.String(), nil +} + +// newRegionFinder is a variable such that the implementation +// can be swapped out for unit tests. +var newRegionFinder = func() regionFinder { + return ec2metadata.New(session.New()) +} + +// newAWSLogsClient creates the service client for Amazon CloudWatch Logs. +// Customizations to the default client from the SDK include a Docker-specific +// User-Agent string and automatic region detection using the EC2 Instance +// Metadata Service when region is otherwise unspecified. +func newAWSLogsClient(info logger.Info) (api, error) { + var region *string + if os.Getenv(regionEnvKey) != "" { + region = aws.String(os.Getenv(regionEnvKey)) + } + if info.Config[regionKey] != "" { + region = aws.String(info.Config[regionKey]) + } + if region == nil || *region == "" { + logrus.Info("Trying to get region from EC2 Metadata") + ec2MetadataClient := newRegionFinder() + r, err := ec2MetadataClient.Region() + if err != nil { + logrus.WithFields(logrus.Fields{ + "error": err, + }).Error("Could not get region from EC2 metadata, environment, or log option") + return nil, errors.New("Cannot determine region for awslogs driver") + } + region = &r + } + logrus.WithFields(logrus.Fields{ + "region": *region, + }).Debug("Created awslogs client") + + client := cloudwatchlogs.New(session.New(), aws.NewConfig().WithRegion(*region)) + + client.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "DockerUserAgentHandler", + Fn: func(r *request.Request) { + currentAgent := r.HTTPRequest.Header.Get(userAgentHeader) + r.HTTPRequest.Header.Set(userAgentHeader, + fmt.Sprintf("Docker %s (%s) %s", + dockerversion.Version, runtime.GOOS, currentAgent)) + }, + }) + return client, nil +} + +// Name returns the name of the awslogs logging driver +func (l *logStream) Name() string { + return name +} + +// Log submits messages for logging by an instance of the awslogs logging driver +func (l *logStream) Log(msg *logger.Message) error { + l.lock.RLock() + defer l.lock.RUnlock() + if !l.closed { + l.messages <- msg + } + return nil +} + +// Close closes the instance of the awslogs logging driver +func (l *logStream) Close() error { + l.lock.Lock() + defer l.lock.Unlock() + if !l.closed { + close(l.messages) + } + l.closed = true + return nil +} + +// create creates log group and log stream for the instance of the awslogs logging driver +func (l *logStream) create() error { + if err := l.createLogStream(); err != nil { + if l.logCreateGroup { + if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == resourceNotFoundCode { + if err := l.createLogGroup(); err != nil { + return err + } + return l.createLogStream() + } + } + return err + } + + return nil +} + +// createLogGroup creates a log group for the instance of the awslogs logging driver +func (l *logStream) createLogGroup() error { + if _, err := l.client.CreateLogGroup(&cloudwatchlogs.CreateLogGroupInput{ + LogGroupName: aws.String(l.logGroupName), + }); err != nil { + if awsErr, ok := err.(awserr.Error); ok { + fields := logrus.Fields{ + "errorCode": awsErr.Code(), + "message": awsErr.Message(), + "origError": awsErr.OrigErr(), + "logGroupName": l.logGroupName, + "logCreateGroup": l.logCreateGroup, + } + if awsErr.Code() == resourceAlreadyExistsCode { + // Allow creation to succeed + logrus.WithFields(fields).Info("Log group already exists") + return nil + } + logrus.WithFields(fields).Error("Failed to create log group") + } + return err + } + return nil +} + +// createLogStream creates a log stream for 
the instance of the awslogs logging driver +func (l *logStream) createLogStream() error { + input := &cloudwatchlogs.CreateLogStreamInput{ + LogGroupName: aws.String(l.logGroupName), + LogStreamName: aws.String(l.logStreamName), + } + + _, err := l.client.CreateLogStream(input) + + if err != nil { + if awsErr, ok := err.(awserr.Error); ok { + fields := logrus.Fields{ + "errorCode": awsErr.Code(), + "message": awsErr.Message(), + "origError": awsErr.OrigErr(), + "logGroupName": l.logGroupName, + "logStreamName": l.logStreamName, + } + if awsErr.Code() == resourceAlreadyExistsCode { + // Allow creation to succeed + logrus.WithFields(fields).Info("Log stream already exists") + return nil + } + logrus.WithFields(fields).Error("Failed to create log stream") + } + } + return err +} + +// newTicker is used for time-based batching. newTicker is a variable such +// that the implementation can be swapped out for unit tests. +var newTicker = func(freq time.Duration) *time.Ticker { + return time.NewTicker(freq) +} + +// collectBatch executes as a goroutine to perform batching of log events for +// submission to the log stream. If the awslogs-multiline-pattern or +// awslogs-datetime-format options have been configured, multiline processing +// is enabled, where log messages are stored in an event buffer until a multiline +// pattern match is found, at which point the messages in the event buffer are +// pushed to CloudWatch logs as a single log event. Multiline messages are processed +// according to the maximumBytesPerPut constraint, and the implementation only +// allows for messages to be buffered for a maximum of 2*batchPublishFrequency. +// When events are ready to be processed for submission to CloudWatch +// Logs, the processEvent method is called. If a multiline pattern is not +// configured, log events are submitted to the processEvent method immediately.
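+// As a concrete illustration of that bound: a buffered multiline event created just after a tick is still younger than batchPublishFrequency when the next tick fires, so it survives that tick and is flushed by the one after it, roughly 2*batchPublishFrequency later.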
+func (l *logStream) collectBatch() { + timer := newTicker(batchPublishFrequency) + var events []wrappedEvent + var eventBuffer []byte + var eventBufferTimestamp int64 + for { + select { + case t := <-timer.C: + // If event buffer is older than batch publish frequency flush the event buffer + if eventBufferTimestamp > 0 && len(eventBuffer) > 0 { + eventBufferAge := t.UnixNano()/int64(time.Millisecond) - eventBufferTimestamp + eventBufferExpired := eventBufferAge > int64(batchPublishFrequency)/int64(time.Millisecond) + eventBufferNegative := eventBufferAge < 0 + if eventBufferExpired || eventBufferNegative { + events = l.processEvent(events, eventBuffer, eventBufferTimestamp) + // Reset the buffer so the flushed content is not republished on the next flush + eventBuffer = eventBuffer[:0] + } + } + l.publishBatch(events) + events = events[:0] + case msg, more := <-l.messages: + if !more { + // Flush event buffer + events = l.processEvent(events, eventBuffer, eventBufferTimestamp) + l.publishBatch(events) + return + } + if eventBufferTimestamp == 0 { + eventBufferTimestamp = msg.Timestamp.UnixNano() / int64(time.Millisecond) + } + unprocessedLine := msg.Line + if l.multilinePattern != nil { + if l.multilinePattern.Match(unprocessedLine) { + // This is a new log event so flush the current eventBuffer to events + events = l.processEvent(events, eventBuffer, eventBufferTimestamp) + eventBufferTimestamp = msg.Timestamp.UnixNano() / int64(time.Millisecond) + eventBuffer = eventBuffer[:0] + } + // If we will exceed max bytes per event flush the current event buffer before appending + if len(eventBuffer)+len(unprocessedLine) > maximumBytesPerEvent { + events = l.processEvent(events, eventBuffer, eventBufferTimestamp) + eventBuffer = eventBuffer[:0] + } + // Append new line + processedLine := append(unprocessedLine, "\n"...) + eventBuffer = append(eventBuffer, processedLine...) + logger.PutMessage(msg) + } else { + events = l.processEvent(events, unprocessedLine, msg.Timestamp.UnixNano()/int64(time.Millisecond)) + logger.PutMessage(msg) + } + } + } +} + +// processEvent processes log events that are ready for submission to CloudWatch +// logs. Batching is performed on both a time and a size basis. Time-based batching +// occurs at a 5 second interval (defined in the batchPublishFrequency const). +// Size-based batching is performed on the maximum number of events per batch +// (defined in maximumLogEventsPerPut) and the maximum number of total bytes in a +// batch (defined in maximumBytesPerPut). Log messages are split by the maximum +// bytes per event (defined in maximumBytesPerEvent). There is a fixed per-event +// byte overhead (defined in perEventBytes) which is accounted for in split- and +// batch-calculations. +func (l *logStream) processEvent(events []wrappedEvent, unprocessedLine []byte, timestamp int64) []wrappedEvent { + bytes := 0 + for len(unprocessedLine) > 0 { + // Split line length so it does not exceed the maximum + lineBytes := len(unprocessedLine) + if lineBytes > maximumBytesPerEvent { + lineBytes = maximumBytesPerEvent + } + line := unprocessedLine[:lineBytes] + unprocessedLine = unprocessedLine[lineBytes:] + if (len(events) >= maximumLogEventsPerPut) || (bytes+lineBytes+perEventBytes > maximumBytesPerPut) { + // Publish an existing batch if it's already over the maximum number of events or if adding this + // event would push it over the maximum number of total bytes.
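+			// Illustrative arithmetic: with perEventBytes = 26 of fixed overhead, even a full batch of 10000 one-byte events accounts for only 10000*(1+26) = 270000 bytes, so the event-count cap and the 1048576-byte cap can each be the binding limit.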
+ l.publishBatch(events) + events = events[:0] + bytes = 0 + } + events = append(events, wrappedEvent{ + inputLogEvent: &cloudwatchlogs.InputLogEvent{ + Message: aws.String(string(line)), + Timestamp: aws.Int64(timestamp), + }, + insertOrder: len(events), + }) + bytes += (lineBytes + perEventBytes) + } + return events +} + +// publishBatch calls PutLogEvents for a given set of InputLogEvents, +// accounting for sequencing requirements (each request must reference the +// sequence token returned by the previous request). +func (l *logStream) publishBatch(events []wrappedEvent) { + if len(events) == 0 { + return + } + + // events in a batch must be sorted by timestamp + // see http://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutLogEvents.html + sort.Sort(byTimestamp(events)) + cwEvents := unwrapEvents(events) + + nextSequenceToken, err := l.putLogEvents(cwEvents, l.sequenceToken) + + if err != nil { + if awsErr, ok := err.(awserr.Error); ok { + if awsErr.Code() == dataAlreadyAcceptedCode { + // already submitted, just grab the correct sequence token + parts := strings.Split(awsErr.Message(), " ") + nextSequenceToken = &parts[len(parts)-1] + logrus.WithFields(logrus.Fields{ + "errorCode": awsErr.Code(), + "message": awsErr.Message(), + "logGroupName": l.logGroupName, + "logStreamName": l.logStreamName, + }).Info("Data already accepted, ignoring error") + err = nil + } else if awsErr.Code() == invalidSequenceTokenCode { + // sequence code is bad, grab the correct one and retry + parts := strings.Split(awsErr.Message(), " ") + token := parts[len(parts)-1] + nextSequenceToken, err = l.putLogEvents(cwEvents, &token) + } + } + } + if err != nil { + logrus.Error(err) + } else { + l.sequenceToken = nextSequenceToken + } +} + +// putLogEvents wraps the PutLogEvents API +func (l *logStream) putLogEvents(events []*cloudwatchlogs.InputLogEvent, sequenceToken *string) (*string, error) { + input := &cloudwatchlogs.PutLogEventsInput{ + LogEvents: events, + SequenceToken: sequenceToken, + LogGroupName: aws.String(l.logGroupName), + LogStreamName: aws.String(l.logStreamName), + } + resp, err := l.client.PutLogEvents(input) + if err != nil { + if awsErr, ok := err.(awserr.Error); ok { + logrus.WithFields(logrus.Fields{ + "errorCode": awsErr.Code(), + "message": awsErr.Message(), + "origError": awsErr.OrigErr(), + "logGroupName": l.logGroupName, + "logStreamName": l.logStreamName, + }).Error("Failed to put log events") + } + return nil, err + } + return resp.NextSequenceToken, nil +} + +// ValidateLogOpt looks for awslogs-specific log options awslogs-region, +// awslogs-group, awslogs-stream, awslogs-create-group, awslogs-datetime-format, +// awslogs-multiline-pattern +func ValidateLogOpt(cfg map[string]string) error { + for key := range cfg { + switch key { + case logGroupKey: + case logStreamKey: + case logCreateGroupKey: + case regionKey: + case tagKey: + case datetimeFormatKey: + case multilinePatternKey: + default: + return fmt.Errorf("unknown log opt '%s' for %s log driver", key, name) + } + } + if cfg[logGroupKey] == "" { + return fmt.Errorf("must specify a value for log opt '%s'", logGroupKey) + } + if cfg[logCreateGroupKey] != "" { + if _, err := strconv.ParseBool(cfg[logCreateGroupKey]); err != nil { + return fmt.Errorf("must specify valid value for log opt '%s': %v", logCreateGroupKey, err) + } + } + _, datetimeFormatKeyExists := cfg[datetimeFormatKey] + _, multilinePatternKeyExists := cfg[multilinePatternKey] + if datetimeFormatKeyExists && multilinePatternKeyExists { + 
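// Both options ultimately yield the same multiline regexp (parseMultilineOptions converts the strftime format into a regexp), so only one of the two may be set. +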
return fmt.Errorf("you cannot configure log opt '%s' and '%s' at the same time", datetimeFormatKey, multilinePatternKey) + } + return nil +} + +// Len returns the length of a byTimestamp slice. Len is required by the +// sort.Interface interface. +func (slice byTimestamp) Len() int { + return len(slice) +} + +// Less compares two values in a byTimestamp slice by Timestamp. Less is +// required by the sort.Interface interface. +func (slice byTimestamp) Less(i, j int) bool { + iTimestamp, jTimestamp := int64(0), int64(0) + if slice != nil && slice[i].inputLogEvent.Timestamp != nil { + iTimestamp = *slice[i].inputLogEvent.Timestamp + } + if slice != nil && slice[j].inputLogEvent.Timestamp != nil { + jTimestamp = *slice[j].inputLogEvent.Timestamp + } + if iTimestamp == jTimestamp { + return slice[i].insertOrder < slice[j].insertOrder + } + return iTimestamp < jTimestamp +} + +// Swap swaps two values in a byTimestamp slice with each other. Swap is +// required by the sort.Interface interface. +func (slice byTimestamp) Swap(i, j int) { + slice[i], slice[j] = slice[j], slice[i] +} + +func unwrapEvents(events []wrappedEvent) []*cloudwatchlogs.InputLogEvent { + cwEvents := make([]*cloudwatchlogs.InputLogEvent, len(events)) + for i, input := range events { + cwEvents[i] = input.inputLogEvent + } + return cwEvents +} diff --git a/components/engine/daemon/logger/awslogs/cloudwatchlogs_test.go b/components/engine/daemon/logger/awslogs/cloudwatchlogs_test.go new file mode 100644 index 0000000000..e3862ffebe --- /dev/null +++ b/components/engine/daemon/logger/awslogs/cloudwatchlogs_test.go @@ -0,0 +1,1053 @@ +package awslogs + +import ( + "errors" + "fmt" + "net/http" + "reflect" + "regexp" + "runtime" + "strings" + "testing" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/cloudwatchlogs" + "github.com/docker/docker/daemon/logger" + "github.com/docker/docker/daemon/logger/loggerutils" + "github.com/docker/docker/dockerversion" + "github.com/stretchr/testify/assert" +) + +const ( + groupName = "groupName" + streamName = "streamName" + sequenceToken = "sequenceToken" + nextSequenceToken = "nextSequenceToken" + logline = "this is a log line\r" + multilineLogline = "2017-01-01 01:01:44 This is a multiline log entry\r" +) + +// Generates i multi-line events each with j lines +func (l *logStream) logGenerator(lineCount int, multilineCount int) { + for i := 0; i < multilineCount; i++ { + l.Log(&logger.Message{ + Line: []byte(multilineLogline), + Timestamp: time.Time{}, + }) + for j := 0; j < lineCount; j++ { + l.Log(&logger.Message{ + Line: []byte(logline), + Timestamp: time.Time{}, + }) + } + } +} + +func TestNewAWSLogsClientUserAgentHandler(t *testing.T) { + info := logger.Info{ + Config: map[string]string{ + regionKey: "us-east-1", + }, + } + + client, err := newAWSLogsClient(info) + if err != nil { + t.Fatal(err) + } + realClient, ok := client.(*cloudwatchlogs.CloudWatchLogs) + if !ok { + t.Fatal("Could not cast client to cloudwatchlogs.CloudWatchLogs") + } + buildHandlerList := realClient.Handlers.Build + request := &request.Request{ + HTTPRequest: &http.Request{ + Header: http.Header{}, + }, + } + buildHandlerList.Run(request) + expectedUserAgentString := fmt.Sprintf("Docker %s (%s) %s/%s (%s; %s; %s)", + dockerversion.Version, runtime.GOOS, aws.SDKName, aws.SDKVersion, runtime.Version(), runtime.GOOS, runtime.GOARCH) + userAgent := request.HTTPRequest.Header.Get("User-Agent") + if 
userAgent != expectedUserAgentString { + t.Errorf("Wrong User-Agent string, expected \"%s\" but was \"%s\"", + expectedUserAgentString, userAgent) + } +} + +func TestNewAWSLogsClientRegionDetect(t *testing.T) { + info := logger.Info{ + Config: map[string]string{}, + } + + mockMetadata := newMockMetadataClient() + newRegionFinder = func() regionFinder { + return mockMetadata + } + mockMetadata.regionResult <- ®ionResult{ + successResult: "us-east-1", + } + + _, err := newAWSLogsClient(info) + if err != nil { + t.Fatal(err) + } +} + +func TestCreateSuccess(t *testing.T) { + mockClient := newMockClient() + stream := &logStream{ + client: mockClient, + logGroupName: groupName, + logStreamName: streamName, + } + mockClient.createLogStreamResult <- &createLogStreamResult{} + + err := stream.create() + + if err != nil { + t.Errorf("Received unexpected err: %v\n", err) + } + argument := <-mockClient.createLogStreamArgument + if argument.LogGroupName == nil { + t.Fatal("Expected non-nil LogGroupName") + } + if *argument.LogGroupName != groupName { + t.Errorf("Expected LogGroupName to be %s", groupName) + } + if argument.LogStreamName == nil { + t.Fatal("Expected non-nil LogStreamName") + } + if *argument.LogStreamName != streamName { + t.Errorf("Expected LogStreamName to be %s", streamName) + } +} + +func TestCreateLogGroupSuccess(t *testing.T) { + mockClient := newMockClient() + stream := &logStream{ + client: mockClient, + logGroupName: groupName, + logStreamName: streamName, + logCreateGroup: true, + } + mockClient.createLogGroupResult <- &createLogGroupResult{} + mockClient.createLogStreamResult <- &createLogStreamResult{} + + err := stream.create() + + if err != nil { + t.Errorf("Received unexpected err: %v\n", err) + } + argument := <-mockClient.createLogStreamArgument + if argument.LogGroupName == nil { + t.Fatal("Expected non-nil LogGroupName") + } + if *argument.LogGroupName != groupName { + t.Errorf("Expected LogGroupName to be %s", groupName) + } + if argument.LogStreamName == nil { + t.Fatal("Expected non-nil LogStreamName") + } + if *argument.LogStreamName != streamName { + t.Errorf("Expected LogStreamName to be %s", streamName) + } +} + +func TestCreateError(t *testing.T) { + mockClient := newMockClient() + stream := &logStream{ + client: mockClient, + } + mockClient.createLogStreamResult <- &createLogStreamResult{ + errorResult: errors.New("Error!"), + } + + err := stream.create() + + if err == nil { + t.Fatal("Expected non-nil err") + } +} + +func TestCreateAlreadyExists(t *testing.T) { + mockClient := newMockClient() + stream := &logStream{ + client: mockClient, + } + mockClient.createLogStreamResult <- &createLogStreamResult{ + errorResult: awserr.New(resourceAlreadyExistsCode, "", nil), + } + + err := stream.create() + + if err != nil { + t.Fatal("Expected nil err") + } +} + +func TestPublishBatchSuccess(t *testing.T) { + mockClient := newMockClient() + stream := &logStream{ + client: mockClient, + logGroupName: groupName, + logStreamName: streamName, + sequenceToken: aws.String(sequenceToken), + } + mockClient.putLogEventsResult <- &putLogEventsResult{ + successResult: &cloudwatchlogs.PutLogEventsOutput{ + NextSequenceToken: aws.String(nextSequenceToken), + }, + } + events := []wrappedEvent{ + { + inputLogEvent: &cloudwatchlogs.InputLogEvent{ + Message: aws.String(logline), + }, + }, + } + + stream.publishBatch(events) + if stream.sequenceToken == nil { + t.Fatal("Expected non-nil sequenceToken") + } + if *stream.sequenceToken != nextSequenceToken { + t.Errorf("Expected 
sequenceToken to be %s, but was %s", nextSequenceToken, *stream.sequenceToken) + } + argument := <-mockClient.putLogEventsArgument + if argument == nil { + t.Fatal("Expected non-nil PutLogEventsInput") + } + if argument.SequenceToken == nil { + t.Fatal("Expected non-nil PutLogEventsInput.SequenceToken") + } + if *argument.SequenceToken != sequenceToken { + t.Errorf("Expected PutLogEventsInput.SequenceToken to be %s, but was %s", sequenceToken, *argument.SequenceToken) + } + if len(argument.LogEvents) != 1 { + t.Errorf("Expected LogEvents to contain 1 element, but contains %d", len(argument.LogEvents)) + } + if argument.LogEvents[0] != events[0].inputLogEvent { + t.Error("Expected event to equal input") + } +} + +func TestPublishBatchError(t *testing.T) { + mockClient := newMockClient() + stream := &logStream{ + client: mockClient, + logGroupName: groupName, + logStreamName: streamName, + sequenceToken: aws.String(sequenceToken), + } + mockClient.putLogEventsResult <- &putLogEventsResult{ + errorResult: errors.New("Error!"), + } + + events := []wrappedEvent{ + { + inputLogEvent: &cloudwatchlogs.InputLogEvent{ + Message: aws.String(logline), + }, + }, + } + + stream.publishBatch(events) + if stream.sequenceToken == nil { + t.Fatal("Expected non-nil sequenceToken") + } + if *stream.sequenceToken != sequenceToken { + t.Errorf("Expected sequenceToken to be %s, but was %s", sequenceToken, *stream.sequenceToken) + } +} + +func TestPublishBatchInvalidSeqSuccess(t *testing.T) { + mockClient := newMockClientBuffered(2) + stream := &logStream{ + client: mockClient, + logGroupName: groupName, + logStreamName: streamName, + sequenceToken: aws.String(sequenceToken), + } + mockClient.putLogEventsResult <- &putLogEventsResult{ + errorResult: awserr.New(invalidSequenceTokenCode, "use token token", nil), + } + mockClient.putLogEventsResult <- &putLogEventsResult{ + successResult: &cloudwatchlogs.PutLogEventsOutput{ + NextSequenceToken: aws.String(nextSequenceToken), + }, + } + + events := []wrappedEvent{ + { + inputLogEvent: &cloudwatchlogs.InputLogEvent{ + Message: aws.String(logline), + }, + }, + } + + stream.publishBatch(events) + if stream.sequenceToken == nil { + t.Fatal("Expected non-nil sequenceToken") + } + if *stream.sequenceToken != nextSequenceToken { + t.Errorf("Expected sequenceToken to be %s, but was %s", nextSequenceToken, *stream.sequenceToken) + } + + argument := <-mockClient.putLogEventsArgument + if argument == nil { + t.Fatal("Expected non-nil PutLogEventsInput") + } + if argument.SequenceToken == nil { + t.Fatal("Expected non-nil PutLogEventsInput.SequenceToken") + } + if *argument.SequenceToken != sequenceToken { + t.Errorf("Expected PutLogEventsInput.SequenceToken to be %s, but was %s", sequenceToken, *argument.SequenceToken) + } + if len(argument.LogEvents) != 1 { + t.Errorf("Expected LogEvents to contain 1 element, but contains %d", len(argument.LogEvents)) + } + if argument.LogEvents[0] != events[0].inputLogEvent { + t.Error("Expected event to equal input") + } + + argument = <-mockClient.putLogEventsArgument + if argument == nil { + t.Fatal("Expected non-nil PutLogEventsInput") + } + if argument.SequenceToken == nil { + t.Fatal("Expected non-nil PutLogEventsInput.SequenceToken") + } + if *argument.SequenceToken != "token" { + t.Errorf("Expected PutLogEventsInput.SequenceToken to be %s, but was %s", "token", *argument.SequenceToken) + } + if len(argument.LogEvents) != 1 { + t.Errorf("Expected LogEvents to contain 1 element, but contains %d", len(argument.LogEvents)) + } + if 
argument.LogEvents[0] != events[0].inputLogEvent { + t.Error("Expected event to equal input") + } +} + +func TestPublishBatchAlreadyAccepted(t *testing.T) { + mockClient := newMockClient() + stream := &logStream{ + client: mockClient, + logGroupName: groupName, + logStreamName: streamName, + sequenceToken: aws.String(sequenceToken), + } + mockClient.putLogEventsResult <- &putLogEventsResult{ + errorResult: awserr.New(dataAlreadyAcceptedCode, "use token token", nil), + } + + events := []wrappedEvent{ + { + inputLogEvent: &cloudwatchlogs.InputLogEvent{ + Message: aws.String(logline), + }, + }, + } + + stream.publishBatch(events) + if stream.sequenceToken == nil { + t.Fatal("Expected non-nil sequenceToken") + } + if *stream.sequenceToken != "token" { + t.Errorf("Expected sequenceToken to be %s, but was %s", "token", *stream.sequenceToken) + } + + argument := <-mockClient.putLogEventsArgument + if argument == nil { + t.Fatal("Expected non-nil PutLogEventsInput") + } + if argument.SequenceToken == nil { + t.Fatal("Expected non-nil PutLogEventsInput.SequenceToken") + } + if *argument.SequenceToken != sequenceToken { + t.Errorf("Expected PutLogEventsInput.SequenceToken to be %s, but was %s", sequenceToken, *argument.SequenceToken) + } + if len(argument.LogEvents) != 1 { + t.Errorf("Expected LogEvents to contain 1 element, but contains %d", len(argument.LogEvents)) + } + if argument.LogEvents[0] != events[0].inputLogEvent { + t.Error("Expected event to equal input") + } +} + +func TestCollectBatchSimple(t *testing.T) { + mockClient := newMockClient() + stream := &logStream{ + client: mockClient, + logGroupName: groupName, + logStreamName: streamName, + sequenceToken: aws.String(sequenceToken), + messages: make(chan *logger.Message), + } + mockClient.putLogEventsResult <- &putLogEventsResult{ + successResult: &cloudwatchlogs.PutLogEventsOutput{ + NextSequenceToken: aws.String(nextSequenceToken), + }, + } + ticks := make(chan time.Time) + newTicker = func(_ time.Duration) *time.Ticker { + return &time.Ticker{ + C: ticks, + } + } + + go stream.collectBatch() + + stream.Log(&logger.Message{ + Line: []byte(logline), + Timestamp: time.Time{}, + }) + + ticks <- time.Time{} + stream.Close() + + argument := <-mockClient.putLogEventsArgument + if argument == nil { + t.Fatal("Expected non-nil PutLogEventsInput") + } + if len(argument.LogEvents) != 1 { + t.Errorf("Expected LogEvents to contain 1 element, but contains %d", len(argument.LogEvents)) + } + if *argument.LogEvents[0].Message != logline { + t.Errorf("Expected message to be %s but was %s", logline, *argument.LogEvents[0].Message) + } +} + +func TestCollectBatchTicker(t *testing.T) { + mockClient := newMockClient() + stream := &logStream{ + client: mockClient, + logGroupName: groupName, + logStreamName: streamName, + sequenceToken: aws.String(sequenceToken), + messages: make(chan *logger.Message), + } + mockClient.putLogEventsResult <- &putLogEventsResult{ + successResult: &cloudwatchlogs.PutLogEventsOutput{ + NextSequenceToken: aws.String(nextSequenceToken), + }, + } + ticks := make(chan time.Time) + newTicker = func(_ time.Duration) *time.Ticker { + return &time.Ticker{ + C: ticks, + } + } + + go stream.collectBatch() + + stream.Log(&logger.Message{ + Line: []byte(logline + " 1"), + Timestamp: time.Time{}, + }) + stream.Log(&logger.Message{ + Line: []byte(logline + " 2"), + Timestamp: time.Time{}, + }) + + ticks <- time.Time{} + + // Verify first batch + argument := <-mockClient.putLogEventsArgument + if argument == nil { + t.Fatal("Expected non-nil 
PutLogEventsInput") + } + if len(argument.LogEvents) != 2 { + t.Errorf("Expected LogEvents to contain 2 elements, but contains %d", len(argument.LogEvents)) + } + if *argument.LogEvents[0].Message != logline+" 1" { + t.Errorf("Expected message to be %s but was %s", logline+" 1", *argument.LogEvents[0].Message) + } + if *argument.LogEvents[1].Message != logline+" 2" { + t.Errorf("Expected message to be %s but was %s", logline+" 2", *argument.LogEvents[0].Message) + } + + stream.Log(&logger.Message{ + Line: []byte(logline + " 3"), + Timestamp: time.Time{}, + }) + + ticks <- time.Time{} + argument = <-mockClient.putLogEventsArgument + if argument == nil { + t.Fatal("Expected non-nil PutLogEventsInput") + } + if len(argument.LogEvents) != 1 { + t.Errorf("Expected LogEvents to contain 1 elements, but contains %d", len(argument.LogEvents)) + } + if *argument.LogEvents[0].Message != logline+" 3" { + t.Errorf("Expected message to be %s but was %s", logline+" 3", *argument.LogEvents[0].Message) + } + + stream.Close() + +} + +func TestCollectBatchMultilinePattern(t *testing.T) { + mockClient := newMockClient() + multilinePattern := regexp.MustCompile("xxxx") + stream := &logStream{ + client: mockClient, + logGroupName: groupName, + logStreamName: streamName, + multilinePattern: multilinePattern, + sequenceToken: aws.String(sequenceToken), + messages: make(chan *logger.Message), + } + mockClient.putLogEventsResult <- &putLogEventsResult{ + successResult: &cloudwatchlogs.PutLogEventsOutput{ + NextSequenceToken: aws.String(nextSequenceToken), + }, + } + ticks := make(chan time.Time) + newTicker = func(_ time.Duration) *time.Ticker { + return &time.Ticker{ + C: ticks, + } + } + + go stream.collectBatch() + + stream.Log(&logger.Message{ + Line: []byte(logline), + Timestamp: time.Now(), + }) + stream.Log(&logger.Message{ + Line: []byte(logline), + Timestamp: time.Now(), + }) + stream.Log(&logger.Message{ + Line: []byte("xxxx " + logline), + Timestamp: time.Now(), + }) + + ticks <- time.Now() + + // Verify single multiline event + argument := <-mockClient.putLogEventsArgument + assert.NotNil(t, argument, "Expected non-nil PutLogEventsInput") + assert.Equal(t, 1, len(argument.LogEvents), "Expected single multiline event") + assert.Equal(t, logline+"\n"+logline+"\n", *argument.LogEvents[0].Message, "Received incorrect multiline message") + + stream.Close() + + // Verify single event + argument = <-mockClient.putLogEventsArgument + assert.NotNil(t, argument, "Expected non-nil PutLogEventsInput") + assert.Equal(t, 1, len(argument.LogEvents), "Expected single multiline event") + assert.Equal(t, "xxxx "+logline+"\n", *argument.LogEvents[0].Message, "Received incorrect multiline message") +} + +func BenchmarkCollectBatch(b *testing.B) { + for i := 0; i < b.N; i++ { + mockClient := newMockClient() + stream := &logStream{ + client: mockClient, + logGroupName: groupName, + logStreamName: streamName, + sequenceToken: aws.String(sequenceToken), + messages: make(chan *logger.Message), + } + mockClient.putLogEventsResult <- &putLogEventsResult{ + successResult: &cloudwatchlogs.PutLogEventsOutput{ + NextSequenceToken: aws.String(nextSequenceToken), + }, + } + ticks := make(chan time.Time) + newTicker = func(_ time.Duration) *time.Ticker { + return &time.Ticker{ + C: ticks, + } + } + + go stream.collectBatch() + stream.logGenerator(10, 100) + ticks <- time.Time{} + stream.Close() + } +} + +func BenchmarkCollectBatchMultilinePattern(b *testing.B) { + for i := 0; i < b.N; i++ { + mockClient := newMockClient() + 
+		multilinePattern := regexp.MustCompile(`\d{4}-(?:0[1-9]|1[0-2])-(?:0[1-9]|[12][0-9]|3[01]) (?:[01][0-9]|2[0-3]):[0-5][0-9]:[0-5][0-9]`) + stream := &logStream{ + client: mockClient, + logGroupName: groupName, + logStreamName: streamName, + multilinePattern: multilinePattern, + sequenceToken: aws.String(sequenceToken), + messages: make(chan *logger.Message), + } + mockClient.putLogEventsResult <- &putLogEventsResult{ + successResult: &cloudwatchlogs.PutLogEventsOutput{ + NextSequenceToken: aws.String(nextSequenceToken), + }, + } + ticks := make(chan time.Time) + newTicker = func(_ time.Duration) *time.Ticker { + return &time.Ticker{ + C: ticks, + } + } + go stream.collectBatch() + stream.logGenerator(10, 100) + ticks <- time.Time{} + stream.Close() + } +} + +func TestCollectBatchMultilinePatternMaxEventAge(t *testing.T) { + mockClient := newMockClient() + multilinePattern := regexp.MustCompile("xxxx") + stream := &logStream{ + client: mockClient, + logGroupName: groupName, + logStreamName: streamName, + multilinePattern: multilinePattern, + sequenceToken: aws.String(sequenceToken), + messages: make(chan *logger.Message), + } + mockClient.putLogEventsResult <- &putLogEventsResult{ + successResult: &cloudwatchlogs.PutLogEventsOutput{ + NextSequenceToken: aws.String(nextSequenceToken), + }, + } + ticks := make(chan time.Time) + newTicker = func(_ time.Duration) *time.Ticker { + return &time.Ticker{ + C: ticks, + } + } + + go stream.collectBatch() + + stream.Log(&logger.Message{ + Line: []byte(logline), + Timestamp: time.Now(), + }) + + // Log an event 1 second later + stream.Log(&logger.Message{ + Line: []byte(logline), + Timestamp: time.Now().Add(time.Second), + }) + + // Fire the ticker well past the batch publish frequency + ticks <- time.Now().Add(2 * batchPublishFrequency) + + // Verify single multiline event is flushed after maximum event buffer age (batchPublishFrequency) + argument := <-mockClient.putLogEventsArgument + assert.NotNil(t, argument, "Expected non-nil PutLogEventsInput") + assert.Equal(t, 1, len(argument.LogEvents), "Expected single multiline event") + assert.Equal(t, logline+"\n"+logline+"\n", *argument.LogEvents[0].Message, "Received incorrect multiline message") + + stream.Close() +} + +func TestCollectBatchMultilinePatternNegativeEventAge(t *testing.T) { + mockClient := newMockClient() + multilinePattern := regexp.MustCompile("xxxx") + stream := &logStream{ + client: mockClient, + logGroupName: groupName, + logStreamName: streamName, + multilinePattern: multilinePattern, + sequenceToken: aws.String(sequenceToken), + messages: make(chan *logger.Message), + } + mockClient.putLogEventsResult <- &putLogEventsResult{ + successResult: &cloudwatchlogs.PutLogEventsOutput{ + NextSequenceToken: aws.String(nextSequenceToken), + }, + } + ticks := make(chan time.Time) + newTicker = func(_ time.Duration) *time.Ticker { + return &time.Ticker{ + C: ticks, + } + } + + go stream.collectBatch() + + stream.Log(&logger.Message{ + Line: []byte(logline), + Timestamp: time.Now(), + }) + + // Log an event 1 second later + stream.Log(&logger.Message{ + Line: []byte(logline), + Timestamp: time.Now().Add(time.Second), + }) + + // Fire ticker in past to simulate negative event buffer age + ticks <- time.Now().Add(-time.Second) + + // Verify single multiline event is flushed with a negative event buffer age + argument := <-mockClient.putLogEventsArgument + assert.NotNil(t, argument, "Expected non-nil PutLogEventsInput") + assert.Equal(t, 1, len(argument.LogEvents), "Expected single multiline
event") + assert.Equal(t, logline+"\n"+logline+"\n", *argument.LogEvents[0].Message, "Received incorrect multiline message") + + stream.Close() +} + +func TestCollectBatchClose(t *testing.T) { + mockClient := newMockClient() + stream := &logStream{ + client: mockClient, + logGroupName: groupName, + logStreamName: streamName, + sequenceToken: aws.String(sequenceToken), + messages: make(chan *logger.Message), + } + mockClient.putLogEventsResult <- &putLogEventsResult{ + successResult: &cloudwatchlogs.PutLogEventsOutput{ + NextSequenceToken: aws.String(nextSequenceToken), + }, + } + var ticks = make(chan time.Time) + newTicker = func(_ time.Duration) *time.Ticker { + return &time.Ticker{ + C: ticks, + } + } + + go stream.collectBatch() + + stream.Log(&logger.Message{ + Line: []byte(logline), + Timestamp: time.Time{}, + }) + + // no ticks + stream.Close() + + argument := <-mockClient.putLogEventsArgument + if argument == nil { + t.Fatal("Expected non-nil PutLogEventsInput") + } + if len(argument.LogEvents) != 1 { + t.Errorf("Expected LogEvents to contain 1 element, but contains %d", len(argument.LogEvents)) + } + if *argument.LogEvents[0].Message != logline { + t.Errorf("Expected message to be %s but was %s", logline, *argument.LogEvents[0].Message) + } +} + +func TestCollectBatchLineSplit(t *testing.T) { + mockClient := newMockClient() + stream := &logStream{ + client: mockClient, + logGroupName: groupName, + logStreamName: streamName, + sequenceToken: aws.String(sequenceToken), + messages: make(chan *logger.Message), + } + mockClient.putLogEventsResult <- &putLogEventsResult{ + successResult: &cloudwatchlogs.PutLogEventsOutput{ + NextSequenceToken: aws.String(nextSequenceToken), + }, + } + var ticks = make(chan time.Time) + newTicker = func(_ time.Duration) *time.Ticker { + return &time.Ticker{ + C: ticks, + } + } + + go stream.collectBatch() + + longline := strings.Repeat("A", maximumBytesPerEvent) + stream.Log(&logger.Message{ + Line: []byte(longline + "B"), + Timestamp: time.Time{}, + }) + + // no ticks + stream.Close() + + argument := <-mockClient.putLogEventsArgument + if argument == nil { + t.Fatal("Expected non-nil PutLogEventsInput") + } + if len(argument.LogEvents) != 2 { + t.Errorf("Expected LogEvents to contain 2 elements, but contains %d", len(argument.LogEvents)) + } + if *argument.LogEvents[0].Message != longline { + t.Errorf("Expected message to be %s but was %s", longline, *argument.LogEvents[0].Message) + } + if *argument.LogEvents[1].Message != "B" { + t.Errorf("Expected message to be %s but was %s", "B", *argument.LogEvents[1].Message) + } +} + +func TestCollectBatchMaxEvents(t *testing.T) { + mockClient := newMockClientBuffered(1) + stream := &logStream{ + client: mockClient, + logGroupName: groupName, + logStreamName: streamName, + sequenceToken: aws.String(sequenceToken), + messages: make(chan *logger.Message), + } + mockClient.putLogEventsResult <- &putLogEventsResult{ + successResult: &cloudwatchlogs.PutLogEventsOutput{ + NextSequenceToken: aws.String(nextSequenceToken), + }, + } + var ticks = make(chan time.Time) + newTicker = func(_ time.Duration) *time.Ticker { + return &time.Ticker{ + C: ticks, + } + } + + go stream.collectBatch() + + line := "A" + for i := 0; i <= maximumLogEventsPerPut; i++ { + stream.Log(&logger.Message{ + Line: []byte(line), + Timestamp: time.Time{}, + }) + } + + // no ticks + stream.Close() + + argument := <-mockClient.putLogEventsArgument + if argument == nil { + t.Fatal("Expected non-nil PutLogEventsInput") + } + if len(argument.LogEvents) 
!= maximumLogEventsPerPut { + t.Errorf("Expected LogEvents to contain %d elements, but contains %d", maximumLogEventsPerPut, len(argument.LogEvents)) + } + + argument = <-mockClient.putLogEventsArgument + if argument == nil { + t.Fatal("Expected non-nil PutLogEventsInput") + } + if len(argument.LogEvents) != 1 { + t.Errorf("Expected LogEvents to contain %d elements, but contains %d", 1, len(argument.LogEvents)) + } +} + +func TestCollectBatchMaxTotalBytes(t *testing.T) { + mockClient := newMockClientBuffered(1) + stream := &logStream{ + client: mockClient, + logGroupName: groupName, + logStreamName: streamName, + sequenceToken: aws.String(sequenceToken), + messages: make(chan *logger.Message), + } + mockClient.putLogEventsResult <- &putLogEventsResult{ + successResult: &cloudwatchlogs.PutLogEventsOutput{ + NextSequenceToken: aws.String(nextSequenceToken), + }, + } + var ticks = make(chan time.Time) + newTicker = func(_ time.Duration) *time.Ticker { + return &time.Ticker{ + C: ticks, + } + } + + go stream.collectBatch() + + longline := strings.Repeat("A", maximumBytesPerPut) + stream.Log(&logger.Message{ + Line: []byte(longline + "B"), + Timestamp: time.Time{}, + }) + + // no ticks + stream.Close() + + argument := <-mockClient.putLogEventsArgument + if argument == nil { + t.Fatal("Expected non-nil PutLogEventsInput") + } + bytes := 0 + for _, event := range argument.LogEvents { + bytes += len(*event.Message) + } + if bytes > maximumBytesPerPut { + t.Errorf("Expected <= %d bytes but was %d", maximumBytesPerPut, bytes) + } + + argument = <-mockClient.putLogEventsArgument + if len(argument.LogEvents) != 1 { + t.Errorf("Expected LogEvents to contain 1 element, but contains %d", len(argument.LogEvents)) + } + message := *argument.LogEvents[0].Message + if message[len(message)-1:] != "B" { + t.Errorf("Expected message to be %s but was %s", "B", message[len(message)-1:]) + } +} + +func TestCollectBatchWithDuplicateTimestamps(t *testing.T) { + mockClient := newMockClient() + stream := &logStream{ + client: mockClient, + logGroupName: groupName, + logStreamName: streamName, + sequenceToken: aws.String(sequenceToken), + messages: make(chan *logger.Message), + } + mockClient.putLogEventsResult <- &putLogEventsResult{ + successResult: &cloudwatchlogs.PutLogEventsOutput{ + NextSequenceToken: aws.String(nextSequenceToken), + }, + } + ticks := make(chan time.Time) + newTicker = func(_ time.Duration) *time.Ticker { + return &time.Ticker{ + C: ticks, + } + } + + go stream.collectBatch() + + times := maximumLogEventsPerPut + expectedEvents := []*cloudwatchlogs.InputLogEvent{} + timestamp := time.Now() + for i := 0; i < times; i++ { + line := fmt.Sprintf("%d", i) + if i%2 == 0 { + timestamp = timestamp.Add(1 * time.Nanosecond) + } + stream.Log(&logger.Message{ + Line: []byte(line), + Timestamp: timestamp, + }) + expectedEvents = append(expectedEvents, &cloudwatchlogs.InputLogEvent{ + Message: aws.String(line), + Timestamp: aws.Int64(timestamp.UnixNano() / int64(time.Millisecond)), + }) + } + + ticks <- time.Time{} + stream.Close() + + argument := <-mockClient.putLogEventsArgument + if argument == nil { + t.Fatal("Expected non-nil PutLogEventsInput") + } + if len(argument.LogEvents) != times { + t.Errorf("Expected LogEvents to contain %d elements, but contains %d", times, len(argument.LogEvents)) + } + for i := 0; i < times; i++ { + if !reflect.DeepEqual(*argument.LogEvents[i], *expectedEvents[i]) { + t.Errorf("Expected event to be %v but was %v", *expectedEvents[i], *argument.LogEvents[i]) + } + } +} + +func
TestParseLogOptionsMultilinePattern(t *testing.T) { + info := logger.Info{ + Config: map[string]string{ + multilinePatternKey: "^xxxx", + }, + } + + multilinePattern, err := parseMultilineOptions(info) + assert.Nil(t, err, "Received unexpected error") + assert.True(t, multilinePattern.MatchString("xxxx"), "No multiline pattern match found") +} + +func TestParseLogOptionsDatetimeFormat(t *testing.T) { + datetimeFormatTests := []struct { + format string + match string + }{ + {"%d/%m/%y %a %H:%M:%S%L %Z", "31/12/10 Mon 08:42:44.345 NZDT"}, + {"%Y-%m-%d %A %I:%M:%S.%f%p%z", "2007-12-04 Monday 08:42:44.123456AM+1200"}, + {"%b|%b|%b|%b|%b|%b|%b|%b|%b|%b|%b|%b", "Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec"}, + {"%B|%B|%B|%B|%B|%B|%B|%B|%B|%B|%B|%B", "January|February|March|April|May|June|July|August|September|October|November|December"}, + {"%A|%A|%A|%A|%A|%A|%A", "Monday|Tuesday|Wednesday|Thursday|Friday|Saturday|Sunday"}, + {"%a|%a|%a|%a|%a|%a|%a", "Mon|Tue|Wed|Thu|Fri|Sat|Sun"}, + {"Day of the week: %w, Day of the year: %j", "Day of the week: 4, Day of the year: 091"}, + } + for _, dt := range datetimeFormatTests { + t.Run(dt.match, func(t *testing.T) { + info := logger.Info{ + Config: map[string]string{ + datetimeFormatKey: dt.format, + }, + } + multilinePattern, err := parseMultilineOptions(info) + assert.Nil(t, err, "Received unexpected error") + assert.True(t, multilinePattern.MatchString(dt.match), "No multiline pattern match found") + }) + } +} + +func TestValidateLogOptionsDatetimeFormatAndMultilinePattern(t *testing.T) { + cfg := map[string]string{ + multilinePatternKey: "^xxxx", + datetimeFormatKey: "%Y-%m-%d", + logGroupKey: groupName, + } + conflictingLogOptionsError := "you cannot configure log opt 'awslogs-datetime-format' and 'awslogs-multiline-pattern' at the same time" + + err := ValidateLogOpt(cfg) + assert.NotNil(t, err, "Expected an error") + assert.Equal(t, err.Error(), conflictingLogOptionsError, "Received invalid error") +} + +func TestCreateTagSuccess(t *testing.T) { + mockClient := newMockClient() + info := logger.Info{ + ContainerName: "/test-container", + ContainerID: "container-abcdefghijklmnopqrstuvwxyz01234567890", + Config: map[string]string{"tag": "{{.Name}}/{{.FullID}}"}, + } + logStreamName, e := loggerutils.ParseLogTag(info, loggerutils.DefaultTemplate) + if e != nil { + t.Errorf("Error generating tag: %q", e) + } + stream := &logStream{ + client: mockClient, + logGroupName: groupName, + logStreamName: logStreamName, + } + mockClient.createLogStreamResult <- &createLogStreamResult{} + + err := stream.create() + + if err != nil { + t.Errorf("Received unexpected err: %v\n", err) + } + argument := <-mockClient.createLogStreamArgument + + if *argument.LogStreamName != "test-container/container-abcdefghijklmnopqrstuvwxyz01234567890" { + t.Errorf("Expected LogStreamName to be %s", "test-container/container-abcdefghijklmnopqrstuvwxyz01234567890") + } +} + +func BenchmarkUnwrapEvents(b *testing.B) { + events := make([]wrappedEvent, maximumLogEventsPerPut) + for i := 0; i < maximumLogEventsPerPut; i++ { + mes := strings.Repeat("0", maximumBytesPerEvent) + events[i].inputLogEvent = &cloudwatchlogs.InputLogEvent{ + Message: &mes, + } + } + + as := assert.New(b) + b.ResetTimer() + for i := 0; i < b.N; i++ { + res := unwrapEvents(events) + as.Len(res, maximumLogEventsPerPut) + } +} diff --git a/components/engine/daemon/logger/awslogs/cwlogsiface_mock_test.go b/components/engine/daemon/logger/awslogs/cwlogsiface_mock_test.go new file mode 100644 index 
0000000000..82bb34b0a6 --- /dev/null +++ b/components/engine/daemon/logger/awslogs/cwlogsiface_mock_test.go @@ -0,0 +1,92 @@ +package awslogs + +import "github.com/aws/aws-sdk-go/service/cloudwatchlogs" + +type mockcwlogsclient struct { + createLogGroupArgument chan *cloudwatchlogs.CreateLogGroupInput + createLogGroupResult chan *createLogGroupResult + createLogStreamArgument chan *cloudwatchlogs.CreateLogStreamInput + createLogStreamResult chan *createLogStreamResult + putLogEventsArgument chan *cloudwatchlogs.PutLogEventsInput + putLogEventsResult chan *putLogEventsResult +} + +type createLogGroupResult struct { + successResult *cloudwatchlogs.CreateLogGroupOutput + errorResult error +} + +type createLogStreamResult struct { + successResult *cloudwatchlogs.CreateLogStreamOutput + errorResult error +} + +type putLogEventsResult struct { + successResult *cloudwatchlogs.PutLogEventsOutput + errorResult error +} + +func newMockClient() *mockcwlogsclient { + return &mockcwlogsclient{ + createLogGroupArgument: make(chan *cloudwatchlogs.CreateLogGroupInput, 1), + createLogGroupResult: make(chan *createLogGroupResult, 1), + createLogStreamArgument: make(chan *cloudwatchlogs.CreateLogStreamInput, 1), + createLogStreamResult: make(chan *createLogStreamResult, 1), + putLogEventsArgument: make(chan *cloudwatchlogs.PutLogEventsInput, 1), + putLogEventsResult: make(chan *putLogEventsResult, 1), + } +} + +func newMockClientBuffered(buflen int) *mockcwlogsclient { + return &mockcwlogsclient{ + createLogStreamArgument: make(chan *cloudwatchlogs.CreateLogStreamInput, buflen), + createLogStreamResult: make(chan *createLogStreamResult, buflen), + putLogEventsArgument: make(chan *cloudwatchlogs.PutLogEventsInput, buflen), + putLogEventsResult: make(chan *putLogEventsResult, buflen), + } +} + +func (m *mockcwlogsclient) CreateLogGroup(input *cloudwatchlogs.CreateLogGroupInput) (*cloudwatchlogs.CreateLogGroupOutput, error) { + m.createLogGroupArgument <- input + output := <-m.createLogGroupResult + return output.successResult, output.errorResult +} + +func (m *mockcwlogsclient) CreateLogStream(input *cloudwatchlogs.CreateLogStreamInput) (*cloudwatchlogs.CreateLogStreamOutput, error) { + m.createLogStreamArgument <- input + output := <-m.createLogStreamResult + return output.successResult, output.errorResult +} + +func (m *mockcwlogsclient) PutLogEvents(input *cloudwatchlogs.PutLogEventsInput) (*cloudwatchlogs.PutLogEventsOutput, error) { + events := make([]*cloudwatchlogs.InputLogEvent, len(input.LogEvents)) + copy(events, input.LogEvents) + m.putLogEventsArgument <- &cloudwatchlogs.PutLogEventsInput{ + LogEvents: events, + SequenceToken: input.SequenceToken, + LogGroupName: input.LogGroupName, + LogStreamName: input.LogStreamName, + } + output := <-m.putLogEventsResult + return output.successResult, output.errorResult +} + +type mockmetadataclient struct { + regionResult chan *regionResult +} + +type regionResult struct { + successResult string + errorResult error +} + +func newMockMetadataClient() *mockmetadataclient { + return &mockmetadataclient{ + regionResult: make(chan *regionResult, 1), + } +} + +func (m *mockmetadataclient) Region() (string, error) { + output := <-m.regionResult + return output.successResult, output.errorResult +} diff --git a/components/engine/daemon/logger/copier.go b/components/engine/daemon/logger/copier.go new file mode 100644 index 0000000000..65d8fb148e --- /dev/null +++ b/components/engine/daemon/logger/copier.go @@ -0,0 +1,135 @@ +package logger + +import ( + "bytes" + "io" + 
"sync" + "time" + + "github.com/Sirupsen/logrus" +) + +const ( + bufSize = 16 * 1024 + readSize = 2 * 1024 +) + +// Copier can copy logs from specified sources to Logger and attach Timestamp. +// Writes are concurrent, so you need implement some sync in your logger. +type Copier struct { + // srcs is map of name -> reader pairs, for example "stdout", "stderr" + srcs map[string]io.Reader + dst Logger + copyJobs sync.WaitGroup + closeOnce sync.Once + closed chan struct{} +} + +// NewCopier creates a new Copier +func NewCopier(srcs map[string]io.Reader, dst Logger) *Copier { + return &Copier{ + srcs: srcs, + dst: dst, + closed: make(chan struct{}), + } +} + +// Run starts logs copying +func (c *Copier) Run() { + for src, w := range c.srcs { + c.copyJobs.Add(1) + go c.copySrc(src, w) + } +} + +func (c *Copier) copySrc(name string, src io.Reader) { + defer c.copyJobs.Done() + buf := make([]byte, bufSize) + n := 0 + eof := false + + for { + select { + case <-c.closed: + return + default: + // Work out how much more data we are okay with reading this time. + upto := n + readSize + if upto > cap(buf) { + upto = cap(buf) + } + // Try to read that data. + if upto > n { + read, err := src.Read(buf[n:upto]) + if err != nil { + if err != io.EOF { + logrus.Errorf("Error scanning log stream: %s", err) + return + } + eof = true + } + n += read + } + // If we have no data to log, and there's no more coming, we're done. + if n == 0 && eof { + return + } + // Break up the data that we've buffered up into lines, and log each in turn. + p := 0 + for q := bytes.IndexByte(buf[p:n], '\n'); q >= 0; q = bytes.IndexByte(buf[p:n], '\n') { + select { + case <-c.closed: + return + default: + msg := NewMessage() + msg.Source = name + msg.Timestamp = time.Now().UTC() + msg.Line = append(msg.Line, buf[p:p+q]...) + + if logErr := c.dst.Log(msg); logErr != nil { + logrus.Errorf("Failed to log msg %q for logger %s: %s", msg.Line, c.dst.Name(), logErr) + } + } + p += q + 1 + } + // If there's no more coming, or the buffer is full but + // has no newlines, log whatever we haven't logged yet, + // noting that it's a partial log line. + if eof || (p == 0 && n == len(buf)) { + if p < n { + msg := NewMessage() + msg.Source = name + msg.Timestamp = time.Now().UTC() + msg.Line = append(msg.Line, buf[p:n]...) + msg.Partial = true + + if logErr := c.dst.Log(msg); logErr != nil { + logrus.Errorf("Failed to log msg %q for logger %s: %s", msg.Line, c.dst.Name(), logErr) + } + p = 0 + n = 0 + } + if eof { + return + } + } + // Move any unlogged data to the front of the buffer in preparation for another read. 
+ if p > 0 { + copy(buf[0:], buf[p:n]) + n -= p + } + } + } +} + +// Wait waits until all copying is done +func (c *Copier) Wait() { + c.copyJobs.Wait() +} + +// Close closes the copier +func (c *Copier) Close() { + c.closeOnce.Do(func() { + close(c.closed) + }) +} diff --git a/components/engine/daemon/logger/copier_test.go b/components/engine/daemon/logger/copier_test.go new file mode 100644 index 0000000000..4210022dcd --- /dev/null +++ b/components/engine/daemon/logger/copier_test.go @@ -0,0 +1,296 @@ +package logger + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "os" + "strings" + "sync" + "testing" + "time" +) + +type TestLoggerJSON struct { + *json.Encoder + mu sync.Mutex + delay time.Duration +} + +func (l *TestLoggerJSON) Log(m *Message) error { + if l.delay > 0 { + time.Sleep(l.delay) + } + l.mu.Lock() + defer l.mu.Unlock() + return l.Encode(m) +} + +func (l *TestLoggerJSON) Close() error { return nil } + +func (l *TestLoggerJSON) Name() string { return "json" } + +func TestCopier(t *testing.T) { + stdoutLine := "Line that thinks that it is log line from docker stdout" + stderrLine := "Line that thinks that it is log line from docker stderr" + stdoutTrailingLine := "stdout trailing line" + stderrTrailingLine := "stderr trailing line" + + var stdout bytes.Buffer + var stderr bytes.Buffer + for i := 0; i < 30; i++ { + if _, err := stdout.WriteString(stdoutLine + "\n"); err != nil { + t.Fatal(err) + } + if _, err := stderr.WriteString(stderrLine + "\n"); err != nil { + t.Fatal(err) + } + } + + // Test remaining lines without line-endings + if _, err := stdout.WriteString(stdoutTrailingLine); err != nil { + t.Fatal(err) + } + if _, err := stderr.WriteString(stderrTrailingLine); err != nil { + t.Fatal(err) + } + + var jsonBuf bytes.Buffer + + jsonLog := &TestLoggerJSON{Encoder: json.NewEncoder(&jsonBuf)} + + c := NewCopier( + map[string]io.Reader{ + "stdout": &stdout, + "stderr": &stderr, + }, + jsonLog) + c.Run() + wait := make(chan struct{}) + go func() { + c.Wait() + close(wait) + }() + select { + case <-time.After(1 * time.Second): + t.Fatal("Copier failed to do its work in 1 second") + case <-wait: + } + dec := json.NewDecoder(&jsonBuf) + for { + var msg Message + if err := dec.Decode(&msg); err != nil { + if err == io.EOF { + break + } + t.Fatal(err) + } + if msg.Source != "stdout" && msg.Source != "stderr" { + t.Fatalf("Wrong Source: %q, should be %q or %q", msg.Source, "stdout", "stderr") + } + if msg.Source == "stdout" { + if string(msg.Line) != stdoutLine && string(msg.Line) != stdoutTrailingLine { + t.Fatalf("Wrong Line: %q, expected %q or %q", msg.Line, stdoutLine, stdoutTrailingLine) + } + } + if msg.Source == "stderr" { + if string(msg.Line) != stderrLine && string(msg.Line) != stderrTrailingLine { + t.Fatalf("Wrong Line: %q, expected %q or %q", msg.Line, stderrLine, stderrTrailingLine) + } + } + } +} + +// TestCopierLongLines tests long lines without line breaks +func TestCopierLongLines(t *testing.T) { + // Long lines (should be split at "bufSize") + const bufSize = 16 * 1024 + stdoutLongLine := strings.Repeat("a", bufSize) + stderrLongLine := strings.Repeat("b", bufSize) + stdoutTrailingLine := "stdout trailing line" + stderrTrailingLine := "stderr trailing line" + + var stdout bytes.Buffer + var stderr bytes.Buffer + + for i := 0; i < 3; i++ { + if _, err := stdout.WriteString(stdoutLongLine); err != nil { + t.Fatal(err) + } + if _, err := stderr.WriteString(stderrLongLine); err != nil { + t.Fatal(err) + } + } + + if _, err := 
stdout.WriteString(stdoutTrailingLine); err != nil { + t.Fatal(err) + } + if _, err := stderr.WriteString(stderrTrailingLine); err != nil { + t.Fatal(err) + } + + var jsonBuf bytes.Buffer + + jsonLog := &TestLoggerJSON{Encoder: json.NewEncoder(&jsonBuf)} + + c := NewCopier( + map[string]io.Reader{ + "stdout": &stdout, + "stderr": &stderr, + }, + jsonLog) + c.Run() + wait := make(chan struct{}) + go func() { + c.Wait() + close(wait) + }() + select { + case <-time.After(1 * time.Second): + t.Fatal("Copier failed to do its work in 1 second") + case <-wait: + } + dec := json.NewDecoder(&jsonBuf) + for { + var msg Message + if err := dec.Decode(&msg); err != nil { + if err == io.EOF { + break + } + t.Fatal(err) + } + if msg.Source != "stdout" && msg.Source != "stderr" { + t.Fatalf("Wrong Source: %q, should be %q or %q", msg.Source, "stdout", "stderr") + } + if msg.Source == "stdout" { + if string(msg.Line) != stdoutLongLine && string(msg.Line) != stdoutTrailingLine { + t.Fatalf("Wrong Line: %q, expected 'stdoutLongLine' or 'stdoutTrailingLine'", msg.Line) + } + } + if msg.Source == "stderr" { + if string(msg.Line) != stderrLongLine && string(msg.Line) != stderrTrailingLine { + t.Fatalf("Wrong Line: %q, expected 'stderrLongLine' or 'stderrTrailingLine'", msg.Line) + } + } + } +} + +func TestCopierSlow(t *testing.T) { + stdoutLine := "Line that thinks that it is log line from docker stdout" + var stdout bytes.Buffer + for i := 0; i < 30; i++ { + if _, err := stdout.WriteString(stdoutLine + "\n"); err != nil { + t.Fatal(err) + } + } + + var jsonBuf bytes.Buffer + jsonLog := &TestLoggerJSON{Encoder: json.NewEncoder(&jsonBuf), delay: 100 * time.Millisecond} + + c := NewCopier(map[string]io.Reader{"stdout": &stdout}, jsonLog) + c.Run() + wait := make(chan struct{}) + go func() { + c.Wait() + close(wait) + }() + <-time.After(150 * time.Millisecond) + c.Close() + select { + case <-time.After(200 * time.Millisecond): + t.Fatal("failed to exit in time after the copier is closed") + case <-wait: + } +} + +type BenchmarkLoggerDummy struct { +} + +func (l *BenchmarkLoggerDummy) Log(m *Message) error { PutMessage(m); return nil } + +func (l *BenchmarkLoggerDummy) Close() error { return nil } + +func (l *BenchmarkLoggerDummy) Name() string { return "dummy" } + +func BenchmarkCopier64(b *testing.B) { + benchmarkCopier(b, 1<<6) +} +func BenchmarkCopier128(b *testing.B) { + benchmarkCopier(b, 1<<7) +} +func BenchmarkCopier256(b *testing.B) { + benchmarkCopier(b, 1<<8) +} +func BenchmarkCopier512(b *testing.B) { + benchmarkCopier(b, 1<<9) +} +func BenchmarkCopier1K(b *testing.B) { + benchmarkCopier(b, 1<<10) +} +func BenchmarkCopier2K(b *testing.B) { + benchmarkCopier(b, 1<<11) +} +func BenchmarkCopier4K(b *testing.B) { + benchmarkCopier(b, 1<<12) +} +func BenchmarkCopier8K(b *testing.B) { + benchmarkCopier(b, 1<<13) +} +func BenchmarkCopier16K(b *testing.B) { + benchmarkCopier(b, 1<<14) +} +func BenchmarkCopier32K(b *testing.B) { + benchmarkCopier(b, 1<<15) +} +func BenchmarkCopier64K(b *testing.B) { + benchmarkCopier(b, 1<<16) +} +func BenchmarkCopier128K(b *testing.B) { + benchmarkCopier(b, 1<<17) +} +func BenchmarkCopier256K(b *testing.B) { + benchmarkCopier(b, 1<<18) +} + +func piped(b *testing.B, iterations int, delay time.Duration, buf []byte) io.Reader { + r, w, err := os.Pipe() + if err != nil { + b.Fatal(err) + return nil + } + go func() { + for i := 0; i < iterations; i++ { + time.Sleep(delay) + if n, err := w.Write(buf); err != nil || n
!= len(buf) { + if err != nil { + b.Fatal(err) + } + b.Fatal(fmt.Errorf("short write")) + } + } + w.Close() + }() + return r +} + +func benchmarkCopier(b *testing.B, length int) { + b.StopTimer() + buf := []byte{'A'} + for len(buf) < length { + buf = append(buf, buf...) + } + buf = append(buf[:length-1], []byte{'\n'}...) + b.StartTimer() + for i := 0; i < b.N; i++ { + c := NewCopier( + map[string]io.Reader{ + "buffer": piped(b, 10, time.Nanosecond, buf), + }, + &BenchmarkLoggerDummy{}) + c.Run() + c.Wait() + c.Close() + } +} diff --git a/components/engine/daemon/logger/etwlogs/etwlogs_windows.go b/components/engine/daemon/logger/etwlogs/etwlogs_windows.go new file mode 100644 index 0000000000..cc44d51f56 --- /dev/null +++ b/components/engine/daemon/logger/etwlogs/etwlogs_windows.go @@ -0,0 +1,169 @@ +// Package etwlogs provides a log driver for forwarding container logs +// as ETW events (ETW stands for Event Tracing for Windows). +// A client can then create an ETW listener to listen for events that are sent +// by the ETW provider that we register, using the provider's GUID "a3693192-9ed6-46d2-a981-f8226c8363bd". +// Here is an example of how to do this using the logman utility: +// 1. logman start -ets DockerContainerLogs -p {a3693192-9ed6-46d2-a981-f8226c8363bd} 0 0 -o trace.etl +// 2. Run container(s) and generate log messages +// 3. logman stop -ets DockerContainerLogs +// 4. You can then convert the etl log file to XML using: tracerpt -y trace.etl +// +// Each container log message generates an ETW event that also contains: +// the container name and ID, the timestamp, and the stream type. +package etwlogs + +import ( + "errors" + "fmt" + "sync" + "syscall" + "unsafe" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/daemon/logger" + "golang.org/x/sys/windows" +) + +type etwLogs struct { + containerName string + imageName string + containerID string + imageID string +} + +const ( + name = "etwlogs" + win32CallSuccess = 0 +) + +var ( + modAdvapi32 = windows.NewLazySystemDLL("Advapi32.dll") + procEventRegister = modAdvapi32.NewProc("EventRegister") + procEventWriteString = modAdvapi32.NewProc("EventWriteString") + procEventUnregister = modAdvapi32.NewProc("EventUnregister") +) +var providerHandle syscall.Handle +var refCount int +var mu sync.Mutex + +func init() { + providerHandle = syscall.InvalidHandle + if err := logger.RegisterLogDriver(name, New); err != nil { + logrus.Fatal(err) + } +} + +// New creates a new etwLogs logger for the given container and registers the ETW provider. +func New(info logger.Info) (logger.Logger, error) { + if err := registerETWProvider(); err != nil { + return nil, err + } + logrus.Debugf("logging driver etwLogs configured for container: %s.", info.ContainerID) + + return &etwLogs{ + containerName: info.Name(), + imageName: info.ContainerImageName, + containerID: info.ContainerID, + imageID: info.ContainerImageID, + }, nil +} + +// Log logs the message to the ETW stream. +func (etwLogger *etwLogs) Log(msg *logger.Message) error { + if providerHandle == syscall.InvalidHandle { + // This should never be hit, if it is, it indicates a programming error. + errorMessage := "ETWLogs cannot log the message, because the event provider has not been registered." + logrus.Error(errorMessage) + return errors.New(errorMessage) + } + m := createLogMessage(etwLogger, msg) + logger.PutMessage(msg) + return callEventWriteString(m) +} + +// Close closes the logger by unregistering the ETW provider.
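+// The provider handle is reference counted, so it is only released once the +// last open etwLogs logger is closed (see unregisterETWProvider below).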
+func (etwLogger *etwLogs) Close() error { + unregisterETWProvider() + return nil +} + +func (etwLogger *etwLogs) Name() string { + return name +} + +func createLogMessage(etwLogger *etwLogs, msg *logger.Message) string { + return fmt.Sprintf("container_name: %s, image_name: %s, container_id: %s, image_id: %s, source: %s, log: %s", + etwLogger.containerName, + etwLogger.imageName, + etwLogger.containerID, + etwLogger.imageID, + msg.Source, + msg.Line) +} + +func registerETWProvider() error { + mu.Lock() + defer mu.Unlock() + if refCount == 0 { + var err error + if err = callEventRegister(); err != nil { + return err + } + } + + refCount++ + return nil +} + +func unregisterETWProvider() { + mu.Lock() + defer mu.Unlock() + if refCount == 1 { + if callEventUnregister() { + refCount-- + providerHandle = syscall.InvalidHandle + } + // Not returning an error if EventUnregister fails, because etwLogs will continue to work + } else { + refCount-- + } +} + +func callEventRegister() error { + // The provider's GUID is {a3693192-9ed6-46d2-a981-f8226c8363bd} + guid := syscall.GUID{ + Data1: 0xa3693192, + Data2: 0x9ed6, + Data3: 0x46d2, + Data4: [8]byte{0xa9, 0x81, 0xf8, 0x22, 0x6c, 0x83, 0x63, 0xbd}, + } + + ret, _, _ := procEventRegister.Call(uintptr(unsafe.Pointer(&guid)), 0, 0, uintptr(unsafe.Pointer(&providerHandle))) + if ret != win32CallSuccess { + errorMessage := fmt.Sprintf("Failed to register ETW provider. Error: %d", ret) + logrus.Error(errorMessage) + return errors.New(errorMessage) + } + return nil +} + +func callEventWriteString(message string) error { + utf16message, err := syscall.UTF16FromString(message) + + if err != nil { + return err + } + + ret, _, _ := procEventWriteString.Call(uintptr(providerHandle), 0, 0, uintptr(unsafe.Pointer(&utf16message[0]))) + if ret != win32CallSuccess { + errorMessage := fmt.Sprintf("ETWLogs provider failed to log message. Error: %d", ret) + logrus.Error(errorMessage) + return errors.New(errorMessage) + } + return nil +} + +func callEventUnregister() bool { + ret, _, _ := procEventUnregister.Call(uintptr(providerHandle)) + return ret == win32CallSuccess +} diff --git a/components/engine/daemon/logger/factory.go b/components/engine/daemon/logger/factory.go new file mode 100644 index 0000000000..32001590d9 --- /dev/null +++ b/components/engine/daemon/logger/factory.go @@ -0,0 +1,162 @@ +package logger + +import ( + "fmt" + "sort" + "sync" + + containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/docker/pkg/plugingetter" + units "github.com/docker/go-units" + "github.com/pkg/errors" +) + +// Creator builds a logging driver instance with given context. +type Creator func(Info) (Logger, error) + +// LogOptValidator checks the options specific to the underlying +// logging implementation. 
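+// A minimal sketch of a validator (the option name is illustrative): +// +// func validateExample(cfg map[string]string) error { +// for key := range cfg { +// if key != "example-opt" { +// return fmt.Errorf("unknown log opt '%s'", key) +// } +// } +// return nil +// }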
+type LogOptValidator func(cfg map[string]string) error + +type logdriverFactory struct { + registry map[string]Creator + optValidator map[string]LogOptValidator + m sync.Mutex +} + +func (lf *logdriverFactory) list() []string { + ls := make([]string, 0, len(lf.registry)) + lf.m.Lock() + for name := range lf.registry { + ls = append(ls, name) + } + lf.m.Unlock() + sort.Strings(ls) + return ls +} + +// ListDrivers gets the list of registered log driver names +func ListDrivers() []string { + return factory.list() +} + +func (lf *logdriverFactory) register(name string, c Creator) error { + if lf.driverRegistered(name) { + return fmt.Errorf("logger: log driver named '%s' is already registered", name) + } + + lf.m.Lock() + lf.registry[name] = c + lf.m.Unlock() + return nil +} + +func (lf *logdriverFactory) driverRegistered(name string) bool { + lf.m.Lock() + _, ok := lf.registry[name] + lf.m.Unlock() + if !ok { + if pluginGetter != nil { // this can be nil when the init functions are running + if l, _ := getPlugin(name, plugingetter.Lookup); l != nil { + return true + } + } + } + return ok +} + +func (lf *logdriverFactory) registerLogOptValidator(name string, l LogOptValidator) error { + lf.m.Lock() + defer lf.m.Unlock() + + if _, ok := lf.optValidator[name]; ok { + return fmt.Errorf("logger: log validator named '%s' is already registered", name) + } + lf.optValidator[name] = l + return nil +} + +func (lf *logdriverFactory) get(name string) (Creator, error) { + lf.m.Lock() + defer lf.m.Unlock() + + c, ok := lf.registry[name] + if ok { + return c, nil + } + + c, err := getPlugin(name, plugingetter.Acquire) + return c, errors.Wrapf(err, "logger: no log driver named '%s' is registered", name) +} + +func (lf *logdriverFactory) getLogOptValidator(name string) LogOptValidator { + lf.m.Lock() + defer lf.m.Unlock() + + return lf.optValidator[name] +} + +var factory = &logdriverFactory{registry: make(map[string]Creator), optValidator: make(map[string]LogOptValidator)} // global factory instance + +// RegisterLogDriver registers the given logging driver builder with given logging +// driver name. +func RegisterLogDriver(name string, c Creator) error { + return factory.register(name, c) +} + +// RegisterLogOptValidator registers the logging option validator with +// the given logging driver name. +func RegisterLogOptValidator(name string, l LogOptValidator) error { + return factory.registerLogOptValidator(name, l) +} + +// GetLogDriver provides the logging driver builder for a logging driver name. +func GetLogDriver(name string) (Creator, error) { + return factory.get(name) +} + +var builtInLogOpts = map[string]bool{ + "mode": true, + "max-buffer-size": true, +} + +// ValidateLogOpts checks the options for the given log driver. The +// options supported are specific to the LogDriver implementation.
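+// For example (driver name and value are illustrative), +// +// ValidateLogOpts("json-file", map[string]string{"max-size": "10m"}) +// +// filters out the built-in "mode" and "max-buffer-size" options and passes the +// rest on to the validator registered for the named driver.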
+func ValidateLogOpts(name string, cfg map[string]string) error { + if name == "none" { + return nil + } + + switch containertypes.LogMode(cfg["mode"]) { + case containertypes.LogModeBlocking, containertypes.LogModeNonBlock, containertypes.LogModeUnset: + default: + return fmt.Errorf("logger: logging mode not supported: %s", cfg["mode"]) + } + + if s, ok := cfg["max-buffer-size"]; ok { + if containertypes.LogMode(cfg["mode"]) != containertypes.LogModeNonBlock { + return fmt.Errorf("logger: max-buffer-size option is only supported with 'mode=%s'", containertypes.LogModeNonBlock) + } + if _, err := units.RAMInBytes(s); err != nil { + return errors.Wrap(err, "error parsing option max-buffer-size") + } + } + + if !factory.driverRegistered(name) { + return fmt.Errorf("logger: no log driver named '%s' is registered", name) + } + + filteredOpts := make(map[string]string, len(builtInLogOpts)) + for k, v := range cfg { + if !builtInLogOpts[k] { + filteredOpts[k] = v + } + } + + validator := factory.getLogOptValidator(name) + if validator != nil { + return validator(filteredOpts) + } + return nil +} diff --git a/components/engine/daemon/logger/fluentd/fluentd.go b/components/engine/daemon/logger/fluentd/fluentd.go new file mode 100644 index 0000000000..23465b3f44 --- /dev/null +++ b/components/engine/daemon/logger/fluentd/fluentd.go @@ -0,0 +1,250 @@ +// Package fluentd provides the log driver for forwarding server logs +// to fluentd endpoints. +package fluentd + +import ( + "fmt" + "math" + "net" + "net/url" + "strconv" + "strings" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/daemon/logger" + "github.com/docker/docker/daemon/logger/loggerutils" + "github.com/docker/docker/pkg/urlutil" + "github.com/docker/go-units" + "github.com/fluent/fluent-logger-golang/fluent" + "github.com/pkg/errors" +) + +type fluentd struct { + tag string + containerID string + containerName string + writer *fluent.Fluent + extra map[string]string +} + +type location struct { + protocol string + host string + port int + path string +} + +const ( + name = "fluentd" + + defaultProtocol = "tcp" + defaultHost = "127.0.0.1" + defaultPort = 24224 + defaultBufferLimit = 1024 * 1024 + + // with these defaults the logger tries to reconnect 2**32 - 1 times, + // failing (and panicking) only after roughly 204 years [ 1.5 ** (2**32 - 1) - 1 seconds] + defaultRetryWait = 1000 + defaultMaxRetries = math.MaxInt32 + + addressKey = "fluentd-address" + bufferLimitKey = "fluentd-buffer-limit" + retryWaitKey = "fluentd-retry-wait" + maxRetriesKey = "fluentd-max-retries" + asyncConnectKey = "fluentd-async-connect" +) + +func init() { + if err := logger.RegisterLogDriver(name, New); err != nil { + logrus.Fatal(err) + } + if err := logger.RegisterLogOptValidator(name, ValidateLogOpt); err != nil { + logrus.Fatal(err) + } +} + +// New creates a fluentd logger using the configuration passed in on +// the context. The supported context configuration variable is +// fluentd-address.
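+// For example, parseAddress below accepts all of the following forms (values +// are illustrative): +// +// fluentd-address=localhost:24224 +// fluentd-address=tcp://127.0.0.1:24224 +// fluentd-address=unix:///var/run/fluentd/fluentd.sock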
+func New(info logger.Info) (logger.Logger, error) { + loc, err := parseAddress(info.Config[addressKey]) + if err != nil { + return nil, err + } + + tag, err := loggerutils.ParseLogTag(info, loggerutils.DefaultTemplate) + if err != nil { + return nil, err + } + + extra, err := info.ExtraAttributes(nil) + if err != nil { + return nil, err + } + + bufferLimit := defaultBufferLimit + if info.Config[bufferLimitKey] != "" { + bl64, err := units.RAMInBytes(info.Config[bufferLimitKey]) + if err != nil { + return nil, err + } + bufferLimit = int(bl64) + } + + retryWait := defaultRetryWait + if info.Config[retryWaitKey] != "" { + rwd, err := time.ParseDuration(info.Config[retryWaitKey]) + if err != nil { + return nil, err + } + retryWait = int(rwd.Seconds() * 1000) + } + + maxRetries := defaultMaxRetries + if info.Config[maxRetriesKey] != "" { + mr64, err := strconv.ParseUint(info.Config[maxRetriesKey], 10, strconv.IntSize) + if err != nil { + return nil, err + } + maxRetries = int(mr64) + } + + asyncConnect := false + if info.Config[asyncConnectKey] != "" { + if asyncConnect, err = strconv.ParseBool(info.Config[asyncConnectKey]); err != nil { + return nil, err + } + } + + fluentConfig := fluent.Config{ + FluentPort: loc.port, + FluentHost: loc.host, + FluentNetwork: loc.protocol, + FluentSocketPath: loc.path, + BufferLimit: bufferLimit, + RetryWait: retryWait, + MaxRetry: maxRetries, + AsyncConnect: asyncConnect, + } + + logrus.WithField("container", info.ContainerID).WithField("config", fluentConfig). + Debug("logging driver fluentd configured") + + log, err := fluent.New(fluentConfig) + if err != nil { + return nil, err + } + return &fluentd{ + tag: tag, + containerID: info.ContainerID, + containerName: info.ContainerName, + writer: log, + extra: extra, + }, nil +} + +func (f *fluentd) Log(msg *logger.Message) error { + data := map[string]string{ + "container_id": f.containerID, + "container_name": f.containerName, + "source": msg.Source, + "log": string(msg.Line), + } + for k, v := range f.extra { + data[k] = v + } + + ts := msg.Timestamp + logger.PutMessage(msg) + // fluent-logger-golang buffers logs from failures and disconnections, + // and these are transferred again automatically. + return f.writer.PostWithTime(f.tag, ts, data) +} + +func (f *fluentd) Close() error { + return f.writer.Close() +} + +func (f *fluentd) Name() string { + return name +} + +// ValidateLogOpt looks for fluentd specific log option fluentd-address. 
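+// A config map that would pass validation (values are illustrative): +// +// map[string]string{ +// "fluentd-address": "tcp://127.0.0.1:24224", +// "tag": "{{.Name}}", +// "fluentd-async-connect": "true", +// }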
+func ValidateLogOpt(cfg map[string]string) error { + for key := range cfg { + switch key { + case "env": + case "env-regex": + case "labels": + case "tag": + case addressKey: + case bufferLimitKey: + case retryWaitKey: + case maxRetriesKey: + case asyncConnectKey: + // Accepted + default: + return fmt.Errorf("unknown log opt '%s' for fluentd log driver", key) + } + } + + _, err := parseAddress(cfg["fluentd-address"]) + return err +} + +func parseAddress(address string) (*location, error) { + if address == "" { + return &location{ + protocol: defaultProtocol, + host: defaultHost, + port: defaultPort, + path: "", + }, nil + } + + protocol := defaultProtocol + givenAddress := address + if urlutil.IsTransportURL(address) { + url, err := url.Parse(address) + if err != nil { + return nil, errors.Wrapf(err, "invalid fluentd-address %s", givenAddress) + } + // unix and unixgram socket + if url.Scheme == "unix" || url.Scheme == "unixgram" { + return &location{ + protocol: url.Scheme, + host: "", + port: 0, + path: url.Path, + }, nil + } + // tcp|udp + protocol = url.Scheme + address = url.Host + } + + host, port, err := net.SplitHostPort(address) + if err != nil { + if !strings.Contains(err.Error(), "missing port in address") { + return nil, errors.Wrapf(err, "invalid fluentd-address %s", givenAddress) + } + return &location{ + protocol: protocol, + host: host, + port: defaultPort, + path: "", + }, nil + } + + portnum, err := strconv.Atoi(port) + if err != nil { + return nil, errors.Wrapf(err, "invalid fluentd-address %s", givenAddress) + } + return &location{ + protocol: protocol, + host: host, + port: portnum, + path: "", + }, nil +} diff --git a/components/engine/daemon/logger/gcplogs/gcplogging.go b/components/engine/daemon/logger/gcplogs/gcplogging.go new file mode 100644 index 0000000000..3a48067f9d --- /dev/null +++ b/components/engine/daemon/logger/gcplogs/gcplogging.go @@ -0,0 +1,244 @@ +package gcplogs + +import ( + "fmt" + "sync" + "sync/atomic" + "time" + + "github.com/docker/docker/daemon/logger" + + "cloud.google.com/go/compute/metadata" + "cloud.google.com/go/logging" + "github.com/Sirupsen/logrus" + "golang.org/x/net/context" + mrpb "google.golang.org/genproto/googleapis/api/monitoredres" +) + +const ( + name = "gcplogs" + + projectOptKey = "gcp-project" + logLabelsKey = "labels" + logEnvKey = "env" + logEnvRegexKey = "env-regex" + logCmdKey = "gcp-log-cmd" + logZoneKey = "gcp-meta-zone" + logNameKey = "gcp-meta-name" + logIDKey = "gcp-meta-id" +) + +var ( + // The number of logs the gcplogs driver has dropped. 
+ droppedLogs uint64 + + onGCE bool + + // instance metadata populated from the metadata server if available + projectID string + zone string + instanceName string + instanceID string +) + +func init() { + + if err := logger.RegisterLogDriver(name, New); err != nil { + logrus.Fatal(err) + } + + if err := logger.RegisterLogOptValidator(name, ValidateLogOpts); err != nil { + logrus.Fatal(err) + } +} + +type gcplogs struct { + logger *logging.Logger + instance *instanceInfo + container *containerInfo +} + +type dockerLogEntry struct { + Instance *instanceInfo `json:"instance,omitempty"` + Container *containerInfo `json:"container,omitempty"` + Data string `json:"data,omitempty"` +} + +type instanceInfo struct { + Zone string `json:"zone,omitempty"` + Name string `json:"name,omitempty"` + ID string `json:"id,omitempty"` +} + +type containerInfo struct { + Name string `json:"name,omitempty"` + ID string `json:"id,omitempty"` + ImageName string `json:"imageName,omitempty"` + ImageID string `json:"imageId,omitempty"` + Created time.Time `json:"created,omitempty"` + Command string `json:"command,omitempty"` + Metadata map[string]string `json:"metadata,omitempty"` +} + +var initGCPOnce sync.Once + +func initGCP() { + initGCPOnce.Do(func() { + onGCE = metadata.OnGCE() + if onGCE { + // These will fail on instances if the metadata service is + // down or the client is compiled with an API version that + // has been removed. Since these are not vital, let's ignore + // them and make their fields in the dockerLogEntry omitempty + projectID, _ = metadata.ProjectID() + zone, _ = metadata.Zone() + instanceName, _ = metadata.InstanceName() + instanceID, _ = metadata.InstanceID() + } + }) +} + +// New creates a new logger that logs to Google Cloud Logging using the application +// default credentials. +// +// See https://developers.google.com/identity/protocols/application-default-credentials +func New(info logger.Info) (logger.Logger, error) { + initGCP() + + var project string + if projectID != "" { + project = projectID + } + if projectID, found := info.Config[projectOptKey]; found { + project = projectID + } + if project == "" { + return nil, fmt.Errorf("No project was specified and the project could not be read from the metadata server. Please specify a project") + } + + // Issue #29344: gcplogs segfaults (static binary) + // If HOME is not set, logging.NewClient() will call os/user.Current() via oauth2/google. + // However, in a static binary, os/user.Current() leads to a segfault due to a glibc issue that won't be fixed + // in the short term.
(golang/go#13470, https://sourceware.org/bugzilla/show_bug.cgi?id=19341) + // So we forcibly set HOME to avoid calling os/user.Current() + if err := ensureHomeIfIAmStatic(); err != nil { + return nil, err + } + + c, err := logging.NewClient(context.Background(), project) + if err != nil { + return nil, err + } + var instanceResource *instanceInfo + if onGCE { + instanceResource = &instanceInfo{ + Zone: zone, + Name: instanceName, + ID: instanceID, + } + } else if info.Config[logZoneKey] != "" || info.Config[logNameKey] != "" || info.Config[logIDKey] != "" { + instanceResource = &instanceInfo{ + Zone: info.Config[logZoneKey], + Name: info.Config[logNameKey], + ID: info.Config[logIDKey], + } + } + + options := []logging.LoggerOption{} + if instanceResource != nil { + vmMrpb := logging.CommonResource( + &mrpb.MonitoredResource{ + Type: "gce_instance", + Labels: map[string]string{ + "instance_id": instanceResource.ID, + "zone": instanceResource.Zone, + }, + }, + ) + options = []logging.LoggerOption{vmMrpb} + } + lg := c.Logger("gcplogs-docker-driver", options...) + + if err := c.Ping(context.Background()); err != nil { + return nil, fmt.Errorf("unable to connect or authenticate with Google Cloud Logging: %v", err) + } + + extraAttributes, err := info.ExtraAttributes(nil) + if err != nil { + return nil, err + } + + l := &gcplogs{ + logger: lg, + container: &containerInfo{ + Name: info.ContainerName, + ID: info.ContainerID, + ImageName: info.ContainerImageName, + ImageID: info.ContainerImageID, + Created: info.ContainerCreated, + Metadata: extraAttributes, + }, + } + + if info.Config[logCmdKey] == "true" { + l.container.Command = info.Command() + } + + if instanceResource != nil { + l.instance = instanceResource + } + + // The logger "overflows" at a rate of 10,000 logs per second and this + // overflow func is called. We want to surface the error to the user + // without overly spamming /var/log/docker.log so we log the first time + // we overflow and every 1000th time after. + c.OnError = func(err error) { + if err == logging.ErrOverflow { + if i := atomic.AddUint64(&droppedLogs, 1); i%1000 == 1 { + logrus.Errorf("gcplogs driver has dropped %v logs", i) + } + } else { + logrus.Error(err) + } + } + + return l, nil +} + +// ValidateLogOpts validates the opts passed to the gcplogs driver. Only the +// options recognized below are accepted.
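+// For example (the project ID is illustrative), +// +// ValidateLogOpts(map[string]string{"gcp-project": "my-project"}) // nil +// ValidateLogOpts(map[string]string{"max-size": "10m"}) // error: not a valid option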
+func ValidateLogOpts(cfg map[string]string) error { + for k := range cfg { + switch k { + case projectOptKey, logLabelsKey, logEnvKey, logEnvRegexKey, logCmdKey, logZoneKey, logNameKey, logIDKey: + default: + return fmt.Errorf("%q is not a valid option for the gcplogs driver", k) + } + } + return nil +} + +func (l *gcplogs) Log(m *logger.Message) error { + data := string(m.Line) + ts := m.Timestamp + logger.PutMessage(m) + + l.logger.Log(logging.Entry{ + Timestamp: ts, + Payload: &dockerLogEntry{ + Instance: l.instance, + Container: l.container, + Data: data, + }, + }) + return nil +} + +func (l *gcplogs) Close() error { + l.logger.Flush() + return nil +} + +func (l *gcplogs) Name() string { + return name +} diff --git a/components/engine/daemon/logger/gcplogs/gcplogging_linux.go b/components/engine/daemon/logger/gcplogs/gcplogging_linux.go new file mode 100644 index 0000000000..9af8b3c17e --- /dev/null +++ b/components/engine/daemon/logger/gcplogs/gcplogging_linux.go @@ -0,0 +1,31 @@ +// +build linux + +package gcplogs + +import ( + "os" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/pkg/homedir" +) + +// ensureHomeIfIAmStatic ensures $HOME is set if dockerversion.IAmStatic is "true". +// See issue #29344: gcplogs segfaults (static binary) +// If HOME is not set, logging.NewClient() will call os/user.Current() via oauth2/google. +// However, in a static binary, os/user.Current() leads to a segfault due to a glibc issue that won't be fixed +// in the short term. (golang/go#13470, https://sourceware.org/bugzilla/show_bug.cgi?id=19341) +// So we forcibly set HOME to avoid calling os/user.Current() +func ensureHomeIfIAmStatic() error { + // Note: dockerversion.IAmStatic and homedir.GetStatic() are only available on linux. + // So we need to use them in this gcplogging_linux.go rather than in gcplogging.go + if dockerversion.IAmStatic == "true" && os.Getenv("HOME") == "" { + home, err := homedir.GetStatic() + if err != nil { + return err + } + logrus.Warnf("gcplogs requires HOME to be set for static daemon binary. Forcibly setting HOME to %s.", home) + os.Setenv("HOME", home) + } + return nil +} diff --git a/components/engine/daemon/logger/gcplogs/gcplogging_others.go b/components/engine/daemon/logger/gcplogs/gcplogging_others.go new file mode 100644 index 0000000000..45e3b8d6d4 --- /dev/null +++ b/components/engine/daemon/logger/gcplogs/gcplogging_others.go @@ -0,0 +1,7 @@ +// +build !linux + +package gcplogs + +func ensureHomeIfIAmStatic() error { + return nil +} diff --git a/components/engine/daemon/logger/gelf/gelf.go b/components/engine/daemon/logger/gelf/gelf.go new file mode 100644 index 0000000000..4b0130dfbe --- /dev/null +++ b/components/engine/daemon/logger/gelf/gelf.go @@ -0,0 +1,209 @@ +// +build linux + +// Package gelf provides the log driver for forwarding server logs to +// endpoints that support the Graylog Extended Log Format.
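+// +// The only transport this driver supports is UDP, so gelf-address must take +// the form udp://host:port (see parseAddress below).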
+package gelf + +import ( + "compress/flate" + "encoding/json" + "fmt" + "net" + "net/url" + "strconv" + "time" + + "github.com/Graylog2/go-gelf/gelf" + "github.com/Sirupsen/logrus" + "github.com/docker/docker/daemon/logger" + "github.com/docker/docker/daemon/logger/loggerutils" + "github.com/docker/docker/pkg/urlutil" +) + +const name = "gelf" + +type gelfLogger struct { + writer *gelf.Writer + info logger.Info + hostname string + rawExtra json.RawMessage +} + +func init() { + if err := logger.RegisterLogDriver(name, New); err != nil { + logrus.Fatal(err) + } + if err := logger.RegisterLogOptValidator(name, ValidateLogOpt); err != nil { + logrus.Fatal(err) + } +} + +// New creates a gelf logger using the configuration passed in on the +// context. The supported context configuration variable is gelf-address. +func New(info logger.Info) (logger.Logger, error) { + // parse gelf address + address, err := parseAddress(info.Config["gelf-address"]) + if err != nil { + return nil, err + } + + // collect extra data for GELF message + hostname, err := info.Hostname() + if err != nil { + return nil, fmt.Errorf("gelf: cannot access hostname to set source field") + } + + // parse log tag + tag, err := loggerutils.ParseLogTag(info, loggerutils.DefaultTemplate) + if err != nil { + return nil, err + } + + extra := map[string]interface{}{ + "_container_id": info.ContainerID, + "_container_name": info.Name(), + "_image_id": info.ContainerImageID, + "_image_name": info.ContainerImageName, + "_command": info.Command(), + "_tag": tag, + "_created": info.ContainerCreated, + } + + extraAttrs, err := info.ExtraAttributes(func(key string) string { + if key[0] == '_' { + return key + } + return "_" + key + }) + + if err != nil { + return nil, err + } + + for k, v := range extraAttrs { + extra[k] = v + } + + rawExtra, err := json.Marshal(extra) + if err != nil { + return nil, err + } + + // create new gelfWriter + gelfWriter, err := gelf.NewWriter(address) + if err != nil { + return nil, fmt.Errorf("gelf: cannot connect to GELF endpoint: %s %v", address, err) + } + + if v, ok := info.Config["gelf-compression-type"]; ok { + switch v { + case "gzip": + gelfWriter.CompressionType = gelf.CompressGzip + case "zlib": + gelfWriter.CompressionType = gelf.CompressZlib + case "none": + gelfWriter.CompressionType = gelf.CompressNone + default: + return nil, fmt.Errorf("gelf: invalid compression type %q", v) + } + } + + if v, ok := info.Config["gelf-compression-level"]; ok { + val, err := strconv.Atoi(v) + if err != nil { + return nil, fmt.Errorf("gelf: invalid compression level %s, err %v", v, err) + } + gelfWriter.CompressionLevel = val + } + + return &gelfLogger{ + writer: gelfWriter, + info: info, + hostname: hostname, + rawExtra: rawExtra, + }, nil +} + +func (s *gelfLogger) Log(msg *logger.Message) error { + level := gelf.LOG_INFO + if msg.Source == "stderr" { + level = gelf.LOG_ERR + } + + m := gelf.Message{ + Version: "1.1", + Host: s.hostname, + Short: string(msg.Line), + TimeUnix: float64(msg.Timestamp.UnixNano()/int64(time.Millisecond)) / 1000.0, + Level: level, + RawExtra: s.rawExtra, + } + logger.PutMessage(msg) + + if err := s.writer.WriteMessage(&m); err != nil { + return fmt.Errorf("gelf: cannot send GELF message: %v", err) + } + return nil +} + +func (s *gelfLogger) Close() error { + return s.writer.Close() +} + +func (s *gelfLogger) Name() string { + return name +} + +// ValidateLogOpt looks for gelf specific log option gelf-address. 
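+// A config map that would pass validation (address and values are illustrative): +// +// map[string]string{ +// "gelf-address": "udp://127.0.0.1:12201", +// "gelf-compression-type": "gzip", +// "gelf-compression-level": "6", +// }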
+func ValidateLogOpt(cfg map[string]string) error { + for key, val := range cfg { + switch key { + case "gelf-address": + case "tag": + case "labels": + case "env": + case "env-regex": + case "gelf-compression-level": + i, err := strconv.Atoi(val) + if err != nil || i < flate.DefaultCompression || i > flate.BestCompression { + return fmt.Errorf("unknown value %q for log opt %q for gelf log driver", val, key) + } + case "gelf-compression-type": + switch val { + case "gzip", "zlib", "none": + default: + return fmt.Errorf("unknown value %q for log opt %q for gelf log driver", val, key) + } + default: + return fmt.Errorf("unknown log opt %q for gelf log driver", key) + } + } + + _, err := parseAddress(cfg["gelf-address"]) + return err +} + +func parseAddress(address string) (string, error) { + if address == "" { + return "", nil + } + if !urlutil.IsTransportURL(address) { + return "", fmt.Errorf("gelf-address should be in the form proto://address, got %v", address) + } + url, err := url.Parse(address) + if err != nil { + return "", err + } + + // we support only udp + if url.Scheme != "udp" { + return "", fmt.Errorf("gelf: endpoint needs to be UDP") + } + + // get host and port + if _, _, err = net.SplitHostPort(url.Host); err != nil { + return "", fmt.Errorf("gelf: please provide gelf-address as udp://host:port") + } + + return url.Host, nil +} diff --git a/components/engine/daemon/logger/gelf/gelf_unsupported.go b/components/engine/daemon/logger/gelf/gelf_unsupported.go new file mode 100644 index 0000000000..266f73b18b --- /dev/null +++ b/components/engine/daemon/logger/gelf/gelf_unsupported.go @@ -0,0 +1,3 @@ +// +build !linux + +package gelf diff --git a/components/engine/daemon/logger/journald/journald.go b/components/engine/daemon/logger/journald/journald.go new file mode 100644 index 0000000000..9865a273c5 --- /dev/null +++ b/components/engine/daemon/logger/journald/journald.go @@ -0,0 +1,125 @@ +// +build linux + +// Package journald provides the log driver for forwarding server logs +// to endpoints that receive the systemd format. +package journald + +import ( + "fmt" + "sync" + "unicode" + + "github.com/Sirupsen/logrus" + "github.com/coreos/go-systemd/journal" + "github.com/docker/docker/daemon/logger" + "github.com/docker/docker/daemon/logger/loggerutils" +) + +const name = "journald" + +type journald struct { + mu sync.Mutex + vars map[string]string // additional variables and values to send to the journal along with the log message + readers readerList + closed bool +} + +type readerList struct { + readers map[*logger.LogWatcher]*logger.LogWatcher +} + +func init() { + if err := logger.RegisterLogDriver(name, New); err != nil { + logrus.Fatal(err) + } + if err := logger.RegisterLogOptValidator(name, validateLogOpt); err != nil { + logrus.Fatal(err) + } +} + +// sanitizeKeyMod returns the sanitized string so that it can be used in journald. +// Journald fields have special requirements: they must be composed of uppercase +// letters, numbers, and underscores, but must not start with an underscore. +func sanitizeKeyMod(s string) string { + n := "" + for _, v := range s { + if 'a' <= v && v <= 'z' { + v = unicode.ToUpper(v) + } else if ('Z' < v || v < 'A') && ('9' < v || v < '0') { + v = '_' + } + // If (n == "" && v == '_'), skip the character: fields must not begin with an underscore + if !(n == "" && v == '_') { + n += string(v) + } + } + return n +} + +// New creates a journald logger using the configuration passed in on +// the context.
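+// Each message is sent to the journal with CONTAINER_ID (the 12-character +// short ID), CONTAINER_ID_FULL, CONTAINER_NAME and CONTAINER_TAG attached, +// plus any extra attributes after sanitizeKeyMod has been applied to their keys.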
+func New(info logger.Info) (logger.Logger, error) { + if !journal.Enabled() { + return nil, fmt.Errorf("journald is not enabled on this host") + } + + // parse log tag + tag, err := loggerutils.ParseLogTag(info, loggerutils.DefaultTemplate) + if err != nil { + return nil, err + } + + vars := map[string]string{ + "CONTAINER_ID": info.ContainerID[:12], + "CONTAINER_ID_FULL": info.ContainerID, + "CONTAINER_NAME": info.Name(), + "CONTAINER_TAG": tag, + } + extraAttrs, err := info.ExtraAttributes(sanitizeKeyMod) + if err != nil { + return nil, err + } + for k, v := range extraAttrs { + vars[k] = v + } + return &journald{vars: vars, readers: readerList{readers: make(map[*logger.LogWatcher]*logger.LogWatcher)}}, nil +} + +// validateLogOpt is the callback we supply for the factory to pass the +// (possibly empty) configuration map to; only the common labels, env, +// env-regex and tag options are accepted. +func validateLogOpt(cfg map[string]string) error { + for key := range cfg { + switch key { + case "labels": + case "env": + case "env-regex": + case "tag": + default: + return fmt.Errorf("unknown log opt '%s' for journald log driver", key) + } + } + return nil +} + +func (s *journald) Log(msg *logger.Message) error { + vars := map[string]string{} + for k, v := range s.vars { + vars[k] = v + } + if msg.Partial { + vars["CONTAINER_PARTIAL_MESSAGE"] = "true" + } + + line := string(msg.Line) + source := msg.Source + logger.PutMessage(msg) + + if source == "stderr" { + return journal.Send(line, journal.PriErr, vars) + } + return journal.Send(line, journal.PriInfo, vars) +} + +func (s *journald) Name() string { + return name +} diff --git a/components/engine/daemon/logger/journald/journald_test.go b/components/engine/daemon/logger/journald/journald_test.go new file mode 100644 index 0000000000..224423fd07 --- /dev/null +++ b/components/engine/daemon/logger/journald/journald_test.go @@ -0,0 +1,23 @@ +// +build linux + +package journald + +import ( + "testing" +) + +func TestSanitizeKeyMod(t *testing.T) { + entries := map[string]string{ + "io.kubernetes.pod.name": "IO_KUBERNETES_POD_NAME", + "io?.kubernetes.pod.name": "IO__KUBERNETES_POD_NAME", + "?io.kubernetes.pod.name": "IO_KUBERNETES_POD_NAME", + "io123.kubernetes.pod.name": "IO123_KUBERNETES_POD_NAME", + "_io123.kubernetes.pod.name": "IO123_KUBERNETES_POD_NAME", + "__io123_kubernetes.pod.name": "IO123_KUBERNETES_POD_NAME", + } + for k, v := range entries { + if sanitizeKeyMod(k) != v { + t.Fatalf("Failed to sanitize %s, got %s, expected %s", k, sanitizeKeyMod(k), v) + } + } +} diff --git a/components/engine/daemon/logger/journald/journald_unsupported.go b/components/engine/daemon/logger/journald/journald_unsupported.go new file mode 100644 index 0000000000..d52ca92e4f --- /dev/null +++ b/components/engine/daemon/logger/journald/journald_unsupported.go @@ -0,0 +1,6 @@ +// +build !linux + +package journald + +type journald struct { +} diff --git a/components/engine/daemon/logger/journald/read.go b/components/engine/daemon/logger/journald/read.go new file mode 100644 index 0000000000..9ecc3b521d --- /dev/null +++ b/components/engine/daemon/logger/journald/read.go @@ -0,0 +1,425 @@ +// +build linux,cgo,!static_build,journald + +package journald + +// #include <sys/types.h> + // #include <sys/poll.h> + // #include <systemd/sd-journal.h> + // #include <errno.h> + // #include <stdio.h> + // #include <stdlib.h> + // #include <string.h> + // #include <time.h> + // #include <unistd.h> + // + //static int get_message(sd_journal *j, const char **msg, size_t *length, int *partial) +//{ +// int rc; +// size_t plength; +// *msg = NULL; +// *length = 0; +// plength = strlen("CONTAINER_PARTIAL_MESSAGE=true"); +// rc = sd_journal_get_data(j,
"CONTAINER_PARTIAL_MESSAGE", (const void **) msg, length); +// *partial = ((rc == 0) && (*length == plength) && (memcmp(*msg, "CONTAINER_PARTIAL_MESSAGE=true", plength) == 0)); +// rc = sd_journal_get_data(j, "MESSAGE", (const void **) msg, length); +// if (rc == 0) { +// if (*length > 8) { +// (*msg) += 8; +// *length -= 8; +// } else { +// *msg = NULL; +// *length = 0; +// rc = -ENOENT; +// } +// } +// return rc; +//} +//static int get_priority(sd_journal *j, int *priority) +//{ +// const void *data; +// size_t i, length; +// int rc; +// *priority = -1; +// rc = sd_journal_get_data(j, "PRIORITY", &data, &length); +// if (rc == 0) { +// if ((length > 9) && (strncmp(data, "PRIORITY=", 9) == 0)) { +// *priority = 0; +// for (i = 9; i < length; i++) { +// *priority = *priority * 10 + ((const char *)data)[i] - '0'; +// } +// if (length > 9) { +// rc = 0; +// } +// } +// } +// return rc; +//} +//static int is_attribute_field(const char *msg, size_t length) +//{ +// static const struct known_field { +// const char *name; +// size_t length; +// } fields[] = { +// {"MESSAGE", sizeof("MESSAGE") - 1}, +// {"MESSAGE_ID", sizeof("MESSAGE_ID") - 1}, +// {"PRIORITY", sizeof("PRIORITY") - 1}, +// {"CODE_FILE", sizeof("CODE_FILE") - 1}, +// {"CODE_LINE", sizeof("CODE_LINE") - 1}, +// {"CODE_FUNC", sizeof("CODE_FUNC") - 1}, +// {"ERRNO", sizeof("ERRNO") - 1}, +// {"SYSLOG_FACILITY", sizeof("SYSLOG_FACILITY") - 1}, +// {"SYSLOG_IDENTIFIER", sizeof("SYSLOG_IDENTIFIER") - 1}, +// {"SYSLOG_PID", sizeof("SYSLOG_PID") - 1}, +// {"CONTAINER_NAME", sizeof("CONTAINER_NAME") - 1}, +// {"CONTAINER_ID", sizeof("CONTAINER_ID") - 1}, +// {"CONTAINER_ID_FULL", sizeof("CONTAINER_ID_FULL") - 1}, +// {"CONTAINER_TAG", sizeof("CONTAINER_TAG") - 1}, +// }; +// unsigned int i; +// void *p; +// if ((length < 1) || (msg[0] == '_') || ((p = memchr(msg, '=', length)) == NULL)) { +// return -1; +// } +// length = ((const char *) p) - msg; +// for (i = 0; i < sizeof(fields) / sizeof(fields[0]); i++) { +// if ((fields[i].length == length) && (memcmp(fields[i].name, msg, length) == 0)) { +// return -1; +// } +// } +// return 0; +//} +//static int get_attribute_field(sd_journal *j, const char **msg, size_t *length) +//{ +// int rc; +// *msg = NULL; +// *length = 0; +// while ((rc = sd_journal_enumerate_data(j, (const void **) msg, length)) > 0) { +// if (is_attribute_field(*msg, *length) == 0) { +// break; +// } +// rc = -ENOENT; +// } +// return rc; +//} +//static int wait_for_data_cancelable(sd_journal *j, int pipefd) +//{ +// struct pollfd fds[2]; +// uint64_t when = 0; +// int timeout, jevents, i; +// struct timespec ts; +// uint64_t now; +// +// memset(&fds, 0, sizeof(fds)); +// fds[0].fd = pipefd; +// fds[0].events = POLLHUP; +// fds[1].fd = sd_journal_get_fd(j); +// if (fds[1].fd < 0) { +// return fds[1].fd; +// } +// +// do { +// jevents = sd_journal_get_events(j); +// if (jevents < 0) { +// return jevents; +// } +// fds[1].events = jevents; +// sd_journal_get_timeout(j, &when); +// if (when == -1) { +// timeout = -1; +// } else { +// clock_gettime(CLOCK_MONOTONIC, &ts); +// now = (uint64_t) ts.tv_sec * 1000000 + ts.tv_nsec / 1000; +// timeout = when > now ? (int) ((when - now + 999) / 1000) : 0; +// } +// i = poll(fds, 2, timeout); +// if ((i == -1) && (errno != EINTR)) { +// /* An unexpected error. */ +// return (errno != 0) ? -errno : -EINTR; +// } +// if (fds[0].revents & POLLHUP) { +// /* The close notification pipe was closed. 
*/ +// return 0; +// } +// if (sd_journal_process(j) == SD_JOURNAL_APPEND) { +// /* Data, which we might care about, was appended. */ +// return 1; +// } +// } while ((fds[0].revents & POLLHUP) == 0); +// return 0; +//} +import "C" + +import ( + "fmt" + "strings" + "time" + "unsafe" + + "github.com/Sirupsen/logrus" + "github.com/coreos/go-systemd/journal" + "github.com/docker/docker/daemon/logger" +) + +func (s *journald) Close() error { + s.mu.Lock() + s.closed = true + for reader := range s.readers.readers { + reader.Close() + } + s.mu.Unlock() + return nil +} + +func (s *journald) drainJournal(logWatcher *logger.LogWatcher, config logger.ReadConfig, j *C.sd_journal, oldCursor *C.char) *C.char { + var msg, data, cursor *C.char + var length C.size_t + var stamp C.uint64_t + var priority, partial C.int + + // Walk the journal from here forward until we run out of new entries. +drain: + for { + // Try not to send a given entry twice. + if oldCursor != nil { + for C.sd_journal_test_cursor(j, oldCursor) > 0 { + if C.sd_journal_next(j) <= 0 { + break drain + } + } + } + // Read and send the logged message, if there is one to read. + i := C.get_message(j, &msg, &length, &partial) + if i != -C.ENOENT && i != -C.EADDRNOTAVAIL { + // Read the entry's timestamp. + if C.sd_journal_get_realtime_usec(j, &stamp) != 0 { + break + } + // Set up the time and text of the entry. + timestamp := time.Unix(int64(stamp)/1000000, (int64(stamp)%1000000)*1000) + line := C.GoBytes(unsafe.Pointer(msg), C.int(length)) + if partial == 0 { + line = append(line, "\n"...) + } + // Recover the stream name by mapping + // from the journal priority back to + // the stream that we would have + // assigned that value. + source := "" + if C.get_priority(j, &priority) != 0 { + source = "" + } else if priority == C.int(journal.PriErr) { + source = "stderr" + } else if priority == C.int(journal.PriInfo) { + source = "stdout" + } + // Retrieve the values of any variables we're adding to the journal. + attrs := make(map[string]string) + C.sd_journal_restart_data(j) + for C.get_attribute_field(j, &data, &length) > C.int(0) { + kv := strings.SplitN(C.GoStringN(data, C.int(length)), "=", 2) + attrs[kv[0]] = kv[1] + } + if len(attrs) == 0 { + attrs = nil + } + // Send the log message. + logWatcher.Msg <- &logger.Message{ + Line: line, + Source: source, + Timestamp: timestamp.In(time.UTC), + Attrs: attrs, + } + } + // If we're at the end of the journal, we're done (for now). + if C.sd_journal_next(j) <= 0 { + break + } + } + + // free(NULL) is safe + C.free(unsafe.Pointer(oldCursor)) + if C.sd_journal_get_cursor(j, &cursor) != 0 { + // ensure that we won't be freeing an address that's invalid + cursor = nil + } + return cursor +} + +func (s *journald) followJournal(logWatcher *logger.LogWatcher, config logger.ReadConfig, j *C.sd_journal, pfd [2]C.int, cursor *C.char) *C.char { + s.mu.Lock() + s.readers.readers[logWatcher] = logWatcher + if s.closed { + // the journald Logger is closed, presumably because the container has been + // reset. So we shouldn't follow, because we'll never be woken up. But we + // should make one more drainJournal call to be sure we've got all the logs. + // Close pfd[1] so that one drainJournal happens, then cleanup, then return. + C.close(pfd[1]) + } + s.mu.Unlock() + + newCursor := make(chan *C.char) + + go func() { + for { + // Keep copying journal data out until we're notified to stop + // or we hit an error. 
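+ // wait_for_data_cancelable returns 1 when new journal data was appended, + // 0 when the close notification pipe was closed, and a negative errno + // value on failure.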
+ status := C.wait_for_data_cancelable(j, pfd[0]) + if status < 0 { + cerrstr := C.strerror(C.int(-status)) + errstr := C.GoString(cerrstr) + fmtstr := "error %q while attempting to follow journal for container %q" + logrus.Errorf(fmtstr, errstr, s.vars["CONTAINER_ID_FULL"]) + break + } + + cursor = s.drainJournal(logWatcher, config, j, cursor) + + if status != 1 { + // We were notified to stop + break + } + } + + // Clean up. + C.close(pfd[0]) + s.mu.Lock() + delete(s.readers.readers, logWatcher) + s.mu.Unlock() + close(logWatcher.Msg) + newCursor <- cursor + }() + + // Wait until we're told to stop. + select { + case cursor = <-newCursor: + case <-logWatcher.WatchClose(): + // Notify the other goroutine that its work is done. + C.close(pfd[1]) + cursor = <-newCursor + } + + return cursor +} + +func (s *journald) readLogs(logWatcher *logger.LogWatcher, config logger.ReadConfig) { + var j *C.sd_journal + var cmatch, cursor *C.char + var stamp C.uint64_t + var sinceUnixMicro uint64 + var pipes [2]C.int + + // Get a handle to the journal. + rc := C.sd_journal_open(&j, C.int(0)) + if rc != 0 { + logWatcher.Err <- fmt.Errorf("error opening journal") + close(logWatcher.Msg) + return + } + // If we end up following the log, we can set the journal context + // pointer and the channel pointer to nil so that we won't close them + // here, potentially while the goroutine that uses them is still + // running. Otherwise, close them when we return from this function. + following := false + defer func(pfollowing *bool) { + if !*pfollowing { + close(logWatcher.Msg) + } + C.sd_journal_close(j) + }(&following) + // Remove limits on the size of data items that we'll retrieve. + rc = C.sd_journal_set_data_threshold(j, C.size_t(0)) + if rc != 0 { + logWatcher.Err <- fmt.Errorf("error setting journal data threshold") + return + } + // Add a match to have the library do the searching for us. + cmatch = C.CString("CONTAINER_ID_FULL=" + s.vars["CONTAINER_ID_FULL"]) + defer C.free(unsafe.Pointer(cmatch)) + rc = C.sd_journal_add_match(j, unsafe.Pointer(cmatch), C.strlen(cmatch)) + if rc != 0 { + logWatcher.Err <- fmt.Errorf("error setting journal match") + return + } + // If we have a cutoff time, convert it to Unix time once. + if !config.Since.IsZero() { + nano := config.Since.UnixNano() + sinceUnixMicro = uint64(nano / 1000) + } + if config.Tail > 0 { + lines := config.Tail + // Start at the end of the journal. + if C.sd_journal_seek_tail(j) < 0 { + logWatcher.Err <- fmt.Errorf("error seeking to end of journal") + return + } + if C.sd_journal_previous(j) < 0 { + logWatcher.Err <- fmt.Errorf("error backtracking to previous journal entry") + return + } + // Walk backward. + for lines > 0 { + // Stop if the entry time is before our cutoff. + // We'll need the entry time if it isn't, so go + // ahead and parse it now. + if C.sd_journal_get_realtime_usec(j, &stamp) != 0 { + break + } else { + // Compare the timestamp on the entry + // to our threshold value. + if sinceUnixMicro != 0 && sinceUnixMicro > uint64(stamp) { + break + } + } + lines-- + // If we're at the start of the journal, or + // don't need to back up past any more entries, + // stop. + if lines == 0 || C.sd_journal_previous(j) <= 0 { + break + } + } + } else { + // Start at the beginning of the journal. + if C.sd_journal_seek_head(j) < 0 { + logWatcher.Err <- fmt.Errorf("error seeking to start of journal") + return + } + // If we have a cutoff date, fast-forward to it. 
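+ // sinceUnixMicro was derived above as config.Since.UnixNano() / 1000, so it + // is already in the microsecond units sd_journal_seek_realtime_usec expects.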
+	if sinceUnixMicro != 0 && C.sd_journal_seek_realtime_usec(j, C.uint64_t(sinceUnixMicro)) != 0 {
+		logWatcher.Err <- fmt.Errorf("error seeking to start time in journal")
+		return
+	}
+	if C.sd_journal_next(j) < 0 {
+		logWatcher.Err <- fmt.Errorf("error skipping to next journal entry")
+		return
+	}
+	}
+	cursor = s.drainJournal(logWatcher, config, j, nil)
+	if config.Follow {
+		// Allocate a descriptor for following the journal, if we'll
+		// need one.  Do it here so that we can report if it fails.
+		if fd := C.sd_journal_get_fd(j); fd < C.int(0) {
+			logWatcher.Err <- fmt.Errorf("error opening journald follow descriptor: %q", C.GoString(C.strerror(-fd)))
+		} else {
+			// Create a pipe that we can poll at the same time as
+			// the journald descriptor.
+			if C.pipe(&pipes[0]) == C.int(-1) {
+				logWatcher.Err <- fmt.Errorf("error opening journald close notification pipe")
+			} else {
+				cursor = s.followJournal(logWatcher, config, j, pipes, cursor)
+				// Let followJournal handle freeing the journal context
+				// object and closing the channel.
+				following = true
+			}
+		}
+	}
+
+	C.free(unsafe.Pointer(cursor))
+	return
+}
+
+func (s *journald) ReadLogs(config logger.ReadConfig) *logger.LogWatcher {
+	logWatcher := logger.NewLogWatcher()
+	go s.readLogs(logWatcher, config)
+	return logWatcher
+}
diff --git a/components/engine/daemon/logger/journald/read_native.go b/components/engine/daemon/logger/journald/read_native.go
new file mode 100644
index 0000000000..bba6de55be
--- /dev/null
+++ b/components/engine/daemon/logger/journald/read_native.go
@@ -0,0 +1,6 @@
+// +build linux,cgo,!static_build,journald,!journald_compat
+
+package journald
+
+// #cgo pkg-config: libsystemd
+import "C"
diff --git a/components/engine/daemon/logger/journald/read_native_compat.go b/components/engine/daemon/logger/journald/read_native_compat.go
new file mode 100644
index 0000000000..3f7a43c59e
--- /dev/null
+++ b/components/engine/daemon/logger/journald/read_native_compat.go
@@ -0,0 +1,6 @@
+// +build linux,cgo,!static_build,journald,journald_compat
+
+package journald
+
+// #cgo pkg-config: libsystemd-journal
+import "C"
diff --git a/components/engine/daemon/logger/journald/read_unsupported.go b/components/engine/daemon/logger/journald/read_unsupported.go
new file mode 100644
index 0000000000..b43abdcaf7
--- /dev/null
+++ b/components/engine/daemon/logger/journald/read_unsupported.go
@@ -0,0 +1,7 @@
+// +build !linux !cgo static_build !journald
+
+package journald
+
+func (s *journald) Close() error {
+	return nil
+}
diff --git a/components/engine/daemon/logger/jsonfilelog/jsonfilelog.go b/components/engine/daemon/logger/jsonfilelog/jsonfilelog.go
new file mode 100644
index 0000000000..797644e669
--- /dev/null
+++ b/components/engine/daemon/logger/jsonfilelog/jsonfilelog.go
@@ -0,0 +1,159 @@
+// Package jsonfilelog provides the default Logger implementation for
+// Docker logging. This logger logs to files on the host server in the
+// JSON format.
+package jsonfilelog
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"strconv"
+	"sync"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/docker/daemon/logger"
+	"github.com/docker/docker/daemon/logger/loggerutils"
+	"github.com/docker/docker/pkg/jsonlog"
+	"github.com/docker/go-units"
+)
+
+// Name is the name of the json-file log driver.
+const Name = "json-file"
+
+// JSONFileLogger is the Logger implementation for default Docker logging.
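
Before the implementation, it helps to see the on-disk format: each record is one JSON object per line with `log`, `stream`, and `time` fields, the exact strings the tests below assert. A self-contained illustration (the struct here is mine for demonstration; the driver itself marshals through `pkg/jsonlog` for speed):

```go
package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// jsonLogLine models the shape of one json-file record.
type jsonLogLine struct {
	Log    string    `json:"log"`
	Stream string    `json:"stream"`
	Time   time.Time `json:"time"`
}

func main() {
	b, _ := json.Marshal(jsonLogLine{Log: "line1\n", Stream: "stdout"})
	fmt.Println(string(b)) // {"log":"line1\n","stream":"stdout","time":"0001-01-01T00:00:00Z"}
}
```
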
+type JSONFileLogger struct { + buf *bytes.Buffer + writer *loggerutils.RotateFileWriter + mu sync.Mutex + readers map[*logger.LogWatcher]struct{} // stores the active log followers + extra []byte // json-encoded extra attributes + closed bool +} + +func init() { + if err := logger.RegisterLogDriver(Name, New); err != nil { + logrus.Fatal(err) + } + if err := logger.RegisterLogOptValidator(Name, ValidateLogOpt); err != nil { + logrus.Fatal(err) + } +} + +// New creates new JSONFileLogger which writes to filename passed in +// on given context. +func New(info logger.Info) (logger.Logger, error) { + var capval int64 = -1 + if capacity, ok := info.Config["max-size"]; ok { + var err error + capval, err = units.FromHumanSize(capacity) + if err != nil { + return nil, err + } + } + var maxFiles = 1 + if maxFileString, ok := info.Config["max-file"]; ok { + var err error + maxFiles, err = strconv.Atoi(maxFileString) + if err != nil { + return nil, err + } + if maxFiles < 1 { + return nil, fmt.Errorf("max-file cannot be less than 1") + } + } + + writer, err := loggerutils.NewRotateFileWriter(info.LogPath, capval, maxFiles) + if err != nil { + return nil, err + } + + var extra []byte + attrs, err := info.ExtraAttributes(nil) + if err != nil { + return nil, err + } + if len(attrs) > 0 { + var err error + extra, err = json.Marshal(attrs) + if err != nil { + return nil, err + } + } + + return &JSONFileLogger{ + buf: bytes.NewBuffer(nil), + writer: writer, + readers: make(map[*logger.LogWatcher]struct{}), + extra: extra, + }, nil +} + +// Log converts logger.Message to jsonlog.JSONLog and serializes it to file. +func (l *JSONFileLogger) Log(msg *logger.Message) error { + timestamp, err := jsonlog.FastTimeMarshalJSON(msg.Timestamp) + if err != nil { + return err + } + l.mu.Lock() + logline := msg.Line + if !msg.Partial { + logline = append(msg.Line, '\n') + } + err = (&jsonlog.JSONLogs{ + Log: logline, + Stream: msg.Source, + Created: timestamp, + RawAttrs: l.extra, + }).MarshalJSONBuf(l.buf) + logger.PutMessage(msg) + if err != nil { + l.mu.Unlock() + return err + } + + l.buf.WriteByte('\n') + _, err = l.writer.Write(l.buf.Bytes()) + l.buf.Reset() + l.mu.Unlock() + + return err +} + +// ValidateLogOpt looks for json specific log options max-file & max-size. +func ValidateLogOpt(cfg map[string]string) error { + for key := range cfg { + switch key { + case "max-file": + case "max-size": + case "labels": + case "env": + case "env-regex": + default: + return fmt.Errorf("unknown log opt '%s' for json-file log driver", key) + } + } + return nil +} + +// LogPath returns the location the given json logger logs to. +func (l *JSONFileLogger) LogPath() string { + return l.writer.LogPath() +} + +// Close closes underlying file and signals all readers to stop. +func (l *JSONFileLogger) Close() error { + l.mu.Lock() + l.closed = true + err := l.writer.Close() + for r := range l.readers { + r.Close() + delete(l.readers, r) + } + l.mu.Unlock() + return err +} + +// Name returns name of this logger. 
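
`New` above wires the `max-size` and `max-file` options into a `RotateFileWriter`. As a hedged, same-package sketch of constructing the driver the way the daemon would (the container ID and path are placeholders):

```go
// newJSONFileLogger is a hypothetical helper, not part of the driver.
func newJSONFileLogger(path string) (logger.Logger, error) {
	// "10m" is decoded by go-units into bytes; "3" caps the rotated files,
	// so at most path, path.1 and path.2 ever exist.
	return New(logger.Info{
		ContainerID: "a7317399f3f857173c6179d44823594f8294678dea9999662e5c625b5a1c7657",
		LogPath:     path,
		Config:      map[string]string{"max-size": "10m", "max-file": "3"},
	})
}
```
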
+func (l *JSONFileLogger) Name() string { + return Name +} diff --git a/components/engine/daemon/logger/jsonfilelog/jsonfilelog_test.go b/components/engine/daemon/logger/jsonfilelog/jsonfilelog_test.go new file mode 100644 index 0000000000..d2d36e943b --- /dev/null +++ b/components/engine/daemon/logger/jsonfilelog/jsonfilelog_test.go @@ -0,0 +1,249 @@ +package jsonfilelog + +import ( + "encoding/json" + "io/ioutil" + "os" + "path/filepath" + "reflect" + "strconv" + "testing" + "time" + + "github.com/docker/docker/daemon/logger" + "github.com/docker/docker/pkg/jsonlog" +) + +func TestJSONFileLogger(t *testing.T) { + cid := "a7317399f3f857173c6179d44823594f8294678dea9999662e5c625b5a1c7657" + tmp, err := ioutil.TempDir("", "docker-logger-") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmp) + filename := filepath.Join(tmp, "container.log") + l, err := New(logger.Info{ + ContainerID: cid, + LogPath: filename, + }) + if err != nil { + t.Fatal(err) + } + defer l.Close() + + if err := l.Log(&logger.Message{Line: []byte("line1"), Source: "src1"}); err != nil { + t.Fatal(err) + } + if err := l.Log(&logger.Message{Line: []byte("line2"), Source: "src2"}); err != nil { + t.Fatal(err) + } + if err := l.Log(&logger.Message{Line: []byte("line3"), Source: "src3"}); err != nil { + t.Fatal(err) + } + res, err := ioutil.ReadFile(filename) + if err != nil { + t.Fatal(err) + } + expected := `{"log":"line1\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line2\n","stream":"src2","time":"0001-01-01T00:00:00Z"} +{"log":"line3\n","stream":"src3","time":"0001-01-01T00:00:00Z"} +` + + if string(res) != expected { + t.Fatalf("Wrong log content: %q, expected %q", res, expected) + } +} + +func BenchmarkJSONFileLogger(b *testing.B) { + cid := "a7317399f3f857173c6179d44823594f8294678dea9999662e5c625b5a1c7657" + tmp, err := ioutil.TempDir("", "docker-logger-") + if err != nil { + b.Fatal(err) + } + defer os.RemoveAll(tmp) + filename := filepath.Join(tmp, "container.log") + l, err := New(logger.Info{ + ContainerID: cid, + LogPath: filename, + }) + if err != nil { + b.Fatal(err) + } + defer l.Close() + + testLine := "Line that thinks that it is log line from docker\n" + msg := &logger.Message{Line: []byte(testLine), Source: "stderr", Timestamp: time.Now().UTC()} + jsonlog, err := (&jsonlog.JSONLog{Log: string(msg.Line) + "\n", Stream: msg.Source, Created: msg.Timestamp}).MarshalJSON() + if err != nil { + b.Fatal(err) + } + b.SetBytes(int64(len(jsonlog)+1) * 30) + b.ResetTimer() + for i := 0; i < b.N; i++ { + for j := 0; j < 30; j++ { + if err := l.Log(msg); err != nil { + b.Fatal(err) + } + } + } +} + +func TestJSONFileLoggerWithOpts(t *testing.T) { + cid := "a7317399f3f857173c6179d44823594f8294678dea9999662e5c625b5a1c7657" + tmp, err := ioutil.TempDir("", "docker-logger-") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmp) + filename := filepath.Join(tmp, "container.log") + config := map[string]string{"max-file": "2", "max-size": "1k"} + l, err := New(logger.Info{ + ContainerID: cid, + LogPath: filename, + Config: config, + }) + if err != nil { + t.Fatal(err) + } + defer l.Close() + for i := 0; i < 20; i++ { + if err := l.Log(&logger.Message{Line: []byte("line" + strconv.Itoa(i)), Source: "src1"}); err != nil { + t.Fatal(err) + } + } + res, err := ioutil.ReadFile(filename) + if err != nil { + t.Fatal(err) + } + penUlt, err := ioutil.ReadFile(filename + ".1") + if err != nil { + t.Fatal(err) + } + + expectedPenultimate := `{"log":"line0\n","stream":"src1","time":"0001-01-01T00:00:00Z"} 
+{"log":"line1\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line2\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line3\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line4\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line5\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line6\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line7\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line8\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line9\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line10\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line11\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line12\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line13\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line14\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line15\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +` + expected := `{"log":"line16\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line17\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line18\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line19\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +` + + if string(res) != expected { + t.Fatalf("Wrong log content: %q, expected %q", res, expected) + } + if string(penUlt) != expectedPenultimate { + t.Fatalf("Wrong log content: %q, expected %q", penUlt, expectedPenultimate) + } + +} + +func TestJSONFileLoggerWithLabelsEnv(t *testing.T) { + cid := "a7317399f3f857173c6179d44823594f8294678dea9999662e5c625b5a1c7657" + tmp, err := ioutil.TempDir("", "docker-logger-") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmp) + filename := filepath.Join(tmp, "container.log") + config := map[string]string{"labels": "rack,dc", "env": "environ,debug,ssl", "env-regex": "^dc"} + l, err := New(logger.Info{ + ContainerID: cid, + LogPath: filename, + Config: config, + ContainerLabels: map[string]string{"rack": "101", "dc": "lhr"}, + ContainerEnv: []string{"environ=production", "debug=false", "port=10001", "ssl=true", "dc_region=west"}, + }) + if err != nil { + t.Fatal(err) + } + defer l.Close() + if err := l.Log(&logger.Message{Line: []byte("line"), Source: "src1"}); err != nil { + t.Fatal(err) + } + res, err := ioutil.ReadFile(filename) + if err != nil { + t.Fatal(err) + } + + var jsonLog jsonlog.JSONLogs + if err := json.Unmarshal(res, &jsonLog); err != nil { + t.Fatal(err) + } + extra := make(map[string]string) + if err := json.Unmarshal(jsonLog.RawAttrs, &extra); err != nil { + t.Fatal(err) + } + expected := map[string]string{ + "rack": "101", + "dc": "lhr", + "environ": "production", + "debug": "false", + "ssl": "true", + "dc_region": "west", + } + if !reflect.DeepEqual(extra, expected) { + t.Fatalf("Wrong log attrs: %q, expected %q", extra, expected) + } +} + +func BenchmarkJSONFileLoggerWithReader(b *testing.B) { + b.StopTimer() + b.ResetTimer() + cid := "a7317399f3f857173c6179d44823594f8294678dea9999662e5c625b5a1c7657" + dir, err := ioutil.TempDir("", "json-logger-bench") + if err != nil { + b.Fatal(err) + } + defer os.RemoveAll(dir) + + l, err := New(logger.Info{ + ContainerID: cid, + LogPath: filepath.Join(dir, "container.log"), + }) + if err != nil { + b.Fatal(err) + } + defer l.Close() + msg := &logger.Message{Line: []byte("line"), Source: "src1"} + jsonlog, err := (&jsonlog.JSONLog{Log: string(msg.Line) + "\n", Stream: msg.Source, Created: msg.Timestamp}).MarshalJSON() + if err != nil { + 
b.Fatal(err) + } + b.SetBytes(int64(len(jsonlog)+1) * 30) + + b.StartTimer() + + go func() { + for i := 0; i < b.N; i++ { + for j := 0; j < 30; j++ { + l.Log(msg) + } + } + l.Close() + }() + + lw := l.(logger.LogReader).ReadLogs(logger.ReadConfig{Follow: true}) + watchClose := lw.WatchClose() + for { + select { + case <-lw.Msg: + case <-watchClose: + return + } + } +} diff --git a/components/engine/daemon/logger/jsonfilelog/read.go b/components/engine/daemon/logger/jsonfilelog/read.go new file mode 100644 index 0000000000..9f282eafbb --- /dev/null +++ b/components/engine/daemon/logger/jsonfilelog/read.go @@ -0,0 +1,324 @@ +package jsonfilelog + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "os" + "time" + + "github.com/fsnotify/fsnotify" + "golang.org/x/net/context" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/daemon/logger" + "github.com/docker/docker/pkg/filenotify" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/jsonlog" + "github.com/docker/docker/pkg/tailfile" +) + +const maxJSONDecodeRetry = 20000 + +func decodeLogLine(dec *json.Decoder, l *jsonlog.JSONLog) (*logger.Message, error) { + l.Reset() + if err := dec.Decode(l); err != nil { + return nil, err + } + msg := &logger.Message{ + Source: l.Stream, + Timestamp: l.Created, + Line: []byte(l.Log), + Attrs: l.Attrs, + } + return msg, nil +} + +// ReadLogs implements the logger's LogReader interface for the logs +// created by this driver. +func (l *JSONFileLogger) ReadLogs(config logger.ReadConfig) *logger.LogWatcher { + logWatcher := logger.NewLogWatcher() + + go l.readLogs(logWatcher, config) + return logWatcher +} + +func (l *JSONFileLogger) readLogs(logWatcher *logger.LogWatcher, config logger.ReadConfig) { + defer close(logWatcher.Msg) + + // lock so the read stream doesn't get corrupted due to rotations or other log data written while we read + // This will block writes!!! + l.mu.Lock() + + pth := l.writer.LogPath() + var files []io.ReadSeeker + for i := l.writer.MaxFiles(); i > 1; i-- { + f, err := os.Open(fmt.Sprintf("%s.%d", pth, i-1)) + if err != nil { + if !os.IsNotExist(err) { + logWatcher.Err <- err + break + } + continue + } + defer f.Close() + + files = append(files, f) + } + + latestFile, err := os.Open(pth) + if err != nil { + logWatcher.Err <- err + l.mu.Unlock() + return + } + defer latestFile.Close() + + if config.Tail != 0 { + tailer := ioutils.MultiReadSeeker(append(files, latestFile)...) 
+ tailFile(tailer, logWatcher, config.Tail, config.Since) + } + + // close all the rotated files + for _, f := range files { + if err := f.(io.Closer).Close(); err != nil { + logrus.WithField("logger", "json-file").Warnf("error closing tailed log file: %v", err) + } + } + + if !config.Follow || l.closed { + l.mu.Unlock() + return + } + + if config.Tail >= 0 { + latestFile.Seek(0, os.SEEK_END) + } + + notifyRotate := l.writer.NotifyRotate() + defer l.writer.NotifyRotateEvict(notifyRotate) + + l.readers[logWatcher] = struct{}{} + + l.mu.Unlock() + + followLogs(latestFile, logWatcher, notifyRotate, config.Since) + + l.mu.Lock() + delete(l.readers, logWatcher) + l.mu.Unlock() +} + +func tailFile(f io.ReadSeeker, logWatcher *logger.LogWatcher, tail int, since time.Time) { + var rdr io.Reader + rdr = f + if tail > 0 { + ls, err := tailfile.TailFile(f, tail) + if err != nil { + logWatcher.Err <- err + return + } + rdr = bytes.NewBuffer(bytes.Join(ls, []byte("\n"))) + } + dec := json.NewDecoder(rdr) + l := &jsonlog.JSONLog{} + for { + msg, err := decodeLogLine(dec, l) + if err != nil { + if err != io.EOF { + logWatcher.Err <- err + } + return + } + if !since.IsZero() && msg.Timestamp.Before(since) { + continue + } + select { + case <-logWatcher.WatchClose(): + return + case logWatcher.Msg <- msg: + } + } +} + +func watchFile(name string) (filenotify.FileWatcher, error) { + fileWatcher, err := filenotify.New() + if err != nil { + return nil, err + } + + if err := fileWatcher.Add(name); err != nil { + logrus.WithField("logger", "json-file").Warnf("falling back to file poller due to error: %v", err) + fileWatcher.Close() + fileWatcher = filenotify.NewPollingWatcher() + + if err := fileWatcher.Add(name); err != nil { + fileWatcher.Close() + logrus.Debugf("error watching log file for modifications: %v", err) + return nil, err + } + } + return fileWatcher, nil +} + +func followLogs(f *os.File, logWatcher *logger.LogWatcher, notifyRotate chan interface{}, since time.Time) { + dec := json.NewDecoder(f) + l := &jsonlog.JSONLog{} + + name := f.Name() + fileWatcher, err := watchFile(name) + if err != nil { + logWatcher.Err <- err + return + } + defer func() { + f.Close() + fileWatcher.Remove(name) + fileWatcher.Close() + }() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + go func() { + select { + case <-logWatcher.WatchClose(): + fileWatcher.Remove(name) + cancel() + case <-ctx.Done(): + return + } + }() + + var retries int + handleRotate := func() error { + f.Close() + fileWatcher.Remove(name) + + // retry when the file doesn't exist + for retries := 0; retries <= 5; retries++ { + f, err = os.Open(name) + if err == nil || !os.IsNotExist(err) { + break + } + } + if err != nil { + return err + } + if err := fileWatcher.Add(name); err != nil { + return err + } + dec = json.NewDecoder(f) + return nil + } + + errRetry := errors.New("retry") + errDone := errors.New("done") + waitRead := func() error { + select { + case e := <-fileWatcher.Events(): + switch e.Op { + case fsnotify.Write: + dec = json.NewDecoder(f) + return nil + case fsnotify.Rename, fsnotify.Remove: + select { + case <-notifyRotate: + case <-ctx.Done(): + return errDone + } + if err := handleRotate(); err != nil { + return err + } + return nil + } + return errRetry + case err := <-fileWatcher.Errors(): + logrus.Debug("logger got error watching file: %v", err) + // Something happened, let's try and stay alive and create a new watcher + if retries <= 5 { + fileWatcher.Close() + fileWatcher, err = watchFile(name) + if 
err != nil { + return err + } + retries++ + return errRetry + } + return err + case <-ctx.Done(): + return errDone + } + } + + handleDecodeErr := func(err error) error { + if err == io.EOF { + for { + err := waitRead() + if err == nil { + break + } + if err == errRetry { + continue + } + return err + } + return nil + } + // try again because this shouldn't happen + if _, ok := err.(*json.SyntaxError); ok && retries <= maxJSONDecodeRetry { + dec = json.NewDecoder(f) + retries++ + return nil + } + // io.ErrUnexpectedEOF is returned from json.Decoder when there is + // remaining data in the parser's buffer while an io.EOF occurs. + // If the json logger writes a partial json log entry to the disk + // while at the same time the decoder tries to decode it, the race condition happens. + if err == io.ErrUnexpectedEOF && retries <= maxJSONDecodeRetry { + reader := io.MultiReader(dec.Buffered(), f) + dec = json.NewDecoder(reader) + retries++ + return nil + } + return err + } + + // main loop + for { + msg, err := decodeLogLine(dec, l) + if err != nil { + if err := handleDecodeErr(err); err != nil { + if err == errDone { + return + } + // we got an unrecoverable error, so return + logWatcher.Err <- err + return + } + // ready to try again + continue + } + + retries = 0 // reset retries since we've succeeded + if !since.IsZero() && msg.Timestamp.Before(since) { + continue + } + select { + case logWatcher.Msg <- msg: + case <-ctx.Done(): + logWatcher.Msg <- msg + for { + msg, err := decodeLogLine(dec, l) + if err != nil { + return + } + if !since.IsZero() && msg.Timestamp.Before(since) { + continue + } + logWatcher.Msg <- msg + } + } + } +} diff --git a/components/engine/daemon/logger/logentries/logentries.go b/components/engine/daemon/logger/logentries/logentries.go new file mode 100644 index 0000000000..a353d9d49a --- /dev/null +++ b/components/engine/daemon/logger/logentries/logentries.go @@ -0,0 +1,97 @@ +// Package logentries provides the log driver for forwarding server logs +// to logentries endpoints. +package logentries + +import ( + "fmt" + + "github.com/Sirupsen/logrus" + "github.com/bsphere/le_go" + "github.com/docker/docker/daemon/logger" +) + +type logentries struct { + tag string + containerID string + containerName string + writer *le_go.Logger + extra map[string]string +} + +const ( + name = "logentries" + token = "logentries-token" +) + +func init() { + if err := logger.RegisterLogDriver(name, New); err != nil { + logrus.Fatal(err) + } + if err := logger.RegisterLogOptValidator(name, ValidateLogOpt); err != nil { + logrus.Fatal(err) + } +} + +// New creates a logentries logger using the configuration passed in on +// the context. The supported context configuration variable is +// logentries-token. +func New(info logger.Info) (logger.Logger, error) { + logrus.WithField("container", info.ContainerID). + WithField("token", info.Config[token]). 
+ Debug("logging driver logentries configured") + + log, err := le_go.Connect(info.Config[token]) + if err != nil { + return nil, err + } + return &logentries{ + containerID: info.ContainerID, + containerName: info.ContainerName, + writer: log, + }, nil +} + +func (f *logentries) Log(msg *logger.Message) error { + data := map[string]string{ + "container_id": f.containerID, + "container_name": f.containerName, + "source": msg.Source, + "log": string(msg.Line), + } + for k, v := range f.extra { + data[k] = v + } + ts := msg.Timestamp + logger.PutMessage(msg) + f.writer.Println(f.tag, ts, data) + return nil +} + +func (f *logentries) Close() error { + return f.writer.Close() +} + +func (f *logentries) Name() string { + return name +} + +// ValidateLogOpt looks for logentries specific log option logentries-address. +func ValidateLogOpt(cfg map[string]string) error { + for key := range cfg { + switch key { + case "env": + case "env-regex": + case "labels": + case "tag": + case key: + default: + return fmt.Errorf("unknown log opt '%s' for logentries log driver", key) + } + } + + if cfg[token] == "" { + return fmt.Errorf("Missing logentries token") + } + + return nil +} diff --git a/components/engine/daemon/logger/logger.go b/components/engine/daemon/logger/logger.go new file mode 100644 index 0000000000..daa5403de2 --- /dev/null +++ b/components/engine/daemon/logger/logger.go @@ -0,0 +1,136 @@ +// Package logger defines interfaces that logger drivers implement to +// log messages. +// +// The other half of a logger driver is the implementation of the +// factory, which holds the contextual instance information that +// allows multiple loggers of the same type to perform different +// actions, such as logging to different locations. +package logger + +import ( + "errors" + "sync" + "time" + + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/pkg/jsonlog" +) + +// ErrReadLogsNotSupported is returned when the logger does not support reading logs. +var ErrReadLogsNotSupported = errors.New("configured logging driver does not support reading") + +const ( + // TimeFormat is the time format used for timestamps sent to log readers. + TimeFormat = jsonlog.RFC3339NanoFixed + logWatcherBufferSize = 4096 +) + +var messagePool = &sync.Pool{New: func() interface{} { return &Message{Line: make([]byte, 0, 256)} }} + +// NewMessage returns a new message from the message sync.Pool +func NewMessage() *Message { + return messagePool.Get().(*Message) +} + +// PutMessage puts the specified message back n the message pool. +// The message fields are reset before putting into the pool. +func PutMessage(msg *Message) { + msg.reset() + messagePool.Put(msg) +} + +// Message is datastructure that represents piece of output produced by some +// container. The Line member is a slice of an array whose contents can be +// changed after a log driver's Log() method returns. +// +// Message is subtyped from backend.LogMessage because there is a lot of +// internal complexity around the Message type that should not be exposed +// to any package not explicitly importing the logger type. +// +// Any changes made to this struct must also be updated in the `reset` function +type Message backend.LogMessage + +// reset sets the message back to default values +// This is used when putting a message back into the message pool. +// Any changes to the `Message` struct should be reflected here. 
+func (m *Message) reset() {
+	m.Line = m.Line[:0]
+	m.Source = ""
+	m.Attrs = nil
+	m.Partial = false
+
+	m.Err = nil
+}
+
+// AsLogMessage returns a pointer to the message as a pointer to
+// backend.LogMessage, which is an identical type with a different purpose.
+func (m *Message) AsLogMessage() *backend.LogMessage {
+	return (*backend.LogMessage)(m)
+}
+
+// LogAttributes is used to hold the extra attributes available in the log message.
+// Primarily used for converting the map type to string and sorting.
+// Imported here so it can be used internally with less refactoring.
+type LogAttributes backend.LogAttributes
+
+// Logger is the interface for docker logging drivers.
+type Logger interface {
+	Log(*Message) error
+	Name() string
+	Close() error
+}
+
+// ReadConfig is the configuration passed into ReadLogs.
+type ReadConfig struct {
+	Since  time.Time
+	Tail   int
+	Follow bool
+}
+
+// LogReader is the interface for reading log messages for loggers that support reading.
+type LogReader interface {
+	// Read logs from underlying logging backend
+	ReadLogs(ReadConfig) *LogWatcher
+}
+
+// LogWatcher is used when consuming logs read from the LogReader interface.
+type LogWatcher struct {
+	// For sending log messages to a reader.
+	Msg chan *Message
+	// For sending error messages that occur while reading logs.
+	Err           chan error
+	closeOnce     sync.Once
+	closeNotifier chan struct{}
+}
+
+// NewLogWatcher returns a new LogWatcher.
+func NewLogWatcher() *LogWatcher {
+	return &LogWatcher{
+		Msg:           make(chan *Message, logWatcherBufferSize),
+		Err:           make(chan error, 1),
+		closeNotifier: make(chan struct{}),
+	}
+}
+
+// Close notifies the underlying log reader to stop.
+func (w *LogWatcher) Close() {
+	// only close if not already closed
+	w.closeOnce.Do(func() {
+		close(w.closeNotifier)
+	})
+}
+
+// WatchClose returns a channel receiver that receives notification
+// when the watcher has been closed. This should only be called from
+// one goroutine.
+func (w *LogWatcher) WatchClose() <-chan struct{} {
+	return w.closeNotifier
+}
+
+// Capability defines the list of capabilities that a driver can implement.
+// These capabilities are not required of a logging driver, but they do
+// determine how a logging driver can be used.
+type Capability struct {
+	// Determines if a log driver can read back logs
+	ReadLogs bool
+}
diff --git a/components/engine/daemon/logger/logger_test.go b/components/engine/daemon/logger/logger_test.go
new file mode 100644
index 0000000000..4d6e079308
--- /dev/null
+++ b/components/engine/daemon/logger/logger_test.go
@@ -0,0 +1,19 @@
+package logger
+
+func (m *Message) copy() *Message {
+	msg := &Message{
+		Source:    m.Source,
+		Partial:   m.Partial,
+		Timestamp: m.Timestamp,
+	}
+
+	if m.Attrs != nil {
+		msg.Attrs = make(map[string]string, len(m.Attrs))
+		for k, v := range m.Attrs {
+			msg.Attrs[k] = v
+		}
+	}
+
+	msg.Line = append(make([]byte, 0, len(m.Line)), m.Line...)
+	return msg
+}
diff --git a/components/engine/daemon/logger/loggerutils/log_tag.go b/components/engine/daemon/logger/loggerutils/log_tag.go
new file mode 100644
index 0000000000..f801047c41
--- /dev/null
+++ b/components/engine/daemon/logger/loggerutils/log_tag.go
@@ -0,0 +1,31 @@
+package loggerutils
+
+import (
+	"bytes"
+
+	"github.com/docker/docker/daemon/logger"
+	"github.com/docker/docker/pkg/templates"
+)
+
+// DefaultTemplate defines the default template the logger should use.
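
The `LogWatcher` defined above is consumed roughly as follows; a hedged sketch from the caller's side (function and variable names are mine):

```go
package main

import (
	"fmt"

	"github.com/docker/docker/daemon/logger"
)

// consume drains a LogWatcher until the reader closes Msg, reports a
// terminal error, or the caller's own Close() request takes effect.
func consume(reader logger.LogReader) error {
	lw := reader.ReadLogs(logger.ReadConfig{Tail: 100, Follow: true})
	defer lw.Close() // tells the reader to stop; safe to call more than once
	for {
		select {
		case msg, ok := <-lw.Msg:
			if !ok {
				return nil // the reader is done and closed the channel
			}
			fmt.Printf("%s %s: %s", msg.Timestamp.Format(logger.TimeFormat), msg.Source, msg.Line)
		case err := <-lw.Err:
			return err
		}
	}
}
```
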
+const DefaultTemplate = "{{.ID}}" + +// ParseLogTag generates a context aware tag for consistency across different +// log drivers based on the context of the running container. +func ParseLogTag(info logger.Info, defaultTemplate string) (string, error) { + tagTemplate := info.Config["tag"] + if tagTemplate == "" { + tagTemplate = defaultTemplate + } + + tmpl, err := templates.NewParse("log-tag", tagTemplate) + if err != nil { + return "", err + } + buf := new(bytes.Buffer) + if err := tmpl.Execute(buf, &info); err != nil { + return "", err + } + + return buf.String(), nil +} diff --git a/components/engine/daemon/logger/loggerutils/log_tag_test.go b/components/engine/daemon/logger/loggerutils/log_tag_test.go new file mode 100644 index 0000000000..1a6d9f1516 --- /dev/null +++ b/components/engine/daemon/logger/loggerutils/log_tag_test.go @@ -0,0 +1,47 @@ +package loggerutils + +import ( + "testing" + + "github.com/docker/docker/daemon/logger" +) + +func TestParseLogTagDefaultTag(t *testing.T) { + info := buildContext(map[string]string{}) + tag, e := ParseLogTag(info, "{{.ID}}") + assertTag(t, e, tag, info.ID()) +} + +func TestParseLogTag(t *testing.T) { + info := buildContext(map[string]string{"tag": "{{.ImageName}}/{{.Name}}/{{.ID}}"}) + tag, e := ParseLogTag(info, "{{.ID}}") + assertTag(t, e, tag, "test-image/test-container/container-ab") +} + +func TestParseLogTagEmptyTag(t *testing.T) { + info := buildContext(map[string]string{}) + tag, e := ParseLogTag(info, "{{.DaemonName}}/{{.ID}}") + assertTag(t, e, tag, "test-dockerd/container-ab") +} + +// Helpers + +func buildContext(cfg map[string]string) logger.Info { + return logger.Info{ + ContainerID: "container-abcdefghijklmnopqrstuvwxyz01234567890", + ContainerName: "/test-container", + ContainerImageID: "image-abcdefghijklmnopqrstuvwxyz01234567890", + ContainerImageName: "test-image", + Config: cfg, + DaemonName: "test-dockerd", + } +} + +func assertTag(t *testing.T, e error, tag string, expected string) { + if e != nil { + t.Fatalf("Error generating tag: %q", e) + } + if tag != expected { + t.Fatalf("Wrong tag: %q, should be %q", tag, expected) + } +} diff --git a/components/engine/daemon/logger/loggerutils/rotatefilewriter.go b/components/engine/daemon/logger/loggerutils/rotatefilewriter.go new file mode 100644 index 0000000000..99e0964aea --- /dev/null +++ b/components/engine/daemon/logger/loggerutils/rotatefilewriter.go @@ -0,0 +1,124 @@ +package loggerutils + +import ( + "os" + "strconv" + "sync" + + "github.com/docker/docker/pkg/pubsub" +) + +// RotateFileWriter is Logger implementation for default Docker logging. 
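
`ParseLogTag` above resolves the template against the fields exposed by `logger.Info` (`.ID`, `.Name`, `.ImageName`, and so on, defined in `loginfo.go` below). A sketch with made-up container values:

```go
info := logger.Info{
	ContainerID:        "a7317399f3f857173c6179d44823594f8294678dea99996",
	ContainerName:      "/web",
	ContainerImageName: "nginx",
	Config:             map[string]string{"tag": "{{.ImageName}}/{{.Name}}/{{.ID}}"},
}
tag, err := loggerutils.ParseLogTag(info, loggerutils.DefaultTemplate)
// tag == "nginx/web/a7317399f3f8", err == nil; if the "tag" option were
// absent, the second argument's template ("{{.ID}}" here) would apply.
```
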
+type RotateFileWriter struct {
+	f            *os.File // store for closing
+	mu           sync.Mutex
+	capacity     int64 // maximum size of each file
+	currentSize  int64 // current size of the latest file
+	maxFiles     int   // maximum number of files
+	notifyRotate *pubsub.Publisher
+}
+
+// NewRotateFileWriter creates a new RotateFileWriter.
+func NewRotateFileWriter(logPath string, capacity int64, maxFiles int) (*RotateFileWriter, error) {
+	log, err := os.OpenFile(logPath, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0640)
+	if err != nil {
+		return nil, err
+	}
+
+	size, err := log.Seek(0, os.SEEK_END)
+	if err != nil {
+		return nil, err
+	}
+
+	return &RotateFileWriter{
+		f:            log,
+		capacity:     capacity,
+		currentSize:  size,
+		maxFiles:     maxFiles,
+		notifyRotate: pubsub.NewPublisher(0, 1),
+	}, nil
+}
+
+// Write writes the log message to the file, rotating first if the
+// capacity has been reached.
+func (w *RotateFileWriter) Write(message []byte) (int, error) {
+	w.mu.Lock()
+	if err := w.checkCapacityAndRotate(); err != nil {
+		w.mu.Unlock()
+		return -1, err
+	}
+
+	n, err := w.f.Write(message)
+	if err == nil {
+		w.currentSize += int64(n)
+	}
+	w.mu.Unlock()
+	return n, err
+}
+
+func (w *RotateFileWriter) checkCapacityAndRotate() error {
+	if w.capacity == -1 {
+		return nil
+	}
+
+	if w.currentSize >= w.capacity {
+		name := w.f.Name()
+		if err := w.f.Close(); err != nil {
+			return err
+		}
+		if err := rotate(name, w.maxFiles); err != nil {
+			return err
+		}
+		file, err := os.OpenFile(name, os.O_WRONLY|os.O_TRUNC|os.O_CREATE, 0640)
+		if err != nil {
+			return err
+		}
+		w.f = file
+		w.currentSize = 0
+		w.notifyRotate.Publish(struct{}{})
+	}
+
+	return nil
+}
+
+func rotate(name string, maxFiles int) error {
+	if maxFiles < 2 {
+		return nil
+	}
+	for i := maxFiles - 1; i > 1; i-- {
+		toPath := name + "." + strconv.Itoa(i)
+		fromPath := name + "." + strconv.Itoa(i-1)
+		if err := os.Rename(fromPath, toPath); err != nil && !os.IsNotExist(err) {
+			return err
+		}
+	}
+
+	if err := os.Rename(name, name+".1"); err != nil && !os.IsNotExist(err) {
+		return err
+	}
+	return nil
+}
+
+// LogPath returns the location the given writer logs to.
+func (w *RotateFileWriter) LogPath() string {
+	return w.f.Name()
+}
+
+// MaxFiles returns the maximum number of files.
+func (w *RotateFileWriter) MaxFiles() int {
+	return w.maxFiles
+}
+
+// NotifyRotate returns a new subscriber channel that receives a message
+// on every rotation.
+func (w *RotateFileWriter) NotifyRotate() chan interface{} {
+	return w.notifyRotate.Subscribe()
+}
+
+// NotifyRotateEvict removes the specified subscriber from receiving any more messages.
+func (w *RotateFileWriter) NotifyRotateEvict(sub chan interface{}) {
+	w.notifyRotate.Evict(sub)
+}
+
+// Close closes the underlying file.
+func (w *RotateFileWriter) Close() error {
+	return w.f.Close()
+}
diff --git a/components/engine/daemon/logger/loginfo.go b/components/engine/daemon/logger/loginfo.go
new file mode 100644
index 0000000000..4c930b9056
--- /dev/null
+++ b/components/engine/daemon/logger/loginfo.go
@@ -0,0 +1,129 @@
+package logger
+
+import (
+	"fmt"
+	"os"
+	"regexp"
+	"strings"
+	"time"
+)
+
+// Info provides enough information for a logging driver to do its function.
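
The rename cascade in `rotate` above is the entire rotation scheme, so a small runnable illustration may help; `rotateSketch` below deliberately mirrors that function for demonstration and is not the driver's own code:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
	"strconv"
)

// rotateSketch shifts name.(i-1) to name.i from the oldest slot down,
// then moves the live file to name.1. Missing intermediate files are
// tolerated because os.IsNotExist errors are ignored.
func rotateSketch(name string, maxFiles int) error {
	for i := maxFiles - 1; i > 1; i-- {
		if err := os.Rename(name+"."+strconv.Itoa(i-1), name+"."+strconv.Itoa(i)); err != nil && !os.IsNotExist(err) {
			return err
		}
	}
	if err := os.Rename(name, name+".1"); err != nil && !os.IsNotExist(err) {
		return err
	}
	return nil
}

func main() {
	dir, _ := ioutil.TempDir("", "rotate-demo")
	defer os.RemoveAll(dir)
	log := filepath.Join(dir, "container.log")
	ioutil.WriteFile(log, []byte("old\n"), 0640)
	rotateSketch(log, 3) // container.log -> container.log.1
	_, err := os.Stat(log + ".1")
	fmt.Println(err == nil) // true
}
```
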
+type Info struct { + Config map[string]string + ContainerID string + ContainerName string + ContainerEntrypoint string + ContainerArgs []string + ContainerImageID string + ContainerImageName string + ContainerCreated time.Time + ContainerEnv []string + ContainerLabels map[string]string + LogPath string + DaemonName string +} + +// ExtraAttributes returns the user-defined extra attributes (labels, +// environment variables) in key-value format. This can be used by log drivers +// that support metadata to add more context to a log. +func (info *Info) ExtraAttributes(keyMod func(string) string) (map[string]string, error) { + extra := make(map[string]string) + labels, ok := info.Config["labels"] + if ok && len(labels) > 0 { + for _, l := range strings.Split(labels, ",") { + if v, ok := info.ContainerLabels[l]; ok { + if keyMod != nil { + l = keyMod(l) + } + extra[l] = v + } + } + } + + envMapping := make(map[string]string) + for _, e := range info.ContainerEnv { + if kv := strings.SplitN(e, "=", 2); len(kv) == 2 { + envMapping[kv[0]] = kv[1] + } + } + + env, ok := info.Config["env"] + if ok && len(env) > 0 { + for _, l := range strings.Split(env, ",") { + if v, ok := envMapping[l]; ok { + if keyMod != nil { + l = keyMod(l) + } + extra[l] = v + } + } + } + + envRegex, ok := info.Config["env-regex"] + if ok && len(envRegex) > 0 { + re, err := regexp.Compile(envRegex) + if err != nil { + return nil, err + } + for k, v := range envMapping { + if re.MatchString(k) { + if keyMod != nil { + k = keyMod(k) + } + extra[k] = v + } + } + } + + return extra, nil +} + +// Hostname returns the hostname from the underlying OS. +func (info *Info) Hostname() (string, error) { + hostname, err := os.Hostname() + if err != nil { + return "", fmt.Errorf("logger: can not resolve hostname: %v", err) + } + return hostname, nil +} + +// Command returns the command that the container being logged was +// started with. The Entrypoint is prepended to the container +// arguments. +func (info *Info) Command() string { + terms := []string{info.ContainerEntrypoint} + terms = append(terms, info.ContainerArgs...) + command := strings.Join(terms, " ") + return command +} + +// ID Returns the Container ID shortened to 12 characters. +func (info *Info) ID() string { + return info.ContainerID[:12] +} + +// FullID is an alias of ContainerID. +func (info *Info) FullID() string { + return info.ContainerID +} + +// Name returns the ContainerName without a preceding '/'. +func (info *Info) Name() string { + return strings.TrimPrefix(info.ContainerName, "/") +} + +// ImageID returns the ContainerImageID shortened to 12 characters. +func (info *Info) ImageID() string { + return info.ContainerImageID[:12] +} + +// ImageFullID is an alias of ContainerImageID. 
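
`ExtraAttributes` above merges three selection mechanisms, explicit label names, explicit env names, and an env regex, optionally passing each chosen key through `keyMod`. A sketch with invented values:

```go
info := logger.Info{
	Config: map[string]string{
		"labels":    "rack",
		"env":       "region",
		"env-regex": "^ORDER_",
	},
	ContainerLabels: map[string]string{"rack": "101", "row": "7"},
	ContainerEnv:    []string{"region=eu-west-1", "ORDER_SVC=on", "PATH=/usr/bin"},
}
attrs, _ := info.ExtraAttributes(strings.ToUpper) // error only on a bad regex
// attrs == map[string]string{"RACK": "101", "REGION": "eu-west-1", "ORDER_SVC": "on"}
// "row" and "PATH" are dropped: selected neither by name nor by regex.
```
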
+func (info *Info) ImageFullID() string { + return info.ContainerImageID +} + +// ImageName is an alias of ContainerImageName +func (info *Info) ImageName() string { + return info.ContainerImageName +} diff --git a/components/engine/daemon/logger/plugin.go b/components/engine/daemon/logger/plugin.go new file mode 100644 index 0000000000..de618c5aea --- /dev/null +++ b/components/engine/daemon/logger/plugin.go @@ -0,0 +1,89 @@ +package logger + +import ( + "fmt" + "io" + "os" + "path/filepath" + "strings" + + "github.com/docker/docker/api/types/plugins/logdriver" + getter "github.com/docker/docker/pkg/plugingetter" + "github.com/docker/docker/pkg/stringid" + "github.com/pkg/errors" +) + +var pluginGetter getter.PluginGetter + +const extName = "LogDriver" + +// logPlugin defines the available functions that logging plugins must implement. +type logPlugin interface { + StartLogging(streamPath string, info Info) (err error) + StopLogging(streamPath string) (err error) + Capabilities() (cap Capability, err error) + ReadLogs(info Info, config ReadConfig) (stream io.ReadCloser, err error) +} + +// RegisterPluginGetter sets the plugingetter +func RegisterPluginGetter(plugingetter getter.PluginGetter) { + pluginGetter = plugingetter +} + +// GetDriver returns a logging driver by its name. +// If the driver is empty, it looks for the local driver. +func getPlugin(name string, mode int) (Creator, error) { + p, err := pluginGetter.Get(name, extName, mode) + if err != nil { + return nil, fmt.Errorf("error looking up logging plugin %s: %v", name, err) + } + + d := &logPluginProxy{p.Client()} + return makePluginCreator(name, d, p.BasePath()), nil +} + +func makePluginCreator(name string, l *logPluginProxy, basePath string) Creator { + return func(logCtx Info) (logger Logger, err error) { + defer func() { + if err != nil { + pluginGetter.Get(name, extName, getter.Release) + } + }() + root := filepath.Join(basePath, "run", "docker", "logging") + if err := os.MkdirAll(root, 0700); err != nil { + return nil, err + } + + id := stringid.GenerateNonCryptoID() + a := &pluginAdapter{ + driverName: name, + id: id, + plugin: l, + fifoPath: filepath.Join(root, id), + logInfo: logCtx, + } + + cap, err := a.plugin.Capabilities() + if err == nil { + a.capabilities = cap + } + + stream, err := openPluginStream(a) + if err != nil { + return nil, err + } + + a.stream = stream + a.enc = logdriver.NewLogEntryEncoder(a.stream) + + if err := l.StartLogging(strings.TrimPrefix(a.fifoPath, basePath), logCtx); err != nil { + return nil, errors.Wrapf(err, "error creating logger") + } + + if cap.ReadLogs { + return &pluginAdapterWithRead{a}, nil + } + + return a, nil + } +} diff --git a/components/engine/daemon/logger/plugin_unix.go b/components/engine/daemon/logger/plugin_unix.go new file mode 100644 index 0000000000..f254c9c57d --- /dev/null +++ b/components/engine/daemon/logger/plugin_unix.go @@ -0,0 +1,20 @@ +// +build linux solaris freebsd + +package logger + +import ( + "context" + "io" + + "github.com/pkg/errors" + "github.com/tonistiigi/fifo" + "golang.org/x/sys/unix" +) + +func openPluginStream(a *pluginAdapter) (io.WriteCloser, error) { + f, err := fifo.OpenFifo(context.Background(), a.fifoPath, unix.O_WRONLY|unix.O_CREAT|unix.O_NONBLOCK, 0700) + if err != nil { + return nil, errors.Wrapf(err, "error creating i/o pipe for log plugin: %s", a.Name()) + } + return f, nil +} diff --git a/components/engine/daemon/logger/plugin_unsupported.go b/components/engine/daemon/logger/plugin_unsupported.go new file mode 100644 index 
0000000000..0a2036c838 --- /dev/null +++ b/components/engine/daemon/logger/plugin_unsupported.go @@ -0,0 +1,12 @@ +// +build !linux,!solaris,!freebsd + +package logger + +import ( + "errors" + "io" +) + +func openPluginStream(a *pluginAdapter) (io.WriteCloser, error) { + return nil, errors.New("log plugin not supported") +} diff --git a/components/engine/daemon/logger/proxy.go b/components/engine/daemon/logger/proxy.go new file mode 100644 index 0000000000..53860eba69 --- /dev/null +++ b/components/engine/daemon/logger/proxy.go @@ -0,0 +1,107 @@ +package logger + +import ( + "errors" + "io" +) + +type client interface { + Call(string, interface{}, interface{}) error + Stream(string, interface{}) (io.ReadCloser, error) +} + +type logPluginProxy struct { + client +} + +type logPluginProxyStartLoggingRequest struct { + File string + Info Info +} + +type logPluginProxyStartLoggingResponse struct { + Err string +} + +func (pp *logPluginProxy) StartLogging(file string, info Info) (err error) { + var ( + req logPluginProxyStartLoggingRequest + ret logPluginProxyStartLoggingResponse + ) + + req.File = file + req.Info = info + if err = pp.Call("LogDriver.StartLogging", req, &ret); err != nil { + return + } + + if ret.Err != "" { + err = errors.New(ret.Err) + } + + return +} + +type logPluginProxyStopLoggingRequest struct { + File string +} + +type logPluginProxyStopLoggingResponse struct { + Err string +} + +func (pp *logPluginProxy) StopLogging(file string) (err error) { + var ( + req logPluginProxyStopLoggingRequest + ret logPluginProxyStopLoggingResponse + ) + + req.File = file + if err = pp.Call("LogDriver.StopLogging", req, &ret); err != nil { + return + } + + if ret.Err != "" { + err = errors.New(ret.Err) + } + + return +} + +type logPluginProxyCapabilitiesResponse struct { + Cap Capability + Err string +} + +func (pp *logPluginProxy) Capabilities() (cap Capability, err error) { + var ( + ret logPluginProxyCapabilitiesResponse + ) + + if err = pp.Call("LogDriver.Capabilities", nil, &ret); err != nil { + return + } + + cap = ret.Cap + + if ret.Err != "" { + err = errors.New(ret.Err) + } + + return +} + +type logPluginProxyReadLogsRequest struct { + Info Info + Config ReadConfig +} + +func (pp *logPluginProxy) ReadLogs(info Info, config ReadConfig) (stream io.ReadCloser, err error) { + var ( + req logPluginProxyReadLogsRequest + ) + + req.Info = info + req.Config = config + return pp.Stream("LogDriver.ReadLogs", req) +} diff --git a/components/engine/daemon/logger/ring.go b/components/engine/daemon/logger/ring.go new file mode 100644 index 0000000000..90769d71e1 --- /dev/null +++ b/components/engine/daemon/logger/ring.go @@ -0,0 +1,218 @@ +package logger + +import ( + "errors" + "sync" + "sync/atomic" + + "github.com/Sirupsen/logrus" +) + +const ( + defaultRingMaxSize = 1e6 // 1MB +) + +// RingLogger is a ring buffer that implements the Logger interface. +// This is used when lossy logging is OK. 
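
The `RingLogger` declared just below backs the daemon's non-blocking log mode: `Log` enqueues and returns immediately while a background goroutine forwards messages to the wrapped driver. A hedged construction sketch from the caller's side (the function name is mine):

```go
// wrapLossy trades delivery guarantees for never blocking the container's
// stdio copier. A negative maxSize selects defaultRingMaxSize (1MB).
func wrapLossy(driver logger.Logger, info logger.Info) logger.Logger {
	return logger.NewRingLogger(driver, info, -1)
}
```

Note that `NewRingLogger` preserves read support: if the wrapped driver implements `LogReader`, the returned value does too.
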
+type RingLogger struct {
+	buffer    *messageRing
+	l         Logger
+	logInfo   Info
+	closeFlag int32
+}
+
+type ringWithReader struct {
+	*RingLogger
+}
+
+func (r *ringWithReader) ReadLogs(cfg ReadConfig) *LogWatcher {
+	reader, ok := r.l.(LogReader)
+	if !ok {
+		// something is wrong if we get here
+		panic("expected log reader")
+	}
+	return reader.ReadLogs(cfg)
+}
+
+func newRingLogger(driver Logger, logInfo Info, maxSize int64) *RingLogger {
+	l := &RingLogger{
+		buffer:  newRing(maxSize),
+		l:       driver,
+		logInfo: logInfo,
+	}
+	go l.run()
+	return l
+}
+
+// NewRingLogger creates a new Logger that is implemented as a RingBuffer wrapping
+// the passed in logger.
+func NewRingLogger(driver Logger, logInfo Info, maxSize int64) Logger {
+	if maxSize < 0 {
+		maxSize = defaultRingMaxSize
+	}
+	l := newRingLogger(driver, logInfo, maxSize)
+	if _, ok := driver.(LogReader); ok {
+		return &ringWithReader{l}
+	}
+	return l
+}
+
+// Log queues messages into the ring buffer.
+func (r *RingLogger) Log(msg *Message) error {
+	if r.closed() {
+		return errClosed
+	}
+	return r.buffer.Enqueue(msg)
+}
+
+// Name returns the name of the underlying logger.
+func (r *RingLogger) Name() string {
+	return r.l.Name()
+}
+
+func (r *RingLogger) closed() bool {
+	return atomic.LoadInt32(&r.closeFlag) == 1
+}
+
+func (r *RingLogger) setClosed() {
+	atomic.StoreInt32(&r.closeFlag, 1)
+}
+
+// Close closes the logger.
+func (r *RingLogger) Close() error {
+	r.setClosed()
+	r.buffer.Close()
+	// empty out the queue
+	var logErr bool
+	for _, msg := range r.buffer.Drain() {
+		if logErr {
+			// a previous message failed to log, so re-insert into the message
+			// pool and assume the log driver is hosed
+			PutMessage(msg)
+			continue
+		}
+
+		if err := r.l.Log(msg); err != nil {
+			logrus.WithField("driver", r.l.Name()).WithField("container", r.logInfo.ContainerID).Errorf("Error writing log message: %v", err)
+			logErr = true
+		}
+	}
+	return r.l.Close()
+}
+
+// run consumes messages from the ring buffer and forwards them to the
+// underlying logger.
+// This is run in a goroutine when the RingLogger is created.
+func (r *RingLogger) run() {
+	for {
+		if r.closed() {
+			return
+		}
+		msg, err := r.buffer.Dequeue()
+		if err != nil {
+			// buffer is closed
+			return
+		}
+		if err := r.l.Log(msg); err != nil {
+			logrus.WithField("driver", r.l.Name()).WithField("container", r.logInfo.ContainerID).Errorf("Error writing log message: %v", err)
+		}
+	}
+}
+
+type messageRing struct {
+	mu sync.Mutex
+	// signals callers of `Dequeue` to wake up either on `Close` or when a new `Message` is added
+	wait *sync.Cond
+
+	sizeBytes int64 // current buffer size
+	maxBytes  int64 // max buffer size
+	queue     []*Message
+	closed    bool
+}
+
+func newRing(maxBytes int64) *messageRing {
+	queueSize := 1000
+	if maxBytes == 0 || maxBytes == 1 {
+		// With 0 or 1 max byte size, the maximum size of the queue would only ever be 1
+		// message long.
+		queueSize = 1
+	}
+
+	r := &messageRing{queue: make([]*Message, 0, queueSize), maxBytes: maxBytes}
+	r.wait = sync.NewCond(&r.mu)
+	return r
+}
+
+// Enqueue adds a message to the buffer queue.
+// If adding the message would exceed maxBytes and the queue is not empty,
+// the new message is dropped.
+// If the queue is empty, the message is enqueued even when it alone
+// exceeds maxBytes.
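
A same-package sketch of the capacity rule just described, matching the behavior `TestRingCap` asserts below:

```go
// With maxBytes == 5, five one-byte messages fit and the sixth is
// dropped on arrival rather than evicting older entries.
r := newRing(5)
for i := 0; i < 6; i++ {
	_ = r.Enqueue(&Message{Line: []byte(strconv.Itoa(i))}) // never errors while open
}
// len(r.queue) == 5, holding "0" through "4"; "5" was discarded.
```
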
+func (r *messageRing) Enqueue(m *Message) error { + mSize := int64(len(m.Line)) + + r.mu.Lock() + if r.closed { + r.mu.Unlock() + return errClosed + } + if mSize+r.sizeBytes > r.maxBytes && len(r.queue) > 0 { + r.wait.Signal() + r.mu.Unlock() + return nil + } + + r.queue = append(r.queue, m) + r.sizeBytes += mSize + r.wait.Signal() + r.mu.Unlock() + return nil +} + +// Dequeue pulls a message off the queue +// If there are no messages, it waits for one. +// If the buffer is closed, it will return immediately. +func (r *messageRing) Dequeue() (*Message, error) { + r.mu.Lock() + for len(r.queue) == 0 && !r.closed { + r.wait.Wait() + } + + if r.closed { + r.mu.Unlock() + return nil, errClosed + } + + msg := r.queue[0] + r.queue = r.queue[1:] + r.sizeBytes -= int64(len(msg.Line)) + r.mu.Unlock() + return msg, nil +} + +var errClosed = errors.New("closed") + +// Close closes the buffer ensuring no new messages can be added. +// Any callers waiting to dequeue a message will be woken up. +func (r *messageRing) Close() { + r.mu.Lock() + if r.closed { + r.mu.Unlock() + return + } + + r.closed = true + r.wait.Broadcast() + r.mu.Unlock() + return +} + +// Drain drains all messages from the queue. +// This can be used after `Close()` to get any remaining messages that were in queue. +func (r *messageRing) Drain() []*Message { + r.mu.Lock() + ls := make([]*Message, 0, len(r.queue)) + ls = append(ls, r.queue...) + r.sizeBytes = 0 + r.queue = r.queue[:0] + r.mu.Unlock() + return ls +} diff --git a/components/engine/daemon/logger/ring_test.go b/components/engine/daemon/logger/ring_test.go new file mode 100644 index 0000000000..9afbb44d29 --- /dev/null +++ b/components/engine/daemon/logger/ring_test.go @@ -0,0 +1,299 @@ +package logger + +import ( + "context" + "strconv" + "testing" + "time" +) + +type mockLogger struct{ c chan *Message } + +func (l *mockLogger) Log(msg *Message) error { + l.c <- msg + return nil +} + +func (l *mockLogger) Name() string { + return "mock" +} + +func (l *mockLogger) Close() error { + return nil +} + +func TestRingLogger(t *testing.T) { + mockLog := &mockLogger{make(chan *Message)} // no buffer on this channel + ring := newRingLogger(mockLog, Info{}, 1) + defer ring.setClosed() + + // this should never block + ring.Log(&Message{Line: []byte("1")}) + ring.Log(&Message{Line: []byte("2")}) + ring.Log(&Message{Line: []byte("3")}) + + select { + case msg := <-mockLog.c: + if string(msg.Line) != "1" { + t.Fatalf("got unexpected msg: %q", string(msg.Line)) + } + case <-time.After(100 * time.Millisecond): + t.Fatal("timeout reading log message") + } + + select { + case msg := <-mockLog.c: + t.Fatalf("expected no more messages in the queue, got: %q", string(msg.Line)) + default: + } +} + +func TestRingCap(t *testing.T) { + r := newRing(5) + for i := 0; i < 10; i++ { + // queue messages with "0" to "10" + // the "5" to "10" messages should be dropped since we only allow 5 bytes in the buffer + if err := r.Enqueue(&Message{Line: []byte(strconv.Itoa(i))}); err != nil { + t.Fatal(err) + } + } + + // should have messages in the queue for "5" to "10" + for i := 0; i < 5; i++ { + m, err := r.Dequeue() + if err != nil { + t.Fatal(err) + } + if string(m.Line) != strconv.Itoa(i) { + t.Fatalf("got unexpected message for iter %d: %s", i, string(m.Line)) + } + } + + // queue a message that's bigger than the buffer cap + if err := r.Enqueue(&Message{Line: []byte("hello world")}); err != nil { + t.Fatal(err) + } + + // queue another message that's bigger than the buffer cap + if err := 
r.Enqueue(&Message{Line: []byte("eat a banana")}); err != nil { + t.Fatal(err) + } + + m, err := r.Dequeue() + if err != nil { + t.Fatal(err) + } + if string(m.Line) != "hello world" { + t.Fatalf("got unexpected message: %s", string(m.Line)) + } + if len(r.queue) != 0 { + t.Fatalf("expected queue to be empty, got: %d", len(r.queue)) + } +} + +func TestRingClose(t *testing.T) { + r := newRing(1) + if err := r.Enqueue(&Message{Line: []byte("hello")}); err != nil { + t.Fatal(err) + } + r.Close() + if err := r.Enqueue(&Message{}); err != errClosed { + t.Fatalf("expected errClosed, got: %v", err) + } + if len(r.queue) != 1 { + t.Fatal("expected empty queue") + } + if m, err := r.Dequeue(); err == nil || m != nil { + t.Fatal("expected err on Dequeue after close") + } + + ls := r.Drain() + if len(ls) != 1 { + t.Fatalf("expected one message: %v", ls) + } + if string(ls[0].Line) != "hello" { + t.Fatalf("got unexpected message: %s", string(ls[0].Line)) + } +} + +func TestRingDrain(t *testing.T) { + r := newRing(5) + for i := 0; i < 5; i++ { + if err := r.Enqueue(&Message{Line: []byte(strconv.Itoa(i))}); err != nil { + t.Fatal(err) + } + } + + ls := r.Drain() + if len(ls) != 5 { + t.Fatal("got unexpected length after drain") + } + + for i := 0; i < 5; i++ { + if string(ls[i].Line) != strconv.Itoa(i) { + t.Fatalf("got unexpected message at position %d: %s", i, string(ls[i].Line)) + } + } + if r.sizeBytes != 0 { + t.Fatalf("expected buffer size to be 0 after drain, got: %d", r.sizeBytes) + } + + ls = r.Drain() + if len(ls) != 0 { + t.Fatalf("expected 0 messages on 2nd drain: %v", ls) + } + +} + +type nopLogger struct{} + +func (nopLogger) Name() string { return "nopLogger" } +func (nopLogger) Close() error { return nil } +func (nopLogger) Log(*Message) error { return nil } + +func BenchmarkRingLoggerThroughputNoReceiver(b *testing.B) { + mockLog := &mockLogger{make(chan *Message)} + defer mockLog.Close() + l := NewRingLogger(mockLog, Info{}, -1) + msg := &Message{Line: []byte("hello humans and everyone else!")} + b.SetBytes(int64(len(msg.Line))) + + for i := 0; i < b.N; i++ { + if err := l.Log(msg); err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkRingLoggerThroughputWithReceiverDelay0(b *testing.B) { + l := NewRingLogger(nopLogger{}, Info{}, -1) + msg := &Message{Line: []byte("hello humans and everyone else!")} + b.SetBytes(int64(len(msg.Line))) + + for i := 0; i < b.N; i++ { + if err := l.Log(msg); err != nil { + b.Fatal(err) + } + } +} + +func consumeWithDelay(delay time.Duration, c <-chan *Message) (cancel func()) { + started := make(chan struct{}) + ctx, cancel := context.WithCancel(context.Background()) + go func() { + close(started) + ticker := time.NewTicker(delay) + for range ticker.C { + select { + case <-ctx.Done(): + ticker.Stop() + return + case <-c: + } + } + }() + <-started + return cancel +} + +func BenchmarkRingLoggerThroughputConsumeDelay1(b *testing.B) { + mockLog := &mockLogger{make(chan *Message)} + defer mockLog.Close() + l := NewRingLogger(mockLog, Info{}, -1) + msg := &Message{Line: []byte("hello humans and everyone else!")} + b.SetBytes(int64(len(msg.Line))) + + cancel := consumeWithDelay(1*time.Millisecond, mockLog.c) + defer cancel() + + for i := 0; i < b.N; i++ { + if err := l.Log(msg); err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkRingLoggerThroughputConsumeDelay10(b *testing.B) { + mockLog := &mockLogger{make(chan *Message)} + defer mockLog.Close() + l := NewRingLogger(mockLog, Info{}, -1) + msg := &Message{Line: []byte("hello humans and everyone else!")} 
+ b.SetBytes(int64(len(msg.Line))) + + cancel := consumeWithDelay(10*time.Millisecond, mockLog.c) + defer cancel() + + for i := 0; i < b.N; i++ { + if err := l.Log(msg); err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkRingLoggerThroughputConsumeDelay50(b *testing.B) { + mockLog := &mockLogger{make(chan *Message)} + defer mockLog.Close() + l := NewRingLogger(mockLog, Info{}, -1) + msg := &Message{Line: []byte("hello humans and everyone else!")} + b.SetBytes(int64(len(msg.Line))) + + cancel := consumeWithDelay(50*time.Millisecond, mockLog.c) + defer cancel() + + for i := 0; i < b.N; i++ { + if err := l.Log(msg); err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkRingLoggerThroughputConsumeDelay100(b *testing.B) { + mockLog := &mockLogger{make(chan *Message)} + defer mockLog.Close() + l := NewRingLogger(mockLog, Info{}, -1) + msg := &Message{Line: []byte("hello humans and everyone else!")} + b.SetBytes(int64(len(msg.Line))) + + cancel := consumeWithDelay(100*time.Millisecond, mockLog.c) + defer cancel() + + for i := 0; i < b.N; i++ { + if err := l.Log(msg); err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkRingLoggerThroughputConsumeDelay300(b *testing.B) { + mockLog := &mockLogger{make(chan *Message)} + defer mockLog.Close() + l := NewRingLogger(mockLog, Info{}, -1) + msg := &Message{Line: []byte("hello humans and everyone else!")} + b.SetBytes(int64(len(msg.Line))) + + cancel := consumeWithDelay(300*time.Millisecond, mockLog.c) + defer cancel() + + for i := 0; i < b.N; i++ { + if err := l.Log(msg); err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkRingLoggerThroughputConsumeDelay500(b *testing.B) { + mockLog := &mockLogger{make(chan *Message)} + defer mockLog.Close() + l := NewRingLogger(mockLog, Info{}, -1) + msg := &Message{Line: []byte("hello humans and everyone else!")} + b.SetBytes(int64(len(msg.Line))) + + cancel := consumeWithDelay(500*time.Millisecond, mockLog.c) + defer cancel() + + for i := 0; i < b.N; i++ { + if err := l.Log(msg); err != nil { + b.Fatal(err) + } + } +} diff --git a/components/engine/daemon/logger/splunk/splunk.go b/components/engine/daemon/logger/splunk/splunk.go new file mode 100644 index 0000000000..233a2db96f --- /dev/null +++ b/components/engine/daemon/logger/splunk/splunk.go @@ -0,0 +1,626 @@ +// Package splunk provides the log driver for forwarding server logs to +// Splunk HTTP Event Collector endpoint. 
+package splunk + +import ( + "bytes" + "compress/gzip" + "crypto/tls" + "crypto/x509" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "os" + "strconv" + "sync" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/daemon/logger" + "github.com/docker/docker/daemon/logger/loggerutils" + "github.com/docker/docker/pkg/urlutil" +) + +const ( + driverName = "splunk" + splunkURLKey = "splunk-url" + splunkTokenKey = "splunk-token" + splunkSourceKey = "splunk-source" + splunkSourceTypeKey = "splunk-sourcetype" + splunkIndexKey = "splunk-index" + splunkCAPathKey = "splunk-capath" + splunkCANameKey = "splunk-caname" + splunkInsecureSkipVerifyKey = "splunk-insecureskipverify" + splunkFormatKey = "splunk-format" + splunkVerifyConnectionKey = "splunk-verify-connection" + splunkGzipCompressionKey = "splunk-gzip" + splunkGzipCompressionLevelKey = "splunk-gzip-level" + envKey = "env" + envRegexKey = "env-regex" + labelsKey = "labels" + tagKey = "tag" +) + +const ( + // How often we send messages if the batch size is not reached first + defaultPostMessagesFrequency = 5 * time.Second + // Maximum number of messages in a single batch + defaultPostMessagesBatchSize = 1000 + // Maximum number of messages we can store in the buffer + defaultBufferMaximum = 10 * defaultPostMessagesBatchSize + // Number of messages allowed to be queued in the channel + defaultStreamChannelSize = 4 * defaultPostMessagesBatchSize +) + +const ( + envVarPostMessagesFrequency = "SPLUNK_LOGGING_DRIVER_POST_MESSAGES_FREQUENCY" + envVarPostMessagesBatchSize = "SPLUNK_LOGGING_DRIVER_POST_MESSAGES_BATCH_SIZE" + envVarBufferMaximum = "SPLUNK_LOGGING_DRIVER_BUFFER_MAX" + envVarStreamChannelSize = "SPLUNK_LOGGING_DRIVER_CHANNEL_SIZE" +) + +type splunkLoggerInterface interface { + logger.Logger + worker() +} + +type splunkLogger struct { + client *http.Client + transport *http.Transport + + url string + auth string + nullMessage *splunkMessage + + // http compression + gzipCompression bool + gzipCompressionLevel int + + // Advanced options + postMessagesFrequency time.Duration + postMessagesBatchSize int + bufferMaximum int + + // For synchronization between the background worker and the logger. + // We use a channel to send messages to the worker goroutine.
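+ // The worker goroutine drains this channel and posts the messages to HEC in batches; see worker() below.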
+ // The remaining fields block the Close call until all messages have been flushed to HEC + stream chan *splunkMessage + lock sync.RWMutex + closed bool + closedCond *sync.Cond +} + +type splunkLoggerInline struct { + *splunkLogger + + nullEvent *splunkMessageEvent +} + +type splunkLoggerJSON struct { + *splunkLoggerInline +} + +type splunkLoggerRaw struct { + *splunkLogger + + prefix []byte +} + +type splunkMessage struct { + Event interface{} `json:"event"` + Time string `json:"time"` + Host string `json:"host"` + Source string `json:"source,omitempty"` + SourceType string `json:"sourcetype,omitempty"` + Index string `json:"index,omitempty"` +} + +type splunkMessageEvent struct { + Line interface{} `json:"line"` + Source string `json:"source"` + Tag string `json:"tag,omitempty"` + Attrs map[string]string `json:"attrs,omitempty"` +} + +const ( + splunkFormatRaw = "raw" + splunkFormatJSON = "json" + splunkFormatInline = "inline" +) + +func init() { + if err := logger.RegisterLogDriver(driverName, New); err != nil { + logrus.Fatal(err) + } + if err := logger.RegisterLogOptValidator(driverName, ValidateLogOpt); err != nil { + logrus.Fatal(err) + } +} + +// New creates a splunk logger driver using the configuration passed in the context +func New(info logger.Info) (logger.Logger, error) { + hostname, err := info.Hostname() + if err != nil { + return nil, fmt.Errorf("%s: cannot access hostname to set source field", driverName) + } + + // Parse and validate the Splunk URL + splunkURL, err := parseURL(info) + if err != nil { + return nil, err + } + + // The Splunk token is a required parameter + splunkToken, ok := info.Config[splunkTokenKey] + if !ok { + return nil, fmt.Errorf("%s: %s is expected", driverName, splunkTokenKey) + } + + tlsConfig := &tls.Config{} + + // Splunk uses autogenerated certificates by default; + // allow users to trust them by skipping verification + if insecureSkipVerifyStr, ok := info.Config[splunkInsecureSkipVerifyKey]; ok { + insecureSkipVerify, err := strconv.ParseBool(insecureSkipVerifyStr) + if err != nil { + return nil, err + } + tlsConfig.InsecureSkipVerify = insecureSkipVerify + } + + // If a path to the root certificate is provided, load it + if caPath, ok := info.Config[splunkCAPathKey]; ok { + caCert, err := ioutil.ReadFile(caPath) + if err != nil { + return nil, err + } + caPool := x509.NewCertPool() + caPool.AppendCertsFromPEM(caCert) + tlsConfig.RootCAs = caPool + } + + if caName, ok := info.Config[splunkCANameKey]; ok { + tlsConfig.ServerName = caName + } + + gzipCompression := false + if gzipCompressionStr, ok := info.Config[splunkGzipCompressionKey]; ok { + gzipCompression, err = strconv.ParseBool(gzipCompressionStr) + if err != nil { + return nil, err + } + } + + gzipCompressionLevel := gzip.DefaultCompression + if gzipCompressionLevelStr, ok := info.Config[splunkGzipCompressionLevelKey]; ok { + var err error + gzipCompressionLevel64, err := strconv.ParseInt(gzipCompressionLevelStr, 10, 32) + if err != nil { + return nil, err + } + gzipCompressionLevel = int(gzipCompressionLevel64) + if gzipCompressionLevel < gzip.DefaultCompression || gzipCompressionLevel > gzip.BestCompression { + err := fmt.Errorf("Unsupported compression level '%s' for %s (supported values are between %d and %d).", + gzipCompressionLevelStr, splunkGzipCompressionLevelKey, gzip.DefaultCompression, gzip.BestCompression) + return nil, err + } + } + + transport := &http.Transport{ + TLSClientConfig: tlsConfig, + } + client := &http.Client{ + Transport: transport, + } + + source := info.Config[splunkSourceKey] +
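// source, sourcetype and index are optional; the omitempty tags on splunkMessage drop empty values from the JSON payload. +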
sourceType := info.Config[splunkSourceTypeKey] + index := info.Config[splunkIndexKey] + + var nullMessage = &splunkMessage{ + Host: hostname, + Source: source, + SourceType: sourceType, + Index: index, + } + + // Allow the user to remove the tag from messages by setting it to the empty string + tag := "" + if tagTemplate, ok := info.Config[tagKey]; !ok || tagTemplate != "" { + tag, err = loggerutils.ParseLogTag(info, loggerutils.DefaultTemplate) + if err != nil { + return nil, err + } + } + + attrs, err := info.ExtraAttributes(nil) + if err != nil { + return nil, err + } + + var ( + postMessagesFrequency = getAdvancedOptionDuration(envVarPostMessagesFrequency, defaultPostMessagesFrequency) + postMessagesBatchSize = getAdvancedOptionInt(envVarPostMessagesBatchSize, defaultPostMessagesBatchSize) + bufferMaximum = getAdvancedOptionInt(envVarBufferMaximum, defaultBufferMaximum) + streamChannelSize = getAdvancedOptionInt(envVarStreamChannelSize, defaultStreamChannelSize) + ) + + logger := &splunkLogger{ + client: client, + transport: transport, + url: splunkURL.String(), + auth: "Splunk " + splunkToken, + nullMessage: nullMessage, + gzipCompression: gzipCompression, + gzipCompressionLevel: gzipCompressionLevel, + stream: make(chan *splunkMessage, streamChannelSize), + postMessagesFrequency: postMessagesFrequency, + postMessagesBatchSize: postMessagesBatchSize, + bufferMaximum: bufferMaximum, + } + + // By default we verify the connection, but we allow users to skip it + verifyConnection := true + if verifyConnectionStr, ok := info.Config[splunkVerifyConnectionKey]; ok { + var err error + verifyConnection, err = strconv.ParseBool(verifyConnectionStr) + if err != nil { + return nil, err + } + } + if verifyConnection { + err = verifySplunkConnection(logger) + if err != nil { + return nil, err + } + } + + var splunkFormat string + if splunkFormatParsed, ok := info.Config[splunkFormatKey]; ok { + switch splunkFormatParsed { + case splunkFormatInline: + case splunkFormatJSON: + case splunkFormatRaw: + default: + return nil, fmt.Errorf("Unknown format specified %s, supported formats are inline, json and raw", splunkFormatParsed) + } + splunkFormat = splunkFormatParsed + } else { + splunkFormat = splunkFormatInline + } + + var loggerWrapper splunkLoggerInterface + + switch splunkFormat { + case splunkFormatInline: + nullEvent := &splunkMessageEvent{ + Tag: tag, + Attrs: attrs, + } + + loggerWrapper = &splunkLoggerInline{logger, nullEvent} + case splunkFormatJSON: + nullEvent := &splunkMessageEvent{ + Tag: tag, + Attrs: attrs, + } + + loggerWrapper = &splunkLoggerJSON{&splunkLoggerInline{logger, nullEvent}} + case splunkFormatRaw: + var prefix bytes.Buffer + if tag != "" { + prefix.WriteString(tag) + prefix.WriteString(" ") + } + for key, value := range attrs { + prefix.WriteString(key) + prefix.WriteString("=") + prefix.WriteString(value) + prefix.WriteString(" ") + } + + loggerWrapper = &splunkLoggerRaw{logger, prefix.Bytes()} + default: + return nil, fmt.Errorf("Unexpected format %s", splunkFormat) + } + + go loggerWrapper.worker() + + return loggerWrapper, nil +} + +func (l *splunkLoggerInline) Log(msg *logger.Message) error { + message := l.createSplunkMessage(msg) + + event := *l.nullEvent + event.Line = string(msg.Line) + event.Source = msg.Source + + message.Event = &event + logger.PutMessage(msg) + return l.queueMessageAsync(message) +} + +func (l *splunkLoggerJSON) Log(msg *logger.Message) error { + message := l.createSplunkMessage(msg) + event := *l.nullEvent + + var rawJSONMessage json.RawMessage + if err :=
json.Unmarshal(msg.Line, &rawJSONMessage); err == nil { + event.Line = &rawJSONMessage + } else { + event.Line = string(msg.Line) + } + + event.Source = msg.Source + + message.Event = &event + logger.PutMessage(msg) + return l.queueMessageAsync(message) +} + +func (l *splunkLoggerRaw) Log(msg *logger.Message) error { + message := l.createSplunkMessage(msg) + + message.Event = string(append(l.prefix, msg.Line...)) + logger.PutMessage(msg) + return l.queueMessageAsync(message) +} + +func (l *splunkLogger) queueMessageAsync(message *splunkMessage) error { + l.lock.RLock() + defer l.lock.RUnlock() + if l.closedCond != nil { + return fmt.Errorf("%s: driver is closed", driverName) + } + l.stream <- message + return nil +} + +func (l *splunkLogger) worker() { + timer := time.NewTicker(l.postMessagesFrequency) + var messages []*splunkMessage + for { + select { + case message, open := <-l.stream: + if !open { + l.postMessages(messages, true) + l.lock.Lock() + defer l.lock.Unlock() + l.transport.CloseIdleConnections() + l.closed = true + l.closedCond.Signal() + return + } + messages = append(messages, message) + // Send only when we reach exactly the batch size. + // This also avoids firing postMessages on every new message + // when a previous try has failed. + if len(messages)%l.postMessagesBatchSize == 0 { + messages = l.postMessages(messages, false) + } + case <-timer.C: + messages = l.postMessages(messages, false) + } + } +} + +func (l *splunkLogger) postMessages(messages []*splunkMessage, lastChance bool) []*splunkMessage { + messagesLen := len(messages) + for i := 0; i < messagesLen; i += l.postMessagesBatchSize { + upperBound := i + l.postMessagesBatchSize + if upperBound > messagesLen { + upperBound = messagesLen + } + if err := l.tryPostMessages(messages[i:upperBound]); err != nil { + logrus.Error(err) + if messagesLen-i >= l.bufferMaximum || lastChance { + // If this is the last chance, print them all to the daemon log + if lastChance { + upperBound = messagesLen + } + // Not everything was sent, but the buffer has reached its maximum; log the + // messages we could not send and return the buffer minus one batch + for j := i; j < upperBound; j++ { + if jsonEvent, err := json.Marshal(messages[j]); err != nil { + logrus.Error(err) + } else { + logrus.Error(fmt.Errorf("Failed to send a message '%s'", string(jsonEvent))) + } + } + return messages[upperBound:messagesLen] + } + // Not everything was sent; return the buffer starting at the first unsent message + return messages[i:messagesLen] + } + } + // All sent; return an empty buffer + return messages[:0] +} + +func (l *splunkLogger) tryPostMessages(messages []*splunkMessage) error { + if len(messages) == 0 { + return nil + } + var buffer bytes.Buffer + var writer io.Writer + var gzipWriter *gzip.Writer + var err error + // If gzip compression is enabled, create a gzip writer with the specified compression + // level.
If gzip compression is disabled, use a standard buffer as the writer + if l.gzipCompression { + gzipWriter, err = gzip.NewWriterLevel(&buffer, l.gzipCompressionLevel) + if err != nil { + return err + } + writer = gzipWriter + } else { + writer = &buffer + } + for _, message := range messages { + jsonEvent, err := json.Marshal(message) + if err != nil { + return err + } + if _, err := writer.Write(jsonEvent); err != nil { + return err + } + } + // If gzip compression is enabled, tell it that we are done + if l.gzipCompression { + err = gzipWriter.Close() + if err != nil { + return err + } + } + req, err := http.NewRequest("POST", l.url, bytes.NewBuffer(buffer.Bytes())) + if err != nil { + return err + } + req.Header.Set("Authorization", l.auth) + // Indicate that we are sending a gzip-compressed body + if l.gzipCompression { + req.Header.Set("Content-Encoding", "gzip") + } + res, err := l.client.Do(req) + if err != nil { + return err + } + defer res.Body.Close() + if res.StatusCode != http.StatusOK { + var body []byte + body, err = ioutil.ReadAll(res.Body) + if err != nil { + return err + } + return fmt.Errorf("%s: failed to send event - %s - %s", driverName, res.Status, body) + } + io.Copy(ioutil.Discard, res.Body) + return nil +} + +func (l *splunkLogger) Close() error { + l.lock.Lock() + defer l.lock.Unlock() + if l.closedCond == nil { + l.closedCond = sync.NewCond(&l.lock) + close(l.stream) + for !l.closed { + l.closedCond.Wait() + } + } + return nil +} + +func (l *splunkLogger) Name() string { + return driverName +} + +func (l *splunkLogger) createSplunkMessage(msg *logger.Message) *splunkMessage { + message := *l.nullMessage + message.Time = fmt.Sprintf("%f", float64(msg.Timestamp.UnixNano())/float64(time.Second)) + return &message +} + +// ValidateLogOpt looks for all options supported by the splunk driver +func ValidateLogOpt(cfg map[string]string) error { + for key := range cfg { + switch key { + case splunkURLKey: + case splunkTokenKey: + case splunkSourceKey: + case splunkSourceTypeKey: + case splunkIndexKey: + case splunkCAPathKey: + case splunkCANameKey: + case splunkInsecureSkipVerifyKey: + case splunkFormatKey: + case splunkVerifyConnectionKey: + case splunkGzipCompressionKey: + case splunkGzipCompressionLevelKey: + case envKey: + case envRegexKey: + case labelsKey: + case tagKey: + default: + return fmt.Errorf("unknown log opt '%s' for %s log driver", key, driverName) + } + } + return nil +} + +func parseURL(info logger.Info) (*url.URL, error) { + splunkURLStr, ok := info.Config[splunkURLKey] + if !ok { + return nil, fmt.Errorf("%s: %s is expected", driverName, splunkURLKey) + } + + splunkURL, err := url.Parse(splunkURLStr) + if err != nil { + return nil, fmt.Errorf("%s: failed to parse %s as url value in %s", driverName, splunkURLStr, splunkURLKey) + } + + if !urlutil.IsURL(splunkURLStr) || + !splunkURL.IsAbs() || + (splunkURL.Path != "" && splunkURL.Path != "/") || + splunkURL.RawQuery != "" || + splunkURL.Fragment != "" { + return nil, fmt.Errorf("%s: expected format scheme://dns_name_or_ip:port for %s", driverName, splunkURLKey) + } + + splunkURL.Path = "/services/collector/event/1.0" + + return splunkURL, nil +} + +func verifySplunkConnection(l *splunkLogger) error { + req, err := http.NewRequest(http.MethodOptions, l.url, nil) + if err != nil { + return err + } + res, err := l.client.Do(req) + if err != nil { + return err + } + if res.Body != nil { + defer res.Body.Close() + } + if res.StatusCode != http.StatusOK { + var body []byte + body, err = ioutil.ReadAll(res.Body) + if err !=
nil { + return err + } + return fmt.Errorf("%s: failed to verify connection - %s - %s", driverName, res.Status, body) + } + return nil +} + +func getAdvancedOptionDuration(envName string, defaultValue time.Duration) time.Duration { + valueStr := os.Getenv(envName) + if valueStr == "" { + return defaultValue + } + parsedValue, err := time.ParseDuration(valueStr) + if err != nil { + logrus.Error(fmt.Sprintf("Failed to parse value of %s as duration. Using default %v. %v", envName, defaultValue, err)) + return defaultValue + } + return parsedValue +} + +func getAdvancedOptionInt(envName string, defaultValue int) int { + valueStr := os.Getenv(envName) + if valueStr == "" { + return defaultValue + } + parsedValue, err := strconv.ParseInt(valueStr, 10, 32) + if err != nil { + logrus.Error(fmt.Sprintf("Failed to parse value of %s as integer. Using default %d. %v", envName, defaultValue, err)) + return defaultValue + } + return int(parsedValue) +} diff --git a/components/engine/daemon/logger/splunk/splunk_test.go b/components/engine/daemon/logger/splunk/splunk_test.go new file mode 100644 index 0000000000..cbe9a55bf9 --- /dev/null +++ b/components/engine/daemon/logger/splunk/splunk_test.go @@ -0,0 +1,1306 @@ +package splunk + +import ( + "compress/gzip" + "fmt" + "os" + "testing" + "time" + + "github.com/docker/docker/daemon/logger" +) + +// Validate options +func TestValidateLogOpt(t *testing.T) { + err := ValidateLogOpt(map[string]string{ + splunkURLKey: "http://127.0.0.1", + splunkTokenKey: "2160C7EF-2CE9-4307-A180-F852B99CF417", + splunkSourceKey: "mysource", + splunkSourceTypeKey: "mysourcetype", + splunkIndexKey: "myindex", + splunkCAPathKey: "/usr/cert.pem", + splunkCANameKey: "ca_name", + splunkInsecureSkipVerifyKey: "true", + splunkFormatKey: "json", + splunkVerifyConnectionKey: "true", + splunkGzipCompressionKey: "true", + splunkGzipCompressionLevelKey: "1", + envKey: "a", + envRegexKey: "^foo", + labelsKey: "b", + tagKey: "c", + }) + if err != nil { + t.Fatal(err) + } + + err = ValidateLogOpt(map[string]string{ + "not-supported-option": "a", + }) + if err == nil { + t.Fatal("Expecting error on unsupported options") + } +} + +// The driver requires the user to specify the required options +func TestNewMissedConfig(t *testing.T) { + info := logger.Info{ + Config: map[string]string{}, + } + _, err := New(info) + if err == nil { + t.Fatal("Logger driver should fail when no required parameters are specified") + } +} + +// The driver requires the user to specify splunk-url +func TestNewMissedUrl(t *testing.T) { + info := logger.Info{ + Config: map[string]string{ + splunkTokenKey: "4642492F-D8BD-47F1-A005-0C08AE4657DF", + }, + } + _, err := New(info) + if err.Error() != "splunk: splunk-url is expected" { + t.Fatal("Logger driver should fail when no required parameters are specified") + } +} + +// The driver requires the user to specify splunk-token +func TestNewMissedToken(t *testing.T) { + info := logger.Info{ + Config: map[string]string{ + splunkURLKey: "http://127.0.0.1:8088", + }, + } + _, err := New(info) + if err.Error() != "splunk: splunk-token is expected" { + t.Fatal("Logger driver should fail when no required parameters are specified") + } +} + +// Test default settings +func TestDefault(t *testing.T) { + hec := NewHTTPEventCollectorMock(t) + + go hec.Serve() + + info := logger.Info{ + Config: map[string]string{ + splunkURLKey: hec.URL(), + splunkTokenKey: hec.token, + }, + ContainerID: "containeriid", + ContainerName: "container_name", + ContainerImageID: "contaimageid", + ContainerImageName: "container_image_name", + } + +
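// The mock HEC records every request it receives; the assertions below inspect the messages it captured. +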
hostname, err := info.Hostname() + if err != nil { + t.Fatal(err) + } + + loggerDriver, err := New(info) + if err != nil { + t.Fatal(err) + } + + if loggerDriver.Name() != driverName { + t.Fatal("Unexpected logger driver name") + } + + if !hec.connectionVerified { + t.Fatal("By default connection should be verified") + } + + splunkLoggerDriver, ok := loggerDriver.(*splunkLoggerInline) + if !ok { + t.Fatal("Unexpected Splunk Logging Driver type") + } + + if splunkLoggerDriver.url != hec.URL()+"/services/collector/event/1.0" || + splunkLoggerDriver.auth != "Splunk "+hec.token || + splunkLoggerDriver.nullMessage.Host != hostname || + splunkLoggerDriver.nullMessage.Source != "" || + splunkLoggerDriver.nullMessage.SourceType != "" || + splunkLoggerDriver.nullMessage.Index != "" || + splunkLoggerDriver.gzipCompression != false || + splunkLoggerDriver.postMessagesFrequency != defaultPostMessagesFrequency || + splunkLoggerDriver.postMessagesBatchSize != defaultPostMessagesBatchSize || + splunkLoggerDriver.bufferMaximum != defaultBufferMaximum || + cap(splunkLoggerDriver.stream) != defaultStreamChannelSize { + t.Fatal("Found non-default values set in Splunk Logging Driver.") + } + + message1Time := time.Now() + if err := loggerDriver.Log(&logger.Message{Line: []byte("{\"a\":\"b\"}"), Source: "stdout", Timestamp: message1Time}); err != nil { + t.Fatal(err) + } + message2Time := time.Now() + if err := loggerDriver.Log(&logger.Message{Line: []byte("notajson"), Source: "stdout", Timestamp: message2Time}); err != nil { + t.Fatal(err) + } + + err = loggerDriver.Close() + if err != nil { + t.Fatal(err) + } + + if len(hec.messages) != 2 { + t.Fatal("Expected two messages") + } + + if *hec.gzipEnabled { + t.Fatal("Gzip should not be used") + } + + message1 := hec.messages[0] + if message1.Time != fmt.Sprintf("%f", float64(message1Time.UnixNano())/float64(time.Second)) || + message1.Host != hostname || + message1.Source != "" || + message1.SourceType != "" || + message1.Index != "" { + t.Fatalf("Unexpected values of message 1 %v", message1) + } + + if event, err := message1.EventAsMap(); err != nil { + t.Fatal(err) + } else { + if event["line"] != "{\"a\":\"b\"}" || + event["source"] != "stdout" || + event["tag"] != "containeriid" || + len(event) != 3 { + t.Fatalf("Unexpected event in message %v", event) + } + } + + message2 := hec.messages[1] + if message2.Time != fmt.Sprintf("%f", float64(message2Time.UnixNano())/float64(time.Second)) || + message2.Host != hostname || + message2.Source != "" || + message2.SourceType != "" || + message2.Index != "" { + t.Fatalf("Unexpected values of message 2 %v", message2) + } + + if event, err := message2.EventAsMap(); err != nil { + t.Fatal(err) + } else { + if event["line"] != "notajson" || + event["source"] != "stdout" || + event["tag"] != "containeriid" || + len(event) != 3 { + t.Fatalf("Unexpected event in message %v", event) + } + } + + err = hec.Close() + if err != nil { + t.Fatal(err) + } +} + +// Verify the inline format with non-default settings for most options +func TestInlineFormatWithNonDefaultOptions(t *testing.T) { + hec := NewHTTPEventCollectorMock(t) + + go hec.Serve() + + info := logger.Info{ + Config: map[string]string{ + splunkURLKey: hec.URL(), + splunkTokenKey: hec.token, + splunkSourceKey: "mysource", + splunkSourceTypeKey: "mysourcetype", + splunkIndexKey: "myindex", + splunkFormatKey: splunkFormatInline, + splunkGzipCompressionKey: "true", + tagKey: "{{.ImageName}}/{{.Name}}", + labelsKey: "a", + envRegexKey: "^foo", + }, + ContainerID:
"containeriid", + ContainerName: "/container_name", + ContainerImageID: "contaimageid", + ContainerImageName: "container_image_name", + ContainerLabels: map[string]string{ + "a": "b", + }, + ContainerEnv: []string{"foo_finder=bar"}, + } + + hostname, err := info.Hostname() + if err != nil { + t.Fatal(err) + } + + loggerDriver, err := New(info) + if err != nil { + t.Fatal(err) + } + + if !hec.connectionVerified { + t.Fatal("By default connection should be verified") + } + + splunkLoggerDriver, ok := loggerDriver.(*splunkLoggerInline) + if !ok { + t.Fatal("Unexpected Splunk Logging Driver type") + } + + if splunkLoggerDriver.url != hec.URL()+"/services/collector/event/1.0" || + splunkLoggerDriver.auth != "Splunk "+hec.token || + splunkLoggerDriver.nullMessage.Host != hostname || + splunkLoggerDriver.nullMessage.Source != "mysource" || + splunkLoggerDriver.nullMessage.SourceType != "mysourcetype" || + splunkLoggerDriver.nullMessage.Index != "myindex" || + splunkLoggerDriver.gzipCompression != true || + splunkLoggerDriver.gzipCompressionLevel != gzip.DefaultCompression || + splunkLoggerDriver.postMessagesFrequency != defaultPostMessagesFrequency || + splunkLoggerDriver.postMessagesBatchSize != defaultPostMessagesBatchSize || + splunkLoggerDriver.bufferMaximum != defaultBufferMaximum || + cap(splunkLoggerDriver.stream) != defaultStreamChannelSize { + t.Fatal("Values do not match configuration.") + } + + messageTime := time.Now() + if err := loggerDriver.Log(&logger.Message{Line: []byte("1"), Source: "stdout", Timestamp: messageTime}); err != nil { + t.Fatal(err) + } + + err = loggerDriver.Close() + if err != nil { + t.Fatal(err) + } + + if len(hec.messages) != 1 { + t.Fatal("Expected one message") + } + + if !*hec.gzipEnabled { + t.Fatal("Gzip should be used") + } + + message := hec.messages[0] + if message.Time != fmt.Sprintf("%f", float64(messageTime.UnixNano())/float64(time.Second)) || + message.Host != hostname || + message.Source != "mysource" || + message.SourceType != "mysourcetype" || + message.Index != "myindex" { + t.Fatalf("Unexpected values of message %v", message) + } + + if event, err := message.EventAsMap(); err != nil { + t.Fatal(err) + } else { + if event["line"] != "1" || + event["source"] != "stdout" || + event["tag"] != "container_image_name/container_name" || + event["attrs"].(map[string]interface{})["a"] != "b" || + event["attrs"].(map[string]interface{})["foo_finder"] != "bar" || + len(event) != 4 { + t.Fatalf("Unexpected event in message %v", event) + } + } + + err = hec.Close() + if err != nil { + t.Fatal(err) + } +} + +// Verify JSON format +func TestJsonFormat(t *testing.T) { + hec := NewHTTPEventCollectorMock(t) + + go hec.Serve() + + info := logger.Info{ + Config: map[string]string{ + splunkURLKey: hec.URL(), + splunkTokenKey: hec.token, + splunkFormatKey: splunkFormatJSON, + splunkGzipCompressionKey: "true", + splunkGzipCompressionLevelKey: "1", + }, + ContainerID: "containeriid", + ContainerName: "/container_name", + ContainerImageID: "contaimageid", + ContainerImageName: "container_image_name", + } + + hostname, err := info.Hostname() + if err != nil { + t.Fatal(err) + } + + loggerDriver, err := New(info) + if err != nil { + t.Fatal(err) + } + + if !hec.connectionVerified { + t.Fatal("By default connection should be verified") + } + + splunkLoggerDriver, ok := loggerDriver.(*splunkLoggerJSON) + if !ok { + t.Fatal("Unexpected Splunk Logging Driver type") + } + + if splunkLoggerDriver.url != hec.URL()+"/services/collector/event/1.0" || + splunkLoggerDriver.auth != 
"Splunk "+hec.token || + splunkLoggerDriver.nullMessage.Host != hostname || + splunkLoggerDriver.nullMessage.Source != "" || + splunkLoggerDriver.nullMessage.SourceType != "" || + splunkLoggerDriver.nullMessage.Index != "" || + splunkLoggerDriver.gzipCompression != true || + splunkLoggerDriver.gzipCompressionLevel != gzip.BestSpeed || + splunkLoggerDriver.postMessagesFrequency != defaultPostMessagesFrequency || + splunkLoggerDriver.postMessagesBatchSize != defaultPostMessagesBatchSize || + splunkLoggerDriver.bufferMaximum != defaultBufferMaximum || + cap(splunkLoggerDriver.stream) != defaultStreamChannelSize { + t.Fatal("Values do not match configuration.") + } + + message1Time := time.Now() + if err := loggerDriver.Log(&logger.Message{Line: []byte("{\"a\":\"b\"}"), Source: "stdout", Timestamp: message1Time}); err != nil { + t.Fatal(err) + } + message2Time := time.Now() + if err := loggerDriver.Log(&logger.Message{Line: []byte("notjson"), Source: "stdout", Timestamp: message2Time}); err != nil { + t.Fatal(err) + } + + err = loggerDriver.Close() + if err != nil { + t.Fatal(err) + } + + if len(hec.messages) != 2 { + t.Fatal("Expected two messages") + } + + message1 := hec.messages[0] + if message1.Time != fmt.Sprintf("%f", float64(message1Time.UnixNano())/float64(time.Second)) || + message1.Host != hostname || + message1.Source != "" || + message1.SourceType != "" || + message1.Index != "" { + t.Fatalf("Unexpected values of message 1 %v", message1) + } + + if event, err := message1.EventAsMap(); err != nil { + t.Fatal(err) + } else { + if event["line"].(map[string]interface{})["a"] != "b" || + event["source"] != "stdout" || + event["tag"] != "containeriid" || + len(event) != 3 { + t.Fatalf("Unexpected event in message 1 %v", event) + } + } + + message2 := hec.messages[1] + if message2.Time != fmt.Sprintf("%f", float64(message2Time.UnixNano())/float64(time.Second)) || + message2.Host != hostname || + message2.Source != "" || + message2.SourceType != "" || + message2.Index != "" { + t.Fatalf("Unexpected values of message 2 %v", message2) + } + + // If message cannot be parsed as JSON - it should be sent as a line + if event, err := message2.EventAsMap(); err != nil { + t.Fatal(err) + } else { + if event["line"] != "notjson" || + event["source"] != "stdout" || + event["tag"] != "containeriid" || + len(event) != 3 { + t.Fatalf("Unexpected event in message 2 %v", event) + } + } + + err = hec.Close() + if err != nil { + t.Fatal(err) + } +} + +// Verify raw format +func TestRawFormat(t *testing.T) { + hec := NewHTTPEventCollectorMock(t) + + go hec.Serve() + + info := logger.Info{ + Config: map[string]string{ + splunkURLKey: hec.URL(), + splunkTokenKey: hec.token, + splunkFormatKey: splunkFormatRaw, + }, + ContainerID: "containeriid", + ContainerName: "/container_name", + ContainerImageID: "contaimageid", + ContainerImageName: "container_image_name", + } + + hostname, err := info.Hostname() + if err != nil { + t.Fatal(err) + } + + loggerDriver, err := New(info) + if err != nil { + t.Fatal(err) + } + + if !hec.connectionVerified { + t.Fatal("By default connection should be verified") + } + + splunkLoggerDriver, ok := loggerDriver.(*splunkLoggerRaw) + if !ok { + t.Fatal("Unexpected Splunk Logging Driver type") + } + + if splunkLoggerDriver.url != hec.URL()+"/services/collector/event/1.0" || + splunkLoggerDriver.auth != "Splunk "+hec.token || + splunkLoggerDriver.nullMessage.Host != hostname || + splunkLoggerDriver.nullMessage.Source != "" || + splunkLoggerDriver.nullMessage.SourceType != "" || + 
splunkLoggerDriver.nullMessage.Index != "" || + splunkLoggerDriver.gzipCompression != false || + splunkLoggerDriver.postMessagesFrequency != defaultPostMessagesFrequency || + splunkLoggerDriver.postMessagesBatchSize != defaultPostMessagesBatchSize || + splunkLoggerDriver.bufferMaximum != defaultBufferMaximum || + cap(splunkLoggerDriver.stream) != defaultStreamChannelSize || + string(splunkLoggerDriver.prefix) != "containeriid " { + t.Fatal("Values do not match configuration.") + } + + message1Time := time.Now() + if err := loggerDriver.Log(&logger.Message{Line: []byte("{\"a\":\"b\"}"), Source: "stdout", Timestamp: message1Time}); err != nil { + t.Fatal(err) + } + message2Time := time.Now() + if err := loggerDriver.Log(&logger.Message{Line: []byte("notjson"), Source: "stdout", Timestamp: message2Time}); err != nil { + t.Fatal(err) + } + + err = loggerDriver.Close() + if err != nil { + t.Fatal(err) + } + + if len(hec.messages) != 2 { + t.Fatal("Expected two messages") + } + + message1 := hec.messages[0] + if message1.Time != fmt.Sprintf("%f", float64(message1Time.UnixNano())/float64(time.Second)) || + message1.Host != hostname || + message1.Source != "" || + message1.SourceType != "" || + message1.Index != "" { + t.Fatalf("Unexpected values of message 1 %v", message1) + } + + if event, err := message1.EventAsString(); err != nil { + t.Fatal(err) + } else { + if event != "containeriid {\"a\":\"b\"}" { + t.Fatalf("Unexpected event in message 1 %v", event) + } + } + + message2 := hec.messages[1] + if message2.Time != fmt.Sprintf("%f", float64(message2Time.UnixNano())/float64(time.Second)) || + message2.Host != hostname || + message2.Source != "" || + message2.SourceType != "" || + message2.Index != "" { + t.Fatalf("Unexpected values of message 2 %v", message2) + } + + if event, err := message2.EventAsString(); err != nil { + t.Fatal(err) + } else { + if event != "containeriid notjson" { + t.Fatalf("Unexpected event in message 2 %v", event) + } + } + + err = hec.Close() + if err != nil { + t.Fatal(err) + } +} + +// Verify raw format with labels +func TestRawFormatWithLabels(t *testing.T) { + hec := NewHTTPEventCollectorMock(t) + + go hec.Serve() + + info := logger.Info{ + Config: map[string]string{ + splunkURLKey: hec.URL(), + splunkTokenKey: hec.token, + splunkFormatKey: splunkFormatRaw, + labelsKey: "a", + }, + ContainerID: "containeriid", + ContainerName: "/container_name", + ContainerImageID: "contaimageid", + ContainerImageName: "container_image_name", + ContainerLabels: map[string]string{ + "a": "b", + }, + } + + hostname, err := info.Hostname() + if err != nil { + t.Fatal(err) + } + + loggerDriver, err := New(info) + if err != nil { + t.Fatal(err) + } + + if !hec.connectionVerified { + t.Fatal("By default connection should be verified") + } + + splunkLoggerDriver, ok := loggerDriver.(*splunkLoggerRaw) + if !ok { + t.Fatal("Unexpected Splunk Logging Driver type") + } + + if splunkLoggerDriver.url != hec.URL()+"/services/collector/event/1.0" || + splunkLoggerDriver.auth != "Splunk "+hec.token || + splunkLoggerDriver.nullMessage.Host != hostname || + splunkLoggerDriver.nullMessage.Source != "" || + splunkLoggerDriver.nullMessage.SourceType != "" || + splunkLoggerDriver.nullMessage.Index != "" || + splunkLoggerDriver.gzipCompression != false || + splunkLoggerDriver.postMessagesFrequency != defaultPostMessagesFrequency || + splunkLoggerDriver.postMessagesBatchSize != defaultPostMessagesBatchSize || + splunkLoggerDriver.bufferMaximum != defaultBufferMaximum || + cap(splunkLoggerDriver.stream)
!= defaultStreamChannelSize || + string(splunkLoggerDriver.prefix) != "containeriid a=b " { + t.Fatal("Values do not match configuration.") + } + + message1Time := time.Now() + if err := loggerDriver.Log(&logger.Message{Line: []byte("{\"a\":\"b\"}"), Source: "stdout", Timestamp: message1Time}); err != nil { + t.Fatal(err) + } + message2Time := time.Now() + if err := loggerDriver.Log(&logger.Message{Line: []byte("notjson"), Source: "stdout", Timestamp: message2Time}); err != nil { + t.Fatal(err) + } + + err = loggerDriver.Close() + if err != nil { + t.Fatal(err) + } + + if len(hec.messages) != 2 { + t.Fatal("Expected two messages") + } + + message1 := hec.messages[0] + if message1.Time != fmt.Sprintf("%f", float64(message1Time.UnixNano())/float64(time.Second)) || + message1.Host != hostname || + message1.Source != "" || + message1.SourceType != "" || + message1.Index != "" { + t.Fatalf("Unexpected values of message 1 %v", message1) + } + + if event, err := message1.EventAsString(); err != nil { + t.Fatal(err) + } else { + if event != "containeriid a=b {\"a\":\"b\"}" { + t.Fatalf("Unexpected event in message 1 %v", event) + } + } + + message2 := hec.messages[1] + if message2.Time != fmt.Sprintf("%f", float64(message2Time.UnixNano())/float64(time.Second)) || + message2.Host != hostname || + message2.Source != "" || + message2.SourceType != "" || + message2.Index != "" { + t.Fatalf("Unexpected values of message 2 %v", message2) + } + + if event, err := message2.EventAsString(); err != nil { + t.Fatal(err) + } else { + if event != "containeriid a=b notjson" { + t.Fatalf("Unexpected event in message 2 %v", event) + } + } + + err = hec.Close() + if err != nil { + t.Fatal(err) + } +} + +// Verify that Splunk Logging Driver can accept tag="" which will allow to send raw messages +// in the same way we get them in stdout/stderr +func TestRawFormatWithoutTag(t *testing.T) { + hec := NewHTTPEventCollectorMock(t) + + go hec.Serve() + + info := logger.Info{ + Config: map[string]string{ + splunkURLKey: hec.URL(), + splunkTokenKey: hec.token, + splunkFormatKey: splunkFormatRaw, + tagKey: "", + }, + ContainerID: "containeriid", + ContainerName: "/container_name", + ContainerImageID: "contaimageid", + ContainerImageName: "container_image_name", + } + + hostname, err := info.Hostname() + if err != nil { + t.Fatal(err) + } + + loggerDriver, err := New(info) + if err != nil { + t.Fatal(err) + } + + if !hec.connectionVerified { + t.Fatal("By default connection should be verified") + } + + splunkLoggerDriver, ok := loggerDriver.(*splunkLoggerRaw) + if !ok { + t.Fatal("Unexpected Splunk Logging Driver type") + } + + if splunkLoggerDriver.url != hec.URL()+"/services/collector/event/1.0" || + splunkLoggerDriver.auth != "Splunk "+hec.token || + splunkLoggerDriver.nullMessage.Host != hostname || + splunkLoggerDriver.nullMessage.Source != "" || + splunkLoggerDriver.nullMessage.SourceType != "" || + splunkLoggerDriver.nullMessage.Index != "" || + splunkLoggerDriver.gzipCompression != false || + splunkLoggerDriver.postMessagesFrequency != defaultPostMessagesFrequency || + splunkLoggerDriver.postMessagesBatchSize != defaultPostMessagesBatchSize || + splunkLoggerDriver.bufferMaximum != defaultBufferMaximum || + cap(splunkLoggerDriver.stream) != defaultStreamChannelSize || + string(splunkLoggerDriver.prefix) != "" { + t.Log(string(splunkLoggerDriver.prefix) + "a") + t.Fatal("Values do not match configuration.") + } + + message1Time := time.Now() + if err := loggerDriver.Log(&logger.Message{Line: []byte("{\"a\":\"b\"}"), 
Source: "stdout", Timestamp: message1Time}); err != nil { + t.Fatal(err) + } + message2Time := time.Now() + if err := loggerDriver.Log(&logger.Message{Line: []byte("notjson"), Source: "stdout", Timestamp: message2Time}); err != nil { + t.Fatal(err) + } + + err = loggerDriver.Close() + if err != nil { + t.Fatal(err) + } + + if len(hec.messages) != 2 { + t.Fatal("Expected two messages") + } + + message1 := hec.messages[0] + if message1.Time != fmt.Sprintf("%f", float64(message1Time.UnixNano())/float64(time.Second)) || + message1.Host != hostname || + message1.Source != "" || + message1.SourceType != "" || + message1.Index != "" { + t.Fatalf("Unexpected values of message 1 %v", message1) + } + + if event, err := message1.EventAsString(); err != nil { + t.Fatal(err) + } else { + if event != "{\"a\":\"b\"}" { + t.Fatalf("Unexpected event in message 1 %v", event) + } + } + + message2 := hec.messages[1] + if message2.Time != fmt.Sprintf("%f", float64(message2Time.UnixNano())/float64(time.Second)) || + message2.Host != hostname || + message2.Source != "" || + message2.SourceType != "" || + message2.Index != "" { + t.Fatalf("Unexpected values of message 2 %v", message2) + } + + if event, err := message2.EventAsString(); err != nil { + t.Fatal(err) + } else { + if event != "notjson" { + t.Fatalf("Unexpected event in message 2 %v", event) + } + } + + err = hec.Close() + if err != nil { + t.Fatal(err) + } +} + +// Verify that we will send messages in batches with default batching parameters, +// but change frequency to be sure that numOfRequests will match expected 17 requests +func TestBatching(t *testing.T) { + if err := os.Setenv(envVarPostMessagesFrequency, "10h"); err != nil { + t.Fatal(err) + } + + hec := NewHTTPEventCollectorMock(t) + + go hec.Serve() + + info := logger.Info{ + Config: map[string]string{ + splunkURLKey: hec.URL(), + splunkTokenKey: hec.token, + }, + ContainerID: "containeriid", + ContainerName: "/container_name", + ContainerImageID: "contaimageid", + ContainerImageName: "container_image_name", + } + + loggerDriver, err := New(info) + if err != nil { + t.Fatal(err) + } + + for i := 0; i < defaultStreamChannelSize*4; i++ { + if err := loggerDriver.Log(&logger.Message{Line: []byte(fmt.Sprintf("%d", i)), Source: "stdout", Timestamp: time.Now()}); err != nil { + t.Fatal(err) + } + } + + err = loggerDriver.Close() + if err != nil { + t.Fatal(err) + } + + if len(hec.messages) != defaultStreamChannelSize*4 { + t.Fatal("Not all messages delivered") + } + + for i, message := range hec.messages { + if event, err := message.EventAsMap(); err != nil { + t.Fatal(err) + } else { + if event["line"] != fmt.Sprintf("%d", i) { + t.Fatalf("Unexpected event in message %v", event) + } + } + } + + // 1 to verify connection and 16 batches + if hec.numOfRequests != 17 { + t.Fatalf("Unexpected number of requests %d", hec.numOfRequests) + } + + err = hec.Close() + if err != nil { + t.Fatal(err) + } + + if err := os.Setenv(envVarPostMessagesFrequency, ""); err != nil { + t.Fatal(err) + } +} + +// Verify that test is using time to fire events not rare than specified frequency +func TestFrequency(t *testing.T) { + if err := os.Setenv(envVarPostMessagesFrequency, "5ms"); err != nil { + t.Fatal(err) + } + + hec := NewHTTPEventCollectorMock(t) + + go hec.Serve() + + info := logger.Info{ + Config: map[string]string{ + splunkURLKey: hec.URL(), + splunkTokenKey: hec.token, + }, + ContainerID: "containeriid", + ContainerName: "/container_name", + ContainerImageID: "contaimageid", + ContainerImageName: 
"container_image_name", + } + + loggerDriver, err := New(info) + if err != nil { + t.Fatal(err) + } + + for i := 0; i < 10; i++ { + if err := loggerDriver.Log(&logger.Message{Line: []byte(fmt.Sprintf("%d", i)), Source: "stdout", Timestamp: time.Now()}); err != nil { + t.Fatal(err) + } + time.Sleep(15 * time.Millisecond) + } + + err = loggerDriver.Close() + if err != nil { + t.Fatal(err) + } + + if len(hec.messages) != 10 { + t.Fatal("Not all messages delivered") + } + + for i, message := range hec.messages { + if event, err := message.EventAsMap(); err != nil { + t.Fatal(err) + } else { + if event["line"] != fmt.Sprintf("%d", i) { + t.Fatalf("Unexpected event in message %v", event) + } + } + } + + // 1 to verify connection and 10 to verify that we have sent messages with required frequency, + // but because frequency is too small (to keep test quick), instead of 11, use 9 if context switches will be slow + if hec.numOfRequests < 9 { + t.Fatalf("Unexpected number of requests %d", hec.numOfRequests) + } + + err = hec.Close() + if err != nil { + t.Fatal(err) + } + + if err := os.Setenv(envVarPostMessagesFrequency, ""); err != nil { + t.Fatal(err) + } +} + +// Simulate behavior similar to first version of Splunk Logging Driver, when we were sending one message +// per request +func TestOneMessagePerRequest(t *testing.T) { + if err := os.Setenv(envVarPostMessagesFrequency, "10h"); err != nil { + t.Fatal(err) + } + + if err := os.Setenv(envVarPostMessagesBatchSize, "1"); err != nil { + t.Fatal(err) + } + + if err := os.Setenv(envVarBufferMaximum, "1"); err != nil { + t.Fatal(err) + } + + if err := os.Setenv(envVarStreamChannelSize, "0"); err != nil { + t.Fatal(err) + } + + hec := NewHTTPEventCollectorMock(t) + + go hec.Serve() + + info := logger.Info{ + Config: map[string]string{ + splunkURLKey: hec.URL(), + splunkTokenKey: hec.token, + }, + ContainerID: "containeriid", + ContainerName: "/container_name", + ContainerImageID: "contaimageid", + ContainerImageName: "container_image_name", + } + + loggerDriver, err := New(info) + if err != nil { + t.Fatal(err) + } + + for i := 0; i < 10; i++ { + if err := loggerDriver.Log(&logger.Message{Line: []byte(fmt.Sprintf("%d", i)), Source: "stdout", Timestamp: time.Now()}); err != nil { + t.Fatal(err) + } + } + + err = loggerDriver.Close() + if err != nil { + t.Fatal(err) + } + + if len(hec.messages) != 10 { + t.Fatal("Not all messages delivered") + } + + for i, message := range hec.messages { + if event, err := message.EventAsMap(); err != nil { + t.Fatal(err) + } else { + if event["line"] != fmt.Sprintf("%d", i) { + t.Fatalf("Unexpected event in message %v", event) + } + } + } + + // 1 to verify connection and 10 messages + if hec.numOfRequests != 11 { + t.Fatalf("Unexpected number of requests %d", hec.numOfRequests) + } + + err = hec.Close() + if err != nil { + t.Fatal(err) + } + + if err := os.Setenv(envVarPostMessagesFrequency, ""); err != nil { + t.Fatal(err) + } + + if err := os.Setenv(envVarPostMessagesBatchSize, ""); err != nil { + t.Fatal(err) + } + + if err := os.Setenv(envVarBufferMaximum, ""); err != nil { + t.Fatal(err) + } + + if err := os.Setenv(envVarStreamChannelSize, ""); err != nil { + t.Fatal(err) + } +} + +// Driver should not be created when HEC is unresponsive +func TestVerify(t *testing.T) { + hec := NewHTTPEventCollectorMock(t) + hec.simulateServerError = true + go hec.Serve() + + info := logger.Info{ + Config: map[string]string{ + splunkURLKey: hec.URL(), + splunkTokenKey: hec.token, + }, + ContainerID: "containeriid", + 
ContainerName: "/container_name", + ContainerImageID: "contaimageid", + ContainerImageName: "container_image_name", + } + + _, err := New(info) + if err == nil { + t.Fatal("Expecting driver to fail when server is unresponsive") + } + + err = hec.Close() + if err != nil { + t.Fatal(err) + } +} + +// Verify that the user can choose to skip verification that the Splunk HEC is reachable. +// This test also exercises the retry logic. +func TestSkipVerify(t *testing.T) { + hec := NewHTTPEventCollectorMock(t) + hec.simulateServerError = true + go hec.Serve() + + info := logger.Info{ + Config: map[string]string{ + splunkURLKey: hec.URL(), + splunkTokenKey: hec.token, + splunkVerifyConnectionKey: "false", + }, + ContainerID: "containeriid", + ContainerName: "/container_name", + ContainerImageID: "contaimageid", + ContainerImageName: "container_image_name", + } + + loggerDriver, err := New(info) + if err != nil { + t.Fatal(err) + } + + if hec.connectionVerified { + t.Fatal("Connection should not be verified") + } + + for i := 0; i < defaultStreamChannelSize*2; i++ { + if err := loggerDriver.Log(&logger.Message{Line: []byte(fmt.Sprintf("%d", i)), Source: "stdout", Timestamp: time.Now()}); err != nil { + t.Fatal(err) + } + } + + if len(hec.messages) != 0 { + t.Fatal("No messages should be accepted at this point") + } + + hec.simulateServerError = false + + for i := defaultStreamChannelSize * 2; i < defaultStreamChannelSize*4; i++ { + if err := loggerDriver.Log(&logger.Message{Line: []byte(fmt.Sprintf("%d", i)), Source: "stdout", Timestamp: time.Now()}); err != nil { + t.Fatal(err) + } + } + + err = loggerDriver.Close() + if err != nil { + t.Fatal(err) + } + + if len(hec.messages) != defaultStreamChannelSize*4 { + t.Fatal("Not all messages delivered") + } + + for i, message := range hec.messages { + if event, err := message.EventAsMap(); err != nil { + t.Fatal(err) + } else { + if event["line"] != fmt.Sprintf("%d", i) { + t.Fatalf("Unexpected event in message %v", event) + } + } + } + + err = hec.Close() + if err != nil { + t.Fatal(err) + } +} + +// Verify the logic for when the whole buffer has been filled +func TestBufferMaximum(t *testing.T) { + if err := os.Setenv(envVarPostMessagesBatchSize, "2"); err != nil { + t.Fatal(err) + } + + if err := os.Setenv(envVarBufferMaximum, "10"); err != nil { + t.Fatal(err) + } + + if err := os.Setenv(envVarStreamChannelSize, "0"); err != nil { + t.Fatal(err) + } + + hec := NewHTTPEventCollectorMock(t) + hec.simulateServerError = true + go hec.Serve() + + info := logger.Info{ + Config: map[string]string{ + splunkURLKey: hec.URL(), + splunkTokenKey: hec.token, + splunkVerifyConnectionKey: "false", + }, + ContainerID: "containeriid", + ContainerName: "/container_name", + ContainerImageID: "contaimageid", + ContainerImageName: "container_image_name", + } + + loggerDriver, err := New(info) + if err != nil { + t.Fatal(err) + } + + if hec.connectionVerified { + t.Fatal("Connection should not be verified") + } + + for i := 0; i < 11; i++ { + if err := loggerDriver.Log(&logger.Message{Line: []byte(fmt.Sprintf("%d", i)), Source: "stdout", Timestamp: time.Now()}); err != nil { + t.Fatal(err) + } + } + + if len(hec.messages) != 0 { + t.Fatal("No messages should be accepted at this point") + } + + hec.simulateServerError = false + + err = loggerDriver.Close() + if err != nil { + t.Fatal(err) + } + + if len(hec.messages) != 9 { + t.Fatalf("Expected # of messages %d, got %d", 9, len(hec.messages)) + } + + // The first batch (messages "0" and "1") was dropped and written to the daemon log when the buffer filled up + for i, message := range
hec.messages { + if event, err := message.EventAsMap(); err != nil { + t.Fatal(err) + } else { + if event["line"] != fmt.Sprintf("%d", i+2) { + t.Fatalf("Unexpected event in message %v", event) + } + } + } + + err = hec.Close() + if err != nil { + t.Fatal(err) + } + + if err := os.Setenv(envVarPostMessagesBatchSize, ""); err != nil { + t.Fatal(err) + } + + if err := os.Setenv(envVarBufferMaximum, ""); err != nil { + t.Fatal(err) + } + + if err := os.Setenv(envVarStreamChannelSize, ""); err != nil { + t.Fatal(err) + } +} + +// Verify that we are not blocking close when HEC is down for the whole time +func TestServerAlwaysDown(t *testing.T) { + if err := os.Setenv(envVarPostMessagesBatchSize, "2"); err != nil { + t.Fatal(err) + } + + if err := os.Setenv(envVarBufferMaximum, "4"); err != nil { + t.Fatal(err) + } + + if err := os.Setenv(envVarStreamChannelSize, "0"); err != nil { + t.Fatal(err) + } + + hec := NewHTTPEventCollectorMock(t) + hec.simulateServerError = true + go hec.Serve() + + info := logger.Info{ + Config: map[string]string{ + splunkURLKey: hec.URL(), + splunkTokenKey: hec.token, + splunkVerifyConnectionKey: "false", + }, + ContainerID: "containeriid", + ContainerName: "/container_name", + ContainerImageID: "contaimageid", + ContainerImageName: "container_image_name", + } + + loggerDriver, err := New(info) + if err != nil { + t.Fatal(err) + } + + if hec.connectionVerified { + t.Fatal("Connection should not be verified") + } + + for i := 0; i < 5; i++ { + if err := loggerDriver.Log(&logger.Message{Line: []byte(fmt.Sprintf("%d", i)), Source: "stdout", Timestamp: time.Now()}); err != nil { + t.Fatal(err) + } + } + + err = loggerDriver.Close() + if err != nil { + t.Fatal(err) + } + + if len(hec.messages) != 0 { + t.Fatal("No messages should be sent") + } + + err = hec.Close() + if err != nil { + t.Fatal(err) + } + + if err := os.Setenv(envVarPostMessagesBatchSize, ""); err != nil { + t.Fatal(err) + } + + if err := os.Setenv(envVarBufferMaximum, ""); err != nil { + t.Fatal(err) + } + + if err := os.Setenv(envVarStreamChannelSize, ""); err != nil { + t.Fatal(err) + } +} + +// Cannot send messages after we close driver +func TestCannotSendAfterClose(t *testing.T) { + hec := NewHTTPEventCollectorMock(t) + go hec.Serve() + + info := logger.Info{ + Config: map[string]string{ + splunkURLKey: hec.URL(), + splunkTokenKey: hec.token, + }, + ContainerID: "containeriid", + ContainerName: "/container_name", + ContainerImageID: "contaimageid", + ContainerImageName: "container_image_name", + } + + loggerDriver, err := New(info) + if err != nil { + t.Fatal(err) + } + + if err := loggerDriver.Log(&logger.Message{Line: []byte("message1"), Source: "stdout", Timestamp: time.Now()}); err != nil { + t.Fatal(err) + } + + err = loggerDriver.Close() + if err != nil { + t.Fatal(err) + } + + if err := loggerDriver.Log(&logger.Message{Line: []byte("message2"), Source: "stdout", Timestamp: time.Now()}); err == nil { + t.Fatal("Driver should not allow to send messages after close") + } + + if len(hec.messages) != 1 { + t.Fatal("Only one message should be sent") + } + + message := hec.messages[0] + if event, err := message.EventAsMap(); err != nil { + t.Fatal(err) + } else { + if event["line"] != "message1" { + t.Fatalf("Unexpected event in message %v", event) + } + } + + err = hec.Close() + if err != nil { + t.Fatal(err) + } +} diff --git a/components/engine/daemon/logger/splunk/splunkhecmock_test.go b/components/engine/daemon/logger/splunk/splunkhecmock_test.go new file mode 100644 index 0000000000..e508948280 
--- /dev/null +++ b/components/engine/daemon/logger/splunk/splunkhecmock_test.go @@ -0,0 +1,157 @@ +package splunk + +import ( + "compress/gzip" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net" + "net/http" + "testing" +) + +func (message *splunkMessage) EventAsString() (string, error) { + if val, ok := message.Event.(string); ok { + return val, nil + } + return "", fmt.Errorf("Cannot cast Event %v to string", message.Event) +} + +func (message *splunkMessage) EventAsMap() (map[string]interface{}, error) { + if val, ok := message.Event.(map[string]interface{}); ok { + return val, nil + } + return nil, fmt.Errorf("Cannot cast Event %v to map", message.Event) +} + +type HTTPEventCollectorMock struct { + tcpAddr *net.TCPAddr + tcpListener *net.TCPListener + + token string + simulateServerError bool + + test *testing.T + + connectionVerified bool + gzipEnabled *bool + messages []*splunkMessage + numOfRequests int +} + +func NewHTTPEventCollectorMock(t *testing.T) *HTTPEventCollectorMock { + tcpAddr := &net.TCPAddr{IP: []byte{127, 0, 0, 1}, Port: 0, Zone: ""} + tcpListener, err := net.ListenTCP("tcp", tcpAddr) + if err != nil { + t.Fatal(err) + } + return &HTTPEventCollectorMock{ + tcpAddr: tcpAddr, + tcpListener: tcpListener, + token: "4642492F-D8BD-47F1-A005-0C08AE4657DF", + simulateServerError: false, + test: t, + connectionVerified: false} +} + +func (hec *HTTPEventCollectorMock) URL() string { + return "http://" + hec.tcpListener.Addr().String() +} + +func (hec *HTTPEventCollectorMock) Serve() error { + return http.Serve(hec.tcpListener, hec) +} + +func (hec *HTTPEventCollectorMock) Close() error { + return hec.tcpListener.Close() +} + +func (hec *HTTPEventCollectorMock) ServeHTTP(writer http.ResponseWriter, request *http.Request) { + var err error + + hec.numOfRequests++ + + if hec.simulateServerError { + if request.Body != nil { + defer request.Body.Close() + } + writer.WriteHeader(http.StatusInternalServerError) + return + } + + switch request.Method { + case http.MethodOptions: + // Verify that options method is getting called only once + if hec.connectionVerified { + hec.test.Errorf("Connection should not be verified more than once. 
Got second request with %s method.", request.Method) + } + hec.connectionVerified = true + writer.WriteHeader(http.StatusOK) + case http.MethodPost: + // Always verify that the driver is using the correct HEC path + if request.URL.String() != "/services/collector/event/1.0" { + hec.test.Errorf("Unexpected path %v", request.URL) + } + defer request.Body.Close() + + if authorization, ok := request.Header["Authorization"]; !ok || authorization[0] != ("Splunk "+hec.token) { + hec.test.Error("Authorization header is invalid.") + } + + gzipEnabled := false + if contentEncoding, ok := request.Header["Content-Encoding"]; ok && contentEncoding[0] == "gzip" { + gzipEnabled = true + } + + if hec.gzipEnabled == nil { + hec.gzipEnabled = &gzipEnabled + } else if gzipEnabled != *hec.gzipEnabled { + // Nothing wrong with that in general, but we know the Splunk Logging Driver never changes the encoding mid-stream + hec.test.Error("Driver should not change Content Encoding.") + } + + var gzipReader *gzip.Reader + var reader io.Reader + if gzipEnabled { + gzipReader, err = gzip.NewReader(request.Body) + if err != nil { + hec.test.Fatal(err) + } + reader = gzipReader + } else { + reader = request.Body + } + + // Read the body + var body []byte + body, err = ioutil.ReadAll(reader) + if err != nil { + hec.test.Fatal(err) + } + + // Parse the concatenated JSON messages + messageStart := 0 + for i := 0; i < len(body); i++ { + if i == len(body)-1 || (body[i] == '}' && body[i+1] == '{') { + var message splunkMessage + err = json.Unmarshal(body[messageStart:i+1], &message) + if err != nil { + hec.test.Log(string(body[messageStart : i+1])) + hec.test.Fatal(err) + } + hec.messages = append(hec.messages, &message) + messageStart = i + 1 + } + } + + if gzipEnabled { + gzipReader.Close() + } + + writer.WriteHeader(http.StatusOK) + default: + hec.test.Errorf("Unexpected HTTP method %s", request.Method) + writer.WriteHeader(http.StatusBadRequest) + } +} diff --git a/components/engine/daemon/logger/syslog/syslog.go b/components/engine/daemon/logger/syslog/syslog.go new file mode 100644 index 0000000000..925473aeef --- /dev/null +++ b/components/engine/daemon/logger/syslog/syslog.go @@ -0,0 +1,265 @@ +// Package syslog provides the log driver for forwarding server logs to syslog endpoints.
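+// +// An illustrative invocation (the address is a placeholder, not a default): +// +// docker run --log-driver=syslog \ +// --log-opt syslog-address=tcp://192.168.0.42:514 \ +// --log-opt syslog-format=rfc5424 \ +// your-image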
+package syslog + +import ( + "crypto/tls" + "errors" + "fmt" + "net" + "net/url" + "os" + "strconv" + "strings" + "time" + + syslog "github.com/RackSec/srslog" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/daemon/logger" + "github.com/docker/docker/daemon/logger/loggerutils" + "github.com/docker/docker/pkg/urlutil" + "github.com/docker/go-connections/tlsconfig" +) + +const ( + name = "syslog" + secureProto = "tcp+tls" +) + +var facilities = map[string]syslog.Priority{ + "kern": syslog.LOG_KERN, + "user": syslog.LOG_USER, + "mail": syslog.LOG_MAIL, + "daemon": syslog.LOG_DAEMON, + "auth": syslog.LOG_AUTH, + "syslog": syslog.LOG_SYSLOG, + "lpr": syslog.LOG_LPR, + "news": syslog.LOG_NEWS, + "uucp": syslog.LOG_UUCP, + "cron": syslog.LOG_CRON, + "authpriv": syslog.LOG_AUTHPRIV, + "ftp": syslog.LOG_FTP, + "local0": syslog.LOG_LOCAL0, + "local1": syslog.LOG_LOCAL1, + "local2": syslog.LOG_LOCAL2, + "local3": syslog.LOG_LOCAL3, + "local4": syslog.LOG_LOCAL4, + "local5": syslog.LOG_LOCAL5, + "local6": syslog.LOG_LOCAL6, + "local7": syslog.LOG_LOCAL7, +} + +type syslogger struct { + writer *syslog.Writer +} + +func init() { + if err := logger.RegisterLogDriver(name, New); err != nil { + logrus.Fatal(err) + } + if err := logger.RegisterLogOptValidator(name, ValidateLogOpt); err != nil { + logrus.Fatal(err) + } +} + +// rsyslog uses the appname part of the syslog message to fill in the %syslogtag% template +// attribute in rsyslog.conf. To stay backward compatible with rfc3164, +// the tag is also used as the appname +func rfc5424formatterWithAppNameAsTag(p syslog.Priority, hostname, tag, content string) string { + timestamp := time.Now().Format(time.RFC3339) + pid := os.Getpid() + msg := fmt.Sprintf("<%d>%d %s %s %s %d %s - %s", + p, 1, timestamp, hostname, tag, pid, tag, content) + return msg +} + +// The timestamp field in rfc5424 is derived from rfc3339. Whereas rfc3339 makes allowances +// for multiple syntaxes, rfc5424 is stricter: the "TIME-SECFRAC" part is limited to at most +// six digits, i.e. microsecond resolution +func rfc5424microformatterWithAppNameAsTag(p syslog.Priority, hostname, tag, content string) string { + timestamp := time.Now().Format("2006-01-02T15:04:05.999999Z07:00") + pid := os.Getpid() + msg := fmt.Sprintf("<%d>%d %s %s %s %d %s - %s", + p, 1, timestamp, hostname, tag, pid, tag, content) + return msg +} + +// New creates a syslog logger using the configuration passed in on +// the context. Supported context configuration variables are +// syslog-address, syslog-facility, syslog-format.
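+// +// Accepted syslog-address forms, as parsed by parseAddress below (the values +// here are examples, not defaults): +// +// udp://192.168.0.42 (the port defaults to 514 when omitted) +// tcp+tls://syslog.example.com:6514 (pair with the syslog-tls-* options) +// unix:///dev/log (the socket path must already exist)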
+func New(info logger.Info) (logger.Logger, error) { + tag, err := loggerutils.ParseLogTag(info, loggerutils.DefaultTemplate) + if err != nil { + return nil, err + } + + proto, address, err := parseAddress(info.Config["syslog-address"]) + if err != nil { + return nil, err + } + + facility, err := parseFacility(info.Config["syslog-facility"]) + if err != nil { + return nil, err + } + + syslogFormatter, syslogFramer, err := parseLogFormat(info.Config["syslog-format"], proto) + if err != nil { + return nil, err + } + + var log *syslog.Writer + if proto == secureProto { + tlsConfig, tlsErr := parseTLSConfig(info.Config) + if tlsErr != nil { + return nil, tlsErr + } + log, err = syslog.DialWithTLSConfig(proto, address, facility, tag, tlsConfig) + } else { + log, err = syslog.Dial(proto, address, facility, tag) + } + + if err != nil { + return nil, err + } + + log.SetFormatter(syslogFormatter) + log.SetFramer(syslogFramer) + + return &syslogger{ + writer: log, + }, nil +} + +func (s *syslogger) Log(msg *logger.Message) error { + line := string(msg.Line) + logger.PutMessage(msg) + if msg.Source == "stderr" { + return s.writer.Err(line) + } + return s.writer.Info(line) +} + +func (s *syslogger) Close() error { + return s.writer.Close() +} + +func (s *syslogger) Name() string { + return name +} + +func parseAddress(address string) (string, string, error) { + if address == "" { + return "", "", nil + } + if !urlutil.IsTransportURL(address) { + return "", "", fmt.Errorf("syslog-address should be in form proto://address, got %v", address) + } + url, err := url.Parse(address) + if err != nil { + return "", "", err + } + + // unix and unixgram socket validation + if url.Scheme == "unix" || url.Scheme == "unixgram" { + if _, err := os.Stat(url.Path); err != nil { + return "", "", err + } + return url.Scheme, url.Path, nil + } + + // here we process tcp|udp + host := url.Host + if _, _, err := net.SplitHostPort(host); err != nil { + if !strings.Contains(err.Error(), "missing port in address") { + return "", "", err + } + host = host + ":514" + } + + return url.Scheme, host, nil +} + +// ValidateLogOpt looks for syslog specific log options +// syslog-address, syslog-facility. 
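parseAddress above accepts `proto://address`, verifies that unix/unixgram socket paths exist, and falls back to the default syslog port 514 when a tcp/udp address omits one. A small standalone sketch of that defaulting rule, reusing the same `net.SplitHostPort` error check (the addresses are invented); ValidateLogOpt, which follows, reruns this same parse when options are validated:

```go
package main

import (
	"fmt"
	"net"
	"net/url"
	"strings"
)

func main() {
	for _, addr := range []string{"tcp://logs.example.com", "udp://10.0.0.1:1514"} {
		u, err := url.Parse(addr)
		if err != nil {
			panic(err)
		}
		host := u.Host
		if _, _, err := net.SplitHostPort(host); err != nil {
			if !strings.Contains(err.Error(), "missing port in address") {
				panic(err)
			}
			host += ":514" // default syslog port, as in parseAddress above
		}
		fmt.Println(u.Scheme, host)
	}
}
```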
+func ValidateLogOpt(cfg map[string]string) error { + for key := range cfg { + switch key { + case "env": + case "env-regex": + case "labels": + case "syslog-address": + case "syslog-facility": + case "syslog-tls-ca-cert": + case "syslog-tls-cert": + case "syslog-tls-key": + case "syslog-tls-skip-verify": + case "tag": + case "syslog-format": + default: + return fmt.Errorf("unknown log opt '%s' for syslog log driver", key) + } + } + if _, _, err := parseAddress(cfg["syslog-address"]); err != nil { + return err + } + if _, err := parseFacility(cfg["syslog-facility"]); err != nil { + return err + } + if _, _, err := parseLogFormat(cfg["syslog-format"], ""); err != nil { + return err + } + return nil +} + +func parseFacility(facility string) (syslog.Priority, error) { + if facility == "" { + return syslog.LOG_DAEMON, nil + } + + if syslogFacility, valid := facilities[facility]; valid { + return syslogFacility, nil + } + + fInt, err := strconv.Atoi(facility) + if err == nil && 0 <= fInt && fInt <= 23 { + return syslog.Priority(fInt << 3), nil + } + + return syslog.Priority(0), errors.New("invalid syslog facility") +} + +func parseTLSConfig(cfg map[string]string) (*tls.Config, error) { + _, skipVerify := cfg["syslog-tls-skip-verify"] + + opts := tlsconfig.Options{ + CAFile: cfg["syslog-tls-ca-cert"], + CertFile: cfg["syslog-tls-cert"], + KeyFile: cfg["syslog-tls-key"], + InsecureSkipVerify: skipVerify, + } + + return tlsconfig.Client(opts) +} + +func parseLogFormat(logFormat, proto string) (syslog.Formatter, syslog.Framer, error) { + switch logFormat { + case "": + return syslog.UnixFormatter, syslog.DefaultFramer, nil + case "rfc3164": + return syslog.RFC3164Formatter, syslog.DefaultFramer, nil + case "rfc5424": + if proto == secureProto { + return rfc5424formatterWithAppNameAsTag, syslog.RFC5425MessageLengthFramer, nil + } + return rfc5424formatterWithAppNameAsTag, syslog.DefaultFramer, nil + case "rfc5424micro": + if proto == secureProto { + return rfc5424microformatterWithAppNameAsTag, syslog.RFC5425MessageLengthFramer, nil + } + return rfc5424microformatterWithAppNameAsTag, syslog.DefaultFramer, nil + default: + return nil, nil, errors.New("Invalid syslog format") + } + +} diff --git a/components/engine/daemon/logger/syslog/syslog_test.go b/components/engine/daemon/logger/syslog/syslog_test.go new file mode 100644 index 0000000000..501561064b --- /dev/null +++ b/components/engine/daemon/logger/syslog/syslog_test.go @@ -0,0 +1,62 @@ +package syslog + +import ( + "reflect" + "testing" + + syslog "github.com/RackSec/srslog" +) + +func functionMatches(expectedFun interface{}, actualFun interface{}) bool { + return reflect.ValueOf(expectedFun).Pointer() == reflect.ValueOf(actualFun).Pointer() +} + +func TestParseLogFormat(t *testing.T) { + formatter, framer, err := parseLogFormat("rfc5424", "udp") + if err != nil || !functionMatches(rfc5424formatterWithAppNameAsTag, formatter) || + !functionMatches(syslog.DefaultFramer, framer) { + t.Fatal("Failed to parse rfc5424 format", err, formatter, framer) + } + + formatter, framer, err = parseLogFormat("rfc5424", "tcp+tls") + if err != nil || !functionMatches(rfc5424formatterWithAppNameAsTag, formatter) || + !functionMatches(syslog.RFC5425MessageLengthFramer, framer) { + t.Fatal("Failed to parse rfc5424 format", err, formatter, framer) + } + + formatter, framer, err = parseLogFormat("rfc5424micro", "udp") + if err != nil || !functionMatches(rfc5424microformatterWithAppNameAsTag, formatter) || + !functionMatches(syslog.DefaultFramer, framer) { + 
t.Fatal("Failed to parse rfc5424 (microsecond) format", err, formatter, framer) + } + + formatter, framer, err = parseLogFormat("rfc5424micro", "tcp+tls") + if err != nil || !functionMatches(rfc5424microformatterWithAppNameAsTag, formatter) || + !functionMatches(syslog.RFC5425MessageLengthFramer, framer) { + t.Fatal("Failed to parse rfc5424 (microsecond) format", err, formatter, framer) + } + + formatter, framer, err = parseLogFormat("rfc3164", "") + if err != nil || !functionMatches(syslog.RFC3164Formatter, formatter) || + !functionMatches(syslog.DefaultFramer, framer) { + t.Fatal("Failed to parse rfc3164 format", err, formatter, framer) + } + + formatter, framer, err = parseLogFormat("", "") + if err != nil || !functionMatches(syslog.UnixFormatter, formatter) || + !functionMatches(syslog.DefaultFramer, framer) { + t.Fatal("Failed to parse empty format", err, formatter, framer) + } + + formatter, framer, err = parseLogFormat("invalid", "") + if err == nil { + t.Fatal("Expected an error parsing invalid format", err, formatter, framer) + } +} + +func TestValidateLogOptEmpty(t *testing.T) { + emptyConfig := make(map[string]string) + if err := ValidateLogOpt(emptyConfig); err != nil { + t.Fatal("Failed to parse empty config", err) + } +} diff --git a/components/engine/daemon/logs.go b/components/engine/daemon/logs.go new file mode 100644 index 0000000000..96e1b8a491 --- /dev/null +++ b/components/engine/daemon/logs.go @@ -0,0 +1,175 @@ +package daemon + +import ( + "errors" + "strconv" + "time" + + "golang.org/x/net/context" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/backend" + containertypes "github.com/docker/docker/api/types/container" + timetypes "github.com/docker/docker/api/types/time" + "github.com/docker/docker/container" + "github.com/docker/docker/daemon/logger" +) + +// ContainerLogs copies the container's log channel to the channel provided in +// the config. If ContainerLogs returns an error, no messages have been copied, +// and the channel will be closed without data. +// +// If it returns nil, the config channel will be active and return log +// messages until it runs out or the context is canceled.
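A hypothetical consumer of the contract documented above: a nil error means the channel stays open until it is drained or the context is canceled, and late errors arrive in-band as a message with `Err` set. The `Line` and `Timestamp` fields are assumptions about `backend.LogMessage` (only `Err` appears in this diff), and the sketch uses the same `x/net/context` package as the daemon code:

```go
package main

import (
	"fmt"

	"github.com/docker/docker/api/types/backend"
	"golang.org/x/net/context"
)

func consumeLogs(ctx context.Context, msgs <-chan *backend.LogMessage) error {
	for {
		select {
		case <-ctx.Done():
			return ctx.Err() // caller gave up; the daemon-side shim stops too
		case m, ok := <-msgs:
			if !ok {
				return nil // channel closed: the stream ran out normally
			}
			if m.Err != nil {
				return m.Err // late error delivered in-band, per the contract above
			}
			fmt.Printf("%s %s\n", m.Timestamp, m.Line)
		}
	}
}

func main() {
	// Demo with an already-closed channel: consumeLogs returns nil.
	ch := make(chan *backend.LogMessage)
	close(ch)
	_ = consumeLogs(context.Background(), ch)
}
```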
+func (daemon *Daemon) ContainerLogs(ctx context.Context, containerName string, config *types.ContainerLogsOptions) (<-chan *backend.LogMessage, error) { + lg := logrus.WithFields(logrus.Fields{ + "module": "daemon", + "method": "(*Daemon).ContainerLogs", + "container": containerName, + }) + + if !(config.ShowStdout || config.ShowStderr) { + return nil, errors.New("You must choose at least one stream") + } + container, err := daemon.GetContainer(containerName) + if err != nil { + return nil, err + } + + if container.RemovalInProgress || container.Dead { + return nil, errors.New("can not get logs from container which is dead or marked for removal") + } + + if container.HostConfig.LogConfig.Type == "none" { + return nil, logger.ErrReadLogsNotSupported + } + + cLog, cLogCreated, err := daemon.getLogger(container) + if err != nil { + return nil, err + } + if cLogCreated { + defer func() { + if err = cLog.Close(); err != nil { + logrus.Errorf("Error closing logger: %v", err) + } + }() + } + + logReader, ok := cLog.(logger.LogReader) + if !ok { + return nil, logger.ErrReadLogsNotSupported + } + + follow := config.Follow && !cLogCreated + tailLines, err := strconv.Atoi(config.Tail) + if err != nil { + tailLines = -1 + } + + var since time.Time + if config.Since != "" { + s, n, err := timetypes.ParseTimestamps(config.Since, 0) + if err != nil { + return nil, err + } + since = time.Unix(s, n) + } + + readConfig := logger.ReadConfig{ + Since: since, + Tail: tailLines, + Follow: follow, + } + + logs := logReader.ReadLogs(readConfig) + + // past this point, we can't possibly return any errors, so we can just + // start a goroutine and return to tell the caller not to expect errors + // (if the caller wants to give up on logs, they have to cancel the context) + // this goroutine functions as a shim between the logger and the caller. + messageChan := make(chan *backend.LogMessage, 1) + go func() { + // set up some defers + defer logs.Close() + + // close the messages channel. closing is the only way to signal above + // that we're done with logs (other than context cancel, I guess). + defer close(messageChan) + + lg.Debug("begin logs") + for { + select { + // I do not believe any error is possible as the system is + // currently designed, but we should be prepared to handle one + // anyway. If we do get an error, copy only the error field to a + // new object so we don't end up with partial data in the other fields + case err := <-logs.Err: + lg.Errorf("Error streaming logs: %v", err) + select { + case <-ctx.Done(): + case messageChan <- &backend.LogMessage{Err: err}: + } + return + case <-ctx.Done(): + lg.Debugf("logs: end stream, ctx is done: %v", ctx.Err()) + return + case msg, ok := <-logs.Msg: + // there is some kind of pool or ring buffer in the logger that + // produces these messages, and a possible future optimization + // might be to use that pool and reuse message objects + if !ok { + lg.Debug("end logs") + return + } + m := msg.AsLogMessage() // just a pointer conversion, does not copy data + + // there could be a case where the reader stops accepting + // messages and the context is canceled. we need to check that + // here, or otherwise we risk blocking forever on the message + // send.
+ select { + case <-ctx.Done(): + return + case messageChan <- m: + } + } + } + }() + return messageChan, nil +} + +func (daemon *Daemon) getLogger(container *container.Container) (l logger.Logger, created bool, err error) { + container.Lock() + if container.State.Running { + l = container.LogDriver + } + container.Unlock() + if l == nil { + created = true + l, err = container.StartLogger() + } + return +} + +// mergeAndVerifyLogConfig merges the daemon log config into the container's log config if the container's log driver is not specified, and validates the result. +func (daemon *Daemon) mergeAndVerifyLogConfig(cfg *containertypes.LogConfig) error { + if cfg.Type == "" { + cfg.Type = daemon.defaultLogConfig.Type + } + + if cfg.Config == nil { + cfg.Config = make(map[string]string) + } + + if cfg.Type == daemon.defaultLogConfig.Type { + for k, v := range daemon.defaultLogConfig.Config { + if _, ok := cfg.Config[k]; !ok { + cfg.Config[k] = v + } + } + } + + return logger.ValidateLogOpts(cfg.Type, cfg.Config) +} diff --git a/components/engine/daemon/logs_test.go b/components/engine/daemon/logs_test.go new file mode 100644 index 0000000000..0c36299e09 --- /dev/null +++ b/components/engine/daemon/logs_test.go @@ -0,0 +1,15 @@ +package daemon + +import ( + "testing" + + containertypes "github.com/docker/docker/api/types/container" +) + +func TestMergeAndVerifyLogConfigNilConfig(t *testing.T) { + d := &Daemon{defaultLogConfig: containertypes.LogConfig{Type: "json-file", Config: map[string]string{"max-file": "1"}}} + cfg := containertypes.LogConfig{Type: d.defaultLogConfig.Type} + if err := d.mergeAndVerifyLogConfig(&cfg); err != nil { + t.Fatal(err) + } +} diff --git a/components/engine/daemon/metrics.go b/components/engine/daemon/metrics.go new file mode 100644 index 0000000000..bf9e49d044 --- /dev/null +++ b/components/engine/daemon/metrics.go @@ -0,0 +1,174 @@ +package daemon + +import ( + "path/filepath" + "sync" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/mount" + "github.com/docker/docker/pkg/plugingetter" + "github.com/docker/go-metrics" + "github.com/pkg/errors" + "github.com/prometheus/client_golang/prometheus" +) + +const metricsPluginType = "MetricsCollector" + +var ( + containerActions metrics.LabeledTimer + containerStates metrics.LabeledGauge + imageActions metrics.LabeledTimer + networkActions metrics.LabeledTimer + engineInfo metrics.LabeledGauge + engineCpus metrics.Gauge + engineMemory metrics.Gauge + healthChecksCounter metrics.Counter + healthChecksFailedCounter metrics.Counter + + stateCtr *stateCounter +) + +func init() { + ns := metrics.NewNamespace("engine", "daemon", nil) + containerActions = ns.NewLabeledTimer("container_actions", "The number of seconds it takes to process each container action", "action") + for _, a := range []string{ + "start", + "changes", + "commit", + "create", + "delete", + } { + containerActions.WithValues(a).Update(0) + } + + networkActions = ns.NewLabeledTimer("network_actions", "The number of seconds it takes to process each network action", "action") + engineInfo = ns.NewLabeledGauge("engine", "The information related to the engine and the OS it is running on", metrics.Unit("info"), + "version", + "commit", + "architecture", + "graphdriver", + "kernel", "os", + "os_type", + "daemon_id", // ID is a randomly generated unique identifier (e.g.
UUID4) + ) + engineCpus = ns.NewGauge("engine_cpus", "The number of cpus that the host system of the engine has", metrics.Unit("cpus")) + engineMemory = ns.NewGauge("engine_memory", "The number of bytes of memory that the host system of the engine has", metrics.Bytes) + healthChecksCounter = ns.NewCounter("health_checks", "The total number of health checks") + healthChecksFailedCounter = ns.NewCounter("health_checks_failed", "The total number of failed health checks") + imageActions = ns.NewLabeledTimer("image_actions", "The number of seconds it takes to process each image action", "action") + + stateCtr = newStateCounter(ns.NewDesc("container_states", "The count of containers in various states", metrics.Unit("containers"), "state")) + ns.Add(stateCtr) + + metrics.Register(ns) +} + +type stateCounter struct { + mu sync.Mutex + states map[string]string + desc *prometheus.Desc +} + +func newStateCounter(desc *prometheus.Desc) *stateCounter { + return &stateCounter{ + states: make(map[string]string), + desc: desc, + } +} + +func (ctr *stateCounter) get() (running int, paused int, stopped int) { + ctr.mu.Lock() + defer ctr.mu.Unlock() + + states := map[string]int{ + "running": 0, + "paused": 0, + "stopped": 0, + } + for _, state := range ctr.states { + states[state]++ + } + return states["running"], states["paused"], states["stopped"] +} + +func (ctr *stateCounter) set(id, label string) { + ctr.mu.Lock() + ctr.states[id] = label + ctr.mu.Unlock() +} + +func (ctr *stateCounter) del(id string) { + ctr.mu.Lock() + delete(ctr.states, id) + ctr.mu.Unlock() +} + +func (ctr *stateCounter) Describe(ch chan<- *prometheus.Desc) { + ch <- ctr.desc +} + +func (ctr *stateCounter) Collect(ch chan<- prometheus.Metric) { + running, paused, stopped := ctr.get() + ch <- prometheus.MustNewConstMetric(ctr.desc, prometheus.GaugeValue, float64(running), "running") + ch <- prometheus.MustNewConstMetric(ctr.desc, prometheus.GaugeValue, float64(paused), "paused") + ch <- prometheus.MustNewConstMetric(ctr.desc, prometheus.GaugeValue, float64(stopped), "stopped") +} + +func (d *Daemon) cleanupMetricsPlugins() { + ls := d.PluginStore.GetAllManagedPluginsByCap(metricsPluginType) + var wg sync.WaitGroup + wg.Add(len(ls)) + + for _, p := range ls { + // pass p as an argument so each goroutine gets its own copy of the loop variable + go func(p plugingetter.CompatPlugin) { + defer wg.Done() + pluginStopMetricsCollection(p) + }(p) + } + wg.Wait() + + if d.metricsPluginListener != nil { + d.metricsPluginListener.Close() + } +} + +type metricsPlugin struct { + plugingetter.CompatPlugin +} + +func (p metricsPlugin) sock() string { + return "metrics.sock" +} + +func (p metricsPlugin) sockBase() string { + return filepath.Join(p.BasePath(), "run", "docker") +} + +func pluginStartMetricsCollection(p plugingetter.CompatPlugin) error { + type metricsPluginResponse struct { + Err string + } + var res metricsPluginResponse + if err := p.Client().Call(metricsPluginType+".StartMetrics", nil, &res); err != nil { + return errors.Wrap(err, "could not start metrics plugin") + } + if res.Err != "" { + return errors.New(res.Err) + } + return nil +} + +func pluginStopMetricsCollection(p plugingetter.CompatPlugin) { + if err := p.Client().Call(metricsPluginType+".StopMetrics", nil, nil); err != nil { + logrus.WithError(err).WithField("name", p.Name()).Error("error stopping metrics collector") + } + + mp := metricsPlugin{p} + sockPath := filepath.Join(mp.sockBase(), mp.sock()) + if err := mount.Unmount(sockPath); err != nil { + if mounted, _ := mount.Mounted(sockPath); mounted { + logrus.WithError(err).WithField("name", p.Name()).WithField("socket",
sockPath).Error("error unmounting metrics socket for plugin") + } + } + return +} diff --git a/components/engine/daemon/metrics_unix.go b/components/engine/daemon/metrics_unix.go new file mode 100644 index 0000000000..cda7355e8e --- /dev/null +++ b/components/engine/daemon/metrics_unix.go @@ -0,0 +1,86 @@ +// +build !windows + +package daemon + +import ( + "net" + "net/http" + "os" + "path/filepath" + "syscall" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/mount" + "github.com/docker/docker/pkg/plugingetter" + "github.com/docker/docker/pkg/plugins" + metrics "github.com/docker/go-metrics" + "github.com/pkg/errors" +) + +func (daemon *Daemon) listenMetricsSock() (string, error) { + path := filepath.Join(daemon.configStore.ExecRoot, "metrics.sock") + syscall.Unlink(path) + l, err := net.Listen("unix", path) + if err != nil { + return "", errors.Wrap(err, "error setting up metrics plugin listener") + } + + mux := http.NewServeMux() + mux.Handle("/metrics", metrics.Handler()) + go func() { + http.Serve(l, mux) + }() + daemon.metricsPluginListener = l + return path, nil +} + +func registerMetricsPluginCallback(getter plugingetter.PluginGetter, sockPath string) { + getter.Handle(metricsPluginType, func(name string, client *plugins.Client) { + // Use lookup since nothing in the system can really reference it, no need + // to protect against removal + p, err := getter.Get(name, metricsPluginType, plugingetter.Lookup) + if err != nil { + return + } + + mp := metricsPlugin{p} + sockBase := mp.sockBase() + if err := os.MkdirAll(sockBase, 0755); err != nil { + logrus.WithError(err).WithField("name", name).WithField("path", sockBase).Error("error creating metrics plugin base path") + return + } + + defer func() { + if err != nil { + os.RemoveAll(sockBase) + } + }() + + pluginSockPath := filepath.Join(sockBase, mp.sock()) + _, err = os.Stat(pluginSockPath) + if err == nil { + mount.Unmount(pluginSockPath) + } else { + logrus.WithField("path", pluginSockPath).Debugf("creating plugin socket") + f, err := os.OpenFile(pluginSockPath, os.O_CREATE, 0600) + if err != nil { + return + } + f.Close() + } + + if err := mount.Mount(sockPath, pluginSockPath, "none", "bind,ro"); err != nil { + logrus.WithError(err).WithField("name", name).Error("could not mount metrics socket to plugin") + return + } + + if err := pluginStartMetricsCollection(p); err != nil { + if err := mount.Unmount(pluginSockPath); err != nil { + if mounted, _ := mount.Mounted(pluginSockPath); mounted { + logrus.WithError(err).WithField("sock_path", pluginSockPath).Error("error unmounting metrics socket from plugin during cleanup") + } + } + logrus.WithError(err).WithField("name", name).Error("error while initializing metrics plugin") + } + }) +} diff --git a/components/engine/daemon/metrics_unsupported.go b/components/engine/daemon/metrics_unsupported.go new file mode 100644 index 0000000000..64dc1817a3 --- /dev/null +++ b/components/engine/daemon/metrics_unsupported.go @@ -0,0 +1,12 @@ +// +build windows + +package daemon + +import "github.com/docker/docker/pkg/plugingetter" + +func registerMetricsPluginCallback(getter plugingetter.PluginGetter, sockPath string) { +} + +func (daemon *Daemon) listenMetricsSock() (string, error) { + return "", nil +} diff --git a/components/engine/daemon/monitor.go b/components/engine/daemon/monitor.go new file mode 100644 index 0000000000..6861a5c3c8 --- /dev/null +++ b/components/engine/daemon/monitor.go @@ -0,0 +1,165 @@ +package daemon + +import ( + "errors" + "fmt" + "runtime" + "strconv" + 
"time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types" + "github.com/docker/docker/container" + "github.com/docker/docker/libcontainerd" + "github.com/docker/docker/restartmanager" +) + +func (daemon *Daemon) setStateCounter(c *container.Container) { + switch c.StateString() { + case "paused": + stateCtr.set(c.ID, "paused") + case "running": + stateCtr.set(c.ID, "running") + default: + stateCtr.set(c.ID, "stopped") + } +} + +// StateChanged updates daemon state changes from containerd +func (daemon *Daemon) StateChanged(id string, e libcontainerd.StateInfo) error { + c := daemon.containers.Get(id) + if c == nil { + return fmt.Errorf("no such container: %s", id) + } + + switch e.State { + case libcontainerd.StateOOM: + // StateOOM is Linux specific and should never be hit on Windows + if runtime.GOOS == "windows" { + return errors.New("Received StateOOM from libcontainerd on Windows. This should never happen.") + } + daemon.updateHealthMonitor(c) + daemon.LogContainerEvent(c, "oom") + case libcontainerd.StateExit: + + c.Lock() + c.StreamConfig.Wait() + c.Reset(false) + + restart, wait, err := c.RestartManager().ShouldRestart(e.ExitCode, c.HasBeenManuallyStopped, time.Since(c.StartedAt)) + if err == nil && restart { + c.RestartCount++ + c.SetRestarting(platformConstructExitStatus(e)) + } else { + c.SetStopped(platformConstructExitStatus(e)) + defer daemon.autoRemove(c) + } + + // cancel healthcheck here, they will be automatically + // restarted if/when the container is started again + daemon.stopHealthchecks(c) + attributes := map[string]string{ + "exitCode": strconv.Itoa(int(e.ExitCode)), + } + daemon.LogContainerEventWithAttributes(c, "die", attributes) + daemon.Cleanup(c) + + if err == nil && restart { + go func() { + err := <-wait + if err == nil { + if err = daemon.containerStart(c, "", "", false); err != nil { + logrus.Debugf("failed to restart container: %+v", err) + } + } + if err != nil { + c.SetStopped(platformConstructExitStatus(e)) + defer daemon.autoRemove(c) + if err != restartmanager.ErrRestartCanceled { + logrus.Errorf("restartmanger wait error: %+v", err) + } + } + }() + } + + daemon.setStateCounter(c) + + defer c.Unlock() + if err := c.ToDisk(); err != nil { + return err + } + return daemon.postRunProcessing(c, e) + case libcontainerd.StateExitProcess: + if execConfig := c.ExecCommands.Get(e.ProcessID); execConfig != nil { + ec := int(e.ExitCode) + execConfig.Lock() + defer execConfig.Unlock() + execConfig.ExitCode = &ec + execConfig.Running = false + execConfig.StreamConfig.Wait() + if err := execConfig.CloseStreams(); err != nil { + logrus.Errorf("failed to cleanup exec %s streams: %s", c.ID, err) + } + + // remove the exec command from the container's store only and not the + // daemon's store so that the exec command can be inspected. 
+ c.ExecCommands.Delete(execConfig.ID) + } else { + logrus.Warnf("Ignoring StateExitProcess for %v but no exec command found", e) + } + case libcontainerd.StateStart, libcontainerd.StateRestore: + // Container is already locked in this case + c.SetRunning(int(e.Pid), e.State == libcontainerd.StateStart) + c.HasBeenManuallyStopped = false + c.HasBeenStartedBefore = true + daemon.setStateCounter(c) + + if err := c.ToDisk(); err != nil { + c.Reset(false) + return err + } + daemon.initHealthMonitor(c) + + daemon.LogContainerEvent(c, "start") + case libcontainerd.StatePause: + // Container is already locked in this case + c.Paused = true + daemon.setStateCounter(c) + if err := c.ToDisk(); err != nil { + return err + } + daemon.updateHealthMonitor(c) + daemon.LogContainerEvent(c, "pause") + case libcontainerd.StateResume: + // Container is already locked in this case + c.Paused = false + daemon.setStateCounter(c) + if err := c.ToDisk(); err != nil { + return err + } + daemon.updateHealthMonitor(c) + daemon.LogContainerEvent(c, "unpause") + } + return nil +} + +func (daemon *Daemon) autoRemove(c *container.Container) { + c.Lock() + ar := c.HostConfig.AutoRemove + c.Unlock() + if !ar { + return + } + + var err error + if err = daemon.ContainerRm(c.ID, &types.ContainerRmConfig{ForceRemove: true, RemoveVolume: true}); err == nil { + return + } + if c := daemon.containers.Get(c.ID); c == nil { + return + } + + if err != nil { + logrus.WithError(err).WithField("container", c.ID).Error("error removing container") + } +} diff --git a/components/engine/daemon/monitor_linux.go b/components/engine/daemon/monitor_linux.go new file mode 100644 index 0000000000..09f5af50c6 --- /dev/null +++ b/components/engine/daemon/monitor_linux.go @@ -0,0 +1,19 @@ +package daemon + +import ( + "github.com/docker/docker/container" + "github.com/docker/docker/libcontainerd" +) + +// platformConstructExitStatus returns a platform specific exit status structure +func platformConstructExitStatus(e libcontainerd.StateInfo) *container.ExitStatus { + return &container.ExitStatus{ + ExitCode: int(e.ExitCode), + OOMKilled: e.OOMKilled, + } +} + +// postRunProcessing performs any processing needed on the container after it has stopped. +func (daemon *Daemon) postRunProcessing(container *container.Container, e libcontainerd.StateInfo) error { + return nil +} diff --git a/components/engine/daemon/monitor_solaris.go b/components/engine/daemon/monitor_solaris.go new file mode 100644 index 0000000000..5ccfada76a --- /dev/null +++ b/components/engine/daemon/monitor_solaris.go @@ -0,0 +1,18 @@ +package daemon + +import ( + "github.com/docker/docker/container" + "github.com/docker/docker/libcontainerd" +) + +// platformConstructExitStatus returns a platform specific exit status structure +func platformConstructExitStatus(e libcontainerd.StateInfo) *container.ExitStatus { + return &container.ExitStatus{ + ExitCode: int(e.ExitCode), + } +} + +// postRunProcessing performs any processing needed on the container after it has stopped.
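The `StateExit` branch above asks the RestartManager whether to restart and then waits on the returned channel (the restart backoff) before starting the container again; a nil value on the channel means "go ahead", anything else means the restart was canceled. A standalone sketch of that handshake, with `shouldRestart` standing in for the real `RestartManager.ShouldRestart`:

```go
package main

import (
	"fmt"
	"time"
)

// shouldRestart is a stand-in for RestartManager.ShouldRestart: it decides
// whether to restart and hands back a channel that fires when the backoff
// has elapsed (nil) or the restart was canceled (non-nil error).
func shouldRestart() (bool, chan error) {
	wait := make(chan error)
	go func() {
		time.Sleep(100 * time.Millisecond) // restart backoff
		wait <- nil                        // nil means "go ahead and restart"
	}()
	return true, wait
}

func main() {
	restart, wait := shouldRestart()
	if restart {
		if err := <-wait; err != nil {
			fmt.Println("restart canceled:", err)
			return
		}
		fmt.Println("backoff elapsed; restarting container")
	}
}
```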
+func (daemon *Daemon) postRunProcessing(container *container.Container, e libcontainerd.StateInfo) error { + return nil +} diff --git a/components/engine/daemon/monitor_windows.go b/components/engine/daemon/monitor_windows.go new file mode 100644 index 0000000000..9648b1b415 --- /dev/null +++ b/components/engine/daemon/monitor_windows.go @@ -0,0 +1,46 @@ +package daemon + +import ( + "fmt" + + "github.com/docker/docker/container" + "github.com/docker/docker/libcontainerd" +) + +// platformConstructExitStatus returns a platform specific exit status structure +func platformConstructExitStatus(e libcontainerd.StateInfo) *container.ExitStatus { + return &container.ExitStatus{ + ExitCode: int(e.ExitCode), + } +} + +// postRunProcessing performs any processing needed on the container after it has stopped. +func (daemon *Daemon) postRunProcessing(container *container.Container, e libcontainerd.StateInfo) error { + if e.ExitCode == 0 && e.UpdatePending { + spec, err := daemon.createSpec(container) + if err != nil { + return err + } + + newOpts := []libcontainerd.CreateOption{&libcontainerd.ServicingOption{ + IsServicing: true, + }} + + copts, err := daemon.getLibcontainerdCreateOptions(container) + if err != nil { + return err + } + + if copts != nil { + newOpts = append(newOpts, copts...) + } + + // Create a new servicing container, which will start, complete the update, and merge back the + // results if it succeeded, all as part of the below function call. + if err := daemon.containerd.Create((container.ID + "_servicing"), "", "", *spec, container.InitializeStdio, newOpts...); err != nil { + container.SetExitCode(-1) + return fmt.Errorf("Post-run update servicing failed: %s", err) + } + } + return nil +} diff --git a/components/engine/daemon/mounts.go b/components/engine/daemon/mounts.go new file mode 100644 index 0000000000..35c6ed59a6 --- /dev/null +++ b/components/engine/daemon/mounts.go @@ -0,0 +1,53 @@ +package daemon + +import ( + "fmt" + "strings" + + mounttypes "github.com/docker/docker/api/types/mount" + "github.com/docker/docker/container" + volumestore "github.com/docker/docker/volume/store" +) + +func (daemon *Daemon) prepareMountPoints(container *container.Container) error { + for _, config := range container.MountPoints { + if err := daemon.lazyInitializeVolume(container.ID, config); err != nil { + return err + } + } + return nil +} + +func (daemon *Daemon) removeMountPoints(container *container.Container, rm bool) error { + var rmErrors []string + for _, m := range container.MountPoints { + if m.Type != mounttypes.TypeVolume || m.Volume == nil { + continue + } + daemon.volumes.Dereference(m.Volume, container.ID) + if !rm { + continue + } + + // Do not remove named mountpoints + // these are mountpoints specified like `docker run -v <name>:/foo` + if m.Spec.Source != "" { + continue + } + + err := daemon.volumes.Remove(m.Volume) + // Ignore volume in use errors because having this + // volume being referenced by other container is + // not an error, but an implementation detail. + // This prevents docker from logging "ERROR: Volume in use" + // where there is another container using the volume.
+ if err != nil && !volumestore.IsInUse(err) { + rmErrors = append(rmErrors, err.Error()) + } + } + + if len(rmErrors) > 0 { + return fmt.Errorf("Error removing volumes:\n%v", strings.Join(rmErrors, "\n")) + } + return nil +} diff --git a/components/engine/daemon/names.go b/components/engine/daemon/names.go new file mode 100644 index 0000000000..5ce16624ad --- /dev/null +++ b/components/engine/daemon/names.go @@ -0,0 +1,116 @@ +package daemon + +import ( + "fmt" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api" + "github.com/docker/docker/container" + "github.com/docker/docker/pkg/namesgenerator" + "github.com/docker/docker/pkg/registrar" + "github.com/docker/docker/pkg/stringid" +) + +var ( + validContainerNameChars = api.RestrictedNameChars + validContainerNamePattern = api.RestrictedNamePattern +) + +func (daemon *Daemon) registerName(container *container.Container) error { + if daemon.Exists(container.ID) { + return fmt.Errorf("Container is already loaded") + } + if err := validateID(container.ID); err != nil { + return err + } + if container.Name == "" { + name, err := daemon.generateNewName(container.ID) + if err != nil { + return err + } + container.Name = name + + if err := container.ToDiskLocking(); err != nil { + logrus.Errorf("Error saving container name to disk: %v", err) + } + } + return daemon.nameIndex.Reserve(container.Name, container.ID) +} + +func (daemon *Daemon) generateIDAndName(name string) (string, string, error) { + var ( + err error + id = stringid.GenerateNonCryptoID() + ) + + if name == "" { + if name, err = daemon.generateNewName(id); err != nil { + return "", "", err + } + return id, name, nil + } + + if name, err = daemon.reserveName(id, name); err != nil { + return "", "", err + } + + return id, name, nil +} + +func (daemon *Daemon) reserveName(id, name string) (string, error) { + if !validContainerNamePattern.MatchString(strings.TrimPrefix(name, "/")) { + return "", fmt.Errorf("Invalid container name (%s), only %s are allowed", name, validContainerNameChars) + } + if name[0] != '/' { + name = "/" + name + } + + if err := daemon.nameIndex.Reserve(name, id); err != nil { + if err == registrar.ErrNameReserved { + id, err := daemon.nameIndex.Get(name) + if err != nil { + logrus.Errorf("got unexpected error while looking up reserved name: %v", err) + return "", err + } + return "", fmt.Errorf("Conflict. The container name %q is already in use by container %q. 
You have to remove (or rename) that container to be able to reuse that name.", name, id) + } + return "", fmt.Errorf("error reserving name: %q, error: %v", name, err) + } + return name, nil +} + +func (daemon *Daemon) releaseName(name string) { + daemon.nameIndex.Release(name) +} + +func (daemon *Daemon) generateNewName(id string) (string, error) { + var name string + for i := 0; i < 6; i++ { + name = namesgenerator.GetRandomName(i) + if name[0] != '/' { + name = "/" + name + } + + if err := daemon.nameIndex.Reserve(name, id); err != nil { + if err == registrar.ErrNameReserved { + continue + } + return "", err + } + return name, nil + } + + name = "/" + stringid.TruncateID(id) + if err := daemon.nameIndex.Reserve(name, id); err != nil { + return "", err + } + return name, nil +} + +func validateID(id string) error { + if id == "" { + return fmt.Errorf("Invalid empty id") + } + return nil +} diff --git a/components/engine/daemon/network.go b/components/engine/daemon/network.go new file mode 100644 index 0000000000..366c2a59e4 --- /dev/null +++ b/components/engine/daemon/network.go @@ -0,0 +1,567 @@ +package daemon + +import ( + "fmt" + "net" + "runtime" + "sort" + "strings" + "sync" + + "github.com/Sirupsen/logrus" + apierrors "github.com/docker/docker/api/errors" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/network" + clustertypes "github.com/docker/docker/daemon/cluster/provider" + "github.com/docker/docker/pkg/plugingetter" + "github.com/docker/docker/runconfig" + "github.com/docker/libnetwork" + lncluster "github.com/docker/libnetwork/cluster" + "github.com/docker/libnetwork/driverapi" + "github.com/docker/libnetwork/ipamapi" + networktypes "github.com/docker/libnetwork/types" + "github.com/pkg/errors" + "golang.org/x/net/context" +) + +// NetworkControllerEnabled checks if the networking stack is enabled. +// This feature depends on OS primitives and it's disabled in systems like Windows. +func (daemon *Daemon) NetworkControllerEnabled() bool { + return daemon.netController != nil +} + +// FindNetwork function finds a network for a given string that can represent network name or id +func (daemon *Daemon) FindNetwork(idName string) (libnetwork.Network, error) { + // Find by Name + n, err := daemon.GetNetworkByName(idName) + if err != nil && !isNoSuchNetworkError(err) { + return nil, err + } + + if n != nil { + return n, nil + } + + // Find by id + return daemon.GetNetworkByID(idName) +} + +func isNoSuchNetworkError(err error) bool { + _, ok := err.(libnetwork.ErrNoSuchNetwork) + return ok +} + +// GetNetworkByID function returns a network whose ID begins with the given prefix. +// It fails with an error if no matching, or more than one matching, networks are found. +func (daemon *Daemon) GetNetworkByID(partialID string) (libnetwork.Network, error) { + list := daemon.GetNetworksByID(partialID) + + if len(list) == 0 { + return nil, libnetwork.ErrNoSuchNetwork(partialID) + } + if len(list) > 1 { + return nil, libnetwork.ErrInvalidID(partialID) + } + return list[0], nil +} + +// GetNetworkByName function returns a network for a given network name. +// If no network name is given, the default network is returned. 
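`GetNetworkByID` above resolves an ID prefix against all networks: zero matches is a no-such-network error and more than one match is an ambiguity error. A standalone sketch of the same rule over plain strings (the IDs are invented):

```go
package main

import (
	"fmt"
	"strings"
)

// resolve applies the partial-ID rule used by GetNetworkByID above.
func resolve(ids []string, prefix string) (string, error) {
	var matches []string
	for _, id := range ids {
		if strings.HasPrefix(id, prefix) {
			matches = append(matches, id)
		}
	}
	switch len(matches) {
	case 0:
		return "", fmt.Errorf("network %s not found", prefix)
	case 1:
		return matches[0], nil
	default:
		return "", fmt.Errorf("network ID %s is ambiguous (%d matches)", prefix, len(matches))
	}
}

func main() {
	ids := []string{"3f8a91d2", "3f21bb07", "9c44e1a0"}
	for _, p := range []string{"9c", "3f", "ab"} {
		id, err := resolve(ids, p)
		fmt.Println(p, "->", id, err)
	}
}
```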
+func (daemon *Daemon) GetNetworkByName(name string) (libnetwork.Network, error) { + c := daemon.netController + if c == nil { + return nil, libnetwork.ErrNoSuchNetwork(name) + } + if name == "" { + name = c.Config().Daemon.DefaultNetwork + } + return c.NetworkByName(name) +} + +// GetNetworksByID returns a list of networks whose ID partially matches zero or more networks +func (daemon *Daemon) GetNetworksByID(partialID string) []libnetwork.Network { + c := daemon.netController + if c == nil { + return nil + } + list := []libnetwork.Network{} + l := func(nw libnetwork.Network) bool { + if strings.HasPrefix(nw.ID(), partialID) { + list = append(list, nw) + } + return false + } + c.WalkNetworks(l) + + return list +} + +// getAllNetworks returns a list containing all networks +func (daemon *Daemon) getAllNetworks() []libnetwork.Network { + return daemon.netController.Networks() +} + +type ingressJob struct { + create *clustertypes.NetworkCreateRequest + ip net.IP + jobDone chan struct{} +} + +var ( + ingressWorkerOnce sync.Once + ingressJobsChannel chan *ingressJob + ingressID string +) + +func (daemon *Daemon) startIngressWorker() { + ingressJobsChannel = make(chan *ingressJob, 100) + go func() { + for { + select { + case r := <-ingressJobsChannel: + if r.create != nil { + daemon.setupIngress(r.create, r.ip, ingressID) + ingressID = r.create.ID + } else { + daemon.releaseIngress(ingressID) + ingressID = "" + } + close(r.jobDone) + } + } + }() +} + +// enqueueIngressJob adds an ingress add/rm request to the worker queue. +// It guarantees the worker is started. +func (daemon *Daemon) enqueueIngressJob(job *ingressJob) { + ingressWorkerOnce.Do(daemon.startIngressWorker) + ingressJobsChannel <- job +} + +// SetupIngress sets up ingress networking. +// The function returns a channel which will signal the caller when the programming is completed. +func (daemon *Daemon) SetupIngress(create clustertypes.NetworkCreateRequest, nodeIP string) (<-chan struct{}, error) { + ip, _, err := net.ParseCIDR(nodeIP) + if err != nil { + return nil, err + } + done := make(chan struct{}) + daemon.enqueueIngressJob(&ingressJob{&create, ip, done}) + return done, nil +} + +// ReleaseIngress releases the ingress networking. +// The function returns a channel which will signal the caller when the programming is completed. +func (daemon *Daemon) ReleaseIngress() (<-chan struct{}, error) { + done := make(chan struct{}) + daemon.enqueueIngressJob(&ingressJob{nil, nil, done}) + return done, nil +} + +func (daemon *Daemon) setupIngress(create *clustertypes.NetworkCreateRequest, ip net.IP, staleID string) { + controller := daemon.netController + controller.AgentInitWait() + + if staleID != "" && staleID != create.ID { + daemon.releaseIngress(staleID) + } + + if _, err := daemon.createNetwork(create.NetworkCreateRequest, create.ID, true); err != nil { + // If it is any error other than an already-exists + // error, log it and return. + if _, ok := err.(libnetwork.NetworkNameError); !ok { + logrus.Errorf("Failed creating ingress network: %v", err) + return + } + // Otherwise continue down the call to create or recreate sandbox.
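The ingress worker above serializes all ingress setup/release through a single goroutine: `sync.Once` starts the worker lazily, jobs arrive on a buffered channel, and each job carries a `jobDone` channel the worker closes so callers can block until completion. A standalone sketch of the same pattern (all names invented):

```go
package main

import (
	"fmt"
	"sync"
)

type job struct {
	name string
	done chan struct{}
}

var (
	once sync.Once
	jobs = make(chan job, 100)
)

// enqueue lazily starts the single worker goroutine, submits a job, and
// returns a channel the caller can wait on.
func enqueue(name string) <-chan struct{} {
	once.Do(func() {
		go func() {
			for j := range jobs {
				// all state mutation happens here, single-threaded
				fmt.Println("processing", j.name)
				close(j.done)
			}
		}()
	})
	j := job{name: name, done: make(chan struct{})}
	jobs <- j
	return j.done
}

func main() {
	<-enqueue("setup")
	<-enqueue("release")
}
```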
+ } + + n, err := daemon.GetNetworkByID(create.ID) + if err != nil { + logrus.Errorf("Failed getting ingress network by id after creating: %v", err) + return + } + + sb, err := controller.NewSandbox("ingress-sbox", libnetwork.OptionIngress()) + if err != nil { + if _, ok := err.(networktypes.ForbiddenError); !ok { + logrus.Errorf("Failed creating ingress sandbox: %v", err) + } + return + } + + ep, err := n.CreateEndpoint("ingress-endpoint", libnetwork.CreateOptionIpam(ip, nil, nil, nil)) + if err != nil { + logrus.Errorf("Failed creating ingress endpoint: %v", err) + return + } + + if err := ep.Join(sb, nil); err != nil { + logrus.Errorf("Failed joining ingress sandbox to ingress endpoint: %v", err) + return + } + + if err := sb.EnableService(); err != nil { + logrus.Errorf("Failed enabling service for ingress sandbox: %v", err) + } +} + +func (daemon *Daemon) releaseIngress(id string) { + controller := daemon.netController + if err := controller.SandboxDestroy("ingress-sbox"); err != nil { + logrus.Errorf("Failed to delete ingress sandbox: %v", err) + } + + if id == "" { + return + } + + n, err := controller.NetworkByID(id) + if err != nil { + logrus.Errorf("failed to retrieve ingress network %s: %v", id, err) + return + } + + for _, ep := range n.Endpoints() { + if err := ep.Delete(true); err != nil { + logrus.Errorf("Failed to delete endpoint %s (%s): %v", ep.Name(), ep.ID(), err) + return + } + } + + if err := n.Delete(); err != nil { + logrus.Errorf("Failed to delete ingress network %s: %v", n.ID(), err) + return + } + return +} + +// SetNetworkBootstrapKeys sets the bootstrap keys. +func (daemon *Daemon) SetNetworkBootstrapKeys(keys []*networktypes.EncryptionKey) error { + err := daemon.netController.SetKeys(keys) + if err == nil { + // Upon successful key setting dispatch the keys available event + daemon.cluster.SendClusterEvent(lncluster.EventNetworkKeysAvailable) + } + return err +} + +// UpdateAttachment notifies the attacher about the attachment config. +func (daemon *Daemon) UpdateAttachment(networkName, networkID, containerID string, config *network.NetworkingConfig) error { + if daemon.clusterProvider == nil { + return fmt.Errorf("cluster provider is not initialized") + } + + if err := daemon.clusterProvider.UpdateAttachment(networkName, containerID, config); err != nil { + return daemon.clusterProvider.UpdateAttachment(networkID, containerID, config) + } + + return nil +} + +// WaitForDetachment makes the cluster manager wait for detachment of +// the container from the network. +func (daemon *Daemon) WaitForDetachment(ctx context.Context, networkName, networkID, taskID, containerID string) error { + if daemon.clusterProvider == nil { + return fmt.Errorf("cluster provider is not initialized") + } + + return daemon.clusterProvider.WaitForDetachment(ctx, networkName, networkID, taskID, containerID) +} + +// CreateManagedNetwork creates an agent network.
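In `createNetwork`, which follows, an existing network name is a hard error only when the caller sets `CheckDuplicate`; otherwise the create succeeds and the response carries a warning. A hedged fragment showing how a hypothetical caller would exercise that contract (it assumes `types.NetworkCreateRequest` embeds `types.NetworkCreate`, as in the API types this diff compiles against):

```go
// Hedged fragment, not from the diff: a hypothetical caller with a
// *Daemon in scope; "app-net" is an invented network name.
func createAppNet(daemon *Daemon) error {
	resp, err := daemon.CreateNetwork(types.NetworkCreateRequest{
		Name: "app-net",
		NetworkCreate: types.NetworkCreate{
			Driver:         "bridge",
			CheckDuplicate: true, // fail instead of warn if "app-net" already exists
		},
	})
	if err != nil {
		return err // with CheckDuplicate set, a name clash surfaces here
	}
	if resp.Warning != "" {
		logrus.Warn(resp.Warning) // without CheckDuplicate, clashes come back as warnings
	}
	return nil
}
```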
+func (daemon *Daemon) CreateManagedNetwork(create clustertypes.NetworkCreateRequest) error { + _, err := daemon.createNetwork(create.NetworkCreateRequest, create.ID, true) + return err +} + +// CreateNetwork creates a network with the given name, driver and other optional parameters +func (daemon *Daemon) CreateNetwork(create types.NetworkCreateRequest) (*types.NetworkCreateResponse, error) { + resp, err := daemon.createNetwork(create, "", false) + if err != nil { + return nil, err + } + return resp, err +} + +func (daemon *Daemon) createNetwork(create types.NetworkCreateRequest, id string, agent bool) (*types.NetworkCreateResponse, error) { + if runconfig.IsPreDefinedNetwork(create.Name) && !agent { + err := fmt.Errorf("%s is a pre-defined network and cannot be created", create.Name) + return nil, apierrors.NewRequestForbiddenError(err) + } + + var warning string + nw, err := daemon.GetNetworkByName(create.Name) + if err != nil { + if _, ok := err.(libnetwork.ErrNoSuchNetwork); !ok { + return nil, err + } + } + if nw != nil { + // check if user defined CheckDuplicate, if set true, return err + // otherwise prepare a warning message + if create.CheckDuplicate { + return nil, libnetwork.NetworkNameError(create.Name) + } + warning = fmt.Sprintf("Network with name %s (id : %s) already exists", nw.Name(), nw.ID()) + } + + c := daemon.netController + driver := create.Driver + if driver == "" { + driver = c.Config().Daemon.DefaultDriver + } + + nwOptions := []libnetwork.NetworkOption{ + libnetwork.NetworkOptionEnableIPv6(create.EnableIPv6), + libnetwork.NetworkOptionDriverOpts(create.Options), + libnetwork.NetworkOptionLabels(create.Labels), + libnetwork.NetworkOptionAttachable(create.Attachable), + libnetwork.NetworkOptionIngress(create.Ingress), + libnetwork.NetworkOptionScope(create.Scope), + } + + if create.ConfigOnly { + nwOptions = append(nwOptions, libnetwork.NetworkOptionConfigOnly()) + } + + if create.IPAM != nil { + ipam := create.IPAM + v4Conf, v6Conf, err := getIpamConfig(ipam.Config) + if err != nil { + return nil, err + } + nwOptions = append(nwOptions, libnetwork.NetworkOptionIpam(ipam.Driver, "", v4Conf, v6Conf, ipam.Options)) + } + + if create.Internal { + nwOptions = append(nwOptions, libnetwork.NetworkOptionInternalNetwork()) + } + if agent { + nwOptions = append(nwOptions, libnetwork.NetworkOptionDynamic()) + nwOptions = append(nwOptions, libnetwork.NetworkOptionPersist(false)) + } + + if create.ConfigFrom != nil { + nwOptions = append(nwOptions, libnetwork.NetworkOptionConfigFrom(create.ConfigFrom.Network)) + } + + n, err := c.NewNetwork(driver, create.Name, id, nwOptions...) + if err != nil { + if _, ok := err.(libnetwork.ErrDataStoreNotInitialized); ok { + return nil, errors.New("This node is not a swarm manager. 
Use \"docker swarm init\" or \"docker swarm join\" to connect this node to swarm and try again.") + } + return nil, err + } + + daemon.pluginRefCount(driver, driverapi.NetworkPluginEndpointType, plugingetter.Acquire) + if create.IPAM != nil { + daemon.pluginRefCount(create.IPAM.Driver, ipamapi.PluginEndpointType, plugingetter.Acquire) + } + daemon.LogNetworkEvent(n, "create") + + return &types.NetworkCreateResponse{ + ID: n.ID(), + Warning: warning, + }, nil +} + +func (daemon *Daemon) pluginRefCount(driver, capability string, mode int) { + var builtinDrivers []string + + if capability == driverapi.NetworkPluginEndpointType { + builtinDrivers = daemon.netController.BuiltinDrivers() + } else if capability == ipamapi.PluginEndpointType { + builtinDrivers = daemon.netController.BuiltinIPAMDrivers() + } + + for _, d := range builtinDrivers { + if d == driver { + return + } + } + + if daemon.PluginStore != nil { + _, err := daemon.PluginStore.Get(driver, capability, mode) + if err != nil { + logrus.WithError(err).WithFields(logrus.Fields{"mode": mode, "driver": driver}).Error("Error handling plugin refcount operation") + } + } +} + +func getIpamConfig(data []network.IPAMConfig) ([]*libnetwork.IpamConf, []*libnetwork.IpamConf, error) { + ipamV4Cfg := []*libnetwork.IpamConf{} + ipamV6Cfg := []*libnetwork.IpamConf{} + for _, d := range data { + iCfg := libnetwork.IpamConf{} + iCfg.PreferredPool = d.Subnet + iCfg.SubPool = d.IPRange + iCfg.Gateway = d.Gateway + iCfg.AuxAddresses = d.AuxAddress + ip, _, err := net.ParseCIDR(d.Subnet) + if err != nil { + return nil, nil, fmt.Errorf("Invalid subnet %s: %v", d.Subnet, err) + } + if ip.To4() != nil { + ipamV4Cfg = append(ipamV4Cfg, &iCfg) + } else { + ipamV6Cfg = append(ipamV6Cfg, &iCfg) + } + } + return ipamV4Cfg, ipamV6Cfg, nil +} + +// UpdateContainerServiceConfig updates a service configuration. +func (daemon *Daemon) UpdateContainerServiceConfig(containerName string, serviceConfig *clustertypes.ServiceConfig) error { + container, err := daemon.GetContainer(containerName) + if err != nil { + return err + } + + container.NetworkSettings.Service = serviceConfig + return nil +} + +// ConnectContainerToNetwork connects the given container to the given +// network. If either cannot be found, an err is returned. If the +// network cannot be set up, an err is returned. +func (daemon *Daemon) ConnectContainerToNetwork(containerName, networkName string, endpointConfig *network.EndpointSettings) error { + if runtime.GOOS == "solaris" { + return errors.New("docker network connect is unsupported on Solaris platform") + } + container, err := daemon.GetContainer(containerName) + if err != nil { + return err + } + return daemon.ConnectToNetwork(container, networkName, endpointConfig) +} + +// DisconnectContainerFromNetwork disconnects the given container from +// the given network. If either cannot be found, an err is returned. +func (daemon *Daemon) DisconnectContainerFromNetwork(containerName string, networkName string, force bool) error { + if runtime.GOOS == "solaris" { + return errors.New("docker network disconnect is unsupported on Solaris platform") + } + container, err := daemon.GetContainer(containerName) + if err != nil { + if force { + return daemon.ForceEndpointDelete(containerName, networkName) + } + return err + } + return daemon.DisconnectFromNetwork(container, networkName, force) +} + +// GetNetworkDriverList returns the list of plugin drivers +// registered for network.
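`GetNetworkDriverList`, defined next, builds its result from three sources: built-in drivers, managed plugins, and drivers referenced by existing networks, deduplicated with a set and then sorted. A standalone sketch of that accumulation (the driver names are invented):

```go
package main

import (
	"fmt"
	"sort"
)

func main() {
	builtin := []string{"bridge", "host", "null", "overlay"}
	plugins := []string{"weave"}
	inUse := []string{"bridge", "weave", "macvlan"} // drivers of existing networks

	seen := make(map[string]bool)
	var list []string
	add := func(names ...string) {
		for _, n := range names {
			if !seen[n] {
				seen[n] = true
				list = append(list, n)
			}
		}
	}
	add(builtin...)
	add(plugins...)
	add(inUse...)
	sort.Strings(list)
	fmt.Println(list) // [bridge host macvlan null overlay weave]
}
```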
+func (daemon *Daemon) GetNetworkDriverList() []string { + if !daemon.NetworkControllerEnabled() { + return nil + } + + pluginList := daemon.netController.BuiltinDrivers() + + managedPlugins := daemon.PluginStore.GetAllManagedPluginsByCap(driverapi.NetworkPluginEndpointType) + + for _, plugin := range managedPlugins { + pluginList = append(pluginList, plugin.Name()) + } + + pluginMap := make(map[string]bool) + for _, plugin := range pluginList { + pluginMap[plugin] = true + } + + networks := daemon.netController.Networks() + + for _, network := range networks { + if !pluginMap[network.Type()] { + pluginList = append(pluginList, network.Type()) + pluginMap[network.Type()] = true + } + } + + sort.Strings(pluginList) + + return pluginList +} + +// DeleteManagedNetwork deletes an agent network. +func (daemon *Daemon) DeleteManagedNetwork(networkID string) error { + return daemon.deleteNetwork(networkID, true) +} + +// DeleteNetwork destroys a network unless it's one of docker's predefined networks. +func (daemon *Daemon) DeleteNetwork(networkID string) error { + return daemon.deleteNetwork(networkID, false) +} + +func (daemon *Daemon) deleteNetwork(networkID string, dynamic bool) error { + nw, err := daemon.FindNetwork(networkID) + if err != nil { + return err + } + + if runconfig.IsPreDefinedNetwork(nw.Name()) && !dynamic { + err := fmt.Errorf("%s is a pre-defined network and cannot be removed", nw.Name()) + return apierrors.NewRequestForbiddenError(err) + } + + if dynamic && !nw.Info().Dynamic() { + if runconfig.IsPreDefinedNetwork(nw.Name()) { + // Predefined networks now support swarm services. Make this + // a no-op when cluster requests to remove the predefined network. + return nil + } + err := fmt.Errorf("%s is not a dynamic network", nw.Name()) + return apierrors.NewRequestForbiddenError(err) + } + + if err := nw.Delete(); err != nil { + return err + } + + // If this is not a configuration only network, we need to + // update the corresponding remote drivers' reference counts + if !nw.Info().ConfigOnly() { + daemon.pluginRefCount(nw.Type(), driverapi.NetworkPluginEndpointType, plugingetter.Release) + ipamType, _, _, _ := nw.Info().IpamConfig() + daemon.pluginRefCount(ipamType, ipamapi.PluginEndpointType, plugingetter.Release) + daemon.LogNetworkEvent(nw, "destroy") + } + + return nil +} + +// GetNetworks returns a list of all networks +func (daemon *Daemon) GetNetworks() []libnetwork.Network { + return daemon.getAllNetworks() +} + +// clearAttachableNetworks removes the attachable networks +// after disconnecting any connected container +func (daemon *Daemon) clearAttachableNetworks() { + for _, n := range daemon.GetNetworks() { + if !n.Info().Attachable() { + continue + } + for _, ep := range n.Endpoints() { + epInfo := ep.Info() + if epInfo == nil { + continue + } + sb := epInfo.Sandbox() + if sb == nil { + continue + } + containerID := sb.ContainerID() + if err := daemon.DisconnectContainerFromNetwork(containerID, n.ID(), true); err != nil { + logrus.Warnf("Failed to disconnect container %s from swarm network %s on cluster leave: %v", + containerID, n.Name(), err) + } + } + if err := daemon.DeleteManagedNetwork(n.ID()); err != nil { + logrus.Warnf("Failed to remove swarm network %s on cluster leave: %v", n.Name(), err) + } + } +} diff --git a/components/engine/daemon/network/settings.go b/components/engine/daemon/network/settings.go new file mode 100644 index 0000000000..8f6b7dd59e --- /dev/null +++ b/components/engine/daemon/network/settings.go @@ -0,0 +1,33 @@ +package network + 
+import ( + networktypes "github.com/docker/docker/api/types/network" + clustertypes "github.com/docker/docker/daemon/cluster/provider" + "github.com/docker/go-connections/nat" +) + +// Settings stores configuration details about the daemon network config +// TODO Windows. Many of these fields can be factored out. +type Settings struct { + Bridge string + SandboxID string + HairpinMode bool + LinkLocalIPv6Address string + LinkLocalIPv6PrefixLen int + Networks map[string]*EndpointSettings + Service *clustertypes.ServiceConfig + Ports nat.PortMap + SandboxKey string + SecondaryIPAddresses []networktypes.Address + SecondaryIPv6Addresses []networktypes.Address + IsAnonymousEndpoint bool + HasSwarmEndpoint bool +} + +// EndpointSettings is a package local wrapper for +// networktypes.EndpointSettings which stores Endpoint state that +// needs to be persisted to disk but not exposed in the api. +type EndpointSettings struct { + *networktypes.EndpointSettings + IPAMOperational bool +} diff --git a/components/engine/daemon/oci_linux.go b/components/engine/daemon/oci_linux.go new file mode 100644 index 0000000000..55a6cd8ae0 --- /dev/null +++ b/components/engine/daemon/oci_linux.go @@ -0,0 +1,839 @@ +package daemon + +import ( + "fmt" + "io" + "os" + "os/exec" + "path/filepath" + "regexp" + "sort" + "strconv" + "strings" + + "github.com/Sirupsen/logrus" + containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/docker/container" + "github.com/docker/docker/daemon/caps" + daemonconfig "github.com/docker/docker/daemon/config" + "github.com/docker/docker/oci" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/mount" + "github.com/docker/docker/pkg/stringutils" + "github.com/docker/docker/pkg/symlink" + "github.com/docker/docker/volume" + "github.com/opencontainers/runc/libcontainer/apparmor" + "github.com/opencontainers/runc/libcontainer/cgroups" + "github.com/opencontainers/runc/libcontainer/devices" + "github.com/opencontainers/runc/libcontainer/user" + specs "github.com/opencontainers/runtime-spec/specs-go" +) + +var ( + deviceCgroupRuleRegex = regexp.MustCompile("^([acb]) ([0-9]+|\\*):([0-9]+|\\*) ([rwm]{1,3})$") +) + +func setResources(s *specs.Spec, r containertypes.Resources) error { + weightDevices, err := getBlkioWeightDevices(r) + if err != nil { + return err + } + readBpsDevice, err := getBlkioThrottleDevices(r.BlkioDeviceReadBps) + if err != nil { + return err + } + writeBpsDevice, err := getBlkioThrottleDevices(r.BlkioDeviceWriteBps) + if err != nil { + return err + } + readIOpsDevice, err := getBlkioThrottleDevices(r.BlkioDeviceReadIOps) + if err != nil { + return err + } + writeIOpsDevice, err := getBlkioThrottleDevices(r.BlkioDeviceWriteIOps) + if err != nil { + return err + } + + memoryRes := getMemoryResources(r) + cpuRes, err := getCPUResources(r) + if err != nil { + return err + } + blkioWeight := r.BlkioWeight + + specResources := &specs.LinuxResources{ + Memory: memoryRes, + CPU: cpuRes, + BlockIO: &specs.LinuxBlockIO{ + Weight: &blkioWeight, + WeightDevice: weightDevices, + ThrottleReadBpsDevice: readBpsDevice, + ThrottleWriteBpsDevice: writeBpsDevice, + ThrottleReadIOPSDevice: readIOpsDevice, + ThrottleWriteIOPSDevice: writeIOpsDevice, + }, + DisableOOMKiller: r.OomKillDisable, + Pids: &specs.LinuxPids{ + Limit: r.PidsLimit, + }, + } + + if s.Linux.Resources != nil && len(s.Linux.Resources.Devices) > 0 { + specResources.Devices = s.Linux.Resources.Devices + } + + s.Linux.Resources = specResources + return nil +} + +func
setDevices(s *specs.Spec, c *container.Container) error { + // Build lists of devices allowed and created within the container. + var devs []specs.LinuxDevice + devPermissions := s.Linux.Resources.Devices + if c.HostConfig.Privileged { + hostDevices, err := devices.HostDevices() + if err != nil { + return err + } + for _, d := range hostDevices { + devs = append(devs, oci.Device(d)) + } + devPermissions = []specs.LinuxDeviceCgroup{ + { + Allow: true, + Access: "rwm", + }, + } + } else { + for _, deviceMapping := range c.HostConfig.Devices { + d, dPermissions, err := oci.DevicesFromPath(deviceMapping.PathOnHost, deviceMapping.PathInContainer, deviceMapping.CgroupPermissions) + if err != nil { + return err + } + devs = append(devs, d...) + devPermissions = append(devPermissions, dPermissions...) + } + + for _, deviceCgroupRule := range c.HostConfig.DeviceCgroupRules { + ss := deviceCgroupRuleRegex.FindAllStringSubmatch(deviceCgroupRule, -1) + if len(ss) == 0 || len(ss[0]) != 5 { + return fmt.Errorf("invalid device cgroup rule format: '%s'", deviceCgroupRule) + } + matches := ss[0] + + dPermissions := specs.LinuxDeviceCgroup{ + Allow: true, + Type: matches[1], + Access: matches[4], + } + if matches[2] == "*" { + major := int64(-1) + dPermissions.Major = &major + } else { + major, err := strconv.ParseInt(matches[2], 10, 64) + if err != nil { + return fmt.Errorf("invalid major value in device cgroup rule format: '%s'", deviceCgroupRule) + } + dPermissions.Major = &major + } + if matches[3] == "*" { + minor := int64(-1) + dPermissions.Minor = &minor + } else { + minor, err := strconv.ParseInt(matches[3], 10, 64) + if err != nil { + return fmt.Errorf("invalid minor value in device cgroup rule format: '%s'", deviceCgroupRule) + } + dPermissions.Minor = &minor + } + devPermissions = append(devPermissions, dPermissions) + } + } + + s.Linux.Devices = append(s.Linux.Devices, devs...)
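The device-cgroup-rule parsing above follows the `TYPE MAJOR:MINOR ACCESS` grammar, where `*` means "any" and is encoded as -1 in the resulting `LinuxDeviceCgroup`. A standalone demo of the same regex against valid and invalid rules:

```go
package main

import (
	"fmt"
	"regexp"
	"strconv"
)

// Same grammar as deviceCgroupRuleRegex above: TYPE MAJOR:MINOR ACCESS.
var ruleRegex = regexp.MustCompile(`^([acb]) ([0-9]+|\*):([0-9]+|\*) ([rwm]{1,3})$`)

// parseNum maps "*" to -1, mirroring the Major/Minor encoding above.
func parseNum(s string) int64 {
	if s == "*" {
		return -1
	}
	n, _ := strconv.ParseInt(s, 10, 64) // already validated by the regex
	return n
}

func main() {
	for _, rule := range []string{"c 1:3 rwm", "b *:* rm", "x 1:1 q"} {
		m := ruleRegex.FindStringSubmatch(rule)
		if m == nil {
			fmt.Printf("%q: invalid rule\n", rule)
			continue
		}
		fmt.Printf("type=%s major=%d minor=%d access=%s\n",
			m[1], parseNum(m[2]), parseNum(m[3]), m[4])
	}
}
```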
+ s.Linux.Resources.Devices = devPermissions + return nil +} + +func setRlimits(daemon *Daemon, s *specs.Spec, c *container.Container) error { + var rlimits []specs.LinuxRlimit + + // We want to leave the original HostConfig alone so make a copy here + hostConfig := *c.HostConfig + // Merge with the daemon defaults + daemon.mergeUlimits(&hostConfig) + for _, ul := range hostConfig.Ulimits { + rlimits = append(rlimits, specs.LinuxRlimit{ + Type: "RLIMIT_" + strings.ToUpper(ul.Name), + Soft: uint64(ul.Soft), + Hard: uint64(ul.Hard), + }) + } + + s.Process.Rlimits = rlimits + return nil +} + +func setUser(s *specs.Spec, c *container.Container) error { + uid, gid, additionalGids, err := getUser(c, c.Config.User) + if err != nil { + return err + } + s.Process.User.UID = uid + s.Process.User.GID = gid + s.Process.User.AdditionalGids = additionalGids + return nil +} + +func readUserFile(c *container.Container, p string) (io.ReadCloser, error) { + fp, err := symlink.FollowSymlinkInScope(filepath.Join(c.BaseFS, p), c.BaseFS) + if err != nil { + return nil, err + } + return os.Open(fp) +} + +func getUser(c *container.Container, username string) (uint32, uint32, []uint32, error) { + passwdPath, err := user.GetPasswdPath() + if err != nil { + return 0, 0, nil, err + } + groupPath, err := user.GetGroupPath() + if err != nil { + return 0, 0, nil, err + } + passwdFile, err := readUserFile(c, passwdPath) + if err == nil { + defer passwdFile.Close() + } + groupFile, err := readUserFile(c, groupPath) + if err == nil { + defer groupFile.Close() + } + + execUser, err := user.GetExecUser(username, nil, passwdFile, groupFile) + if err != nil { + return 0, 0, nil, err + } + + // todo: fix this double read by a change to libcontainer/user pkg + groupFile, err = readUserFile(c, groupPath) + if err == nil { + defer groupFile.Close() + } + var addGroups []int + if len(c.HostConfig.GroupAdd) > 0 { + addGroups, err = user.GetAdditionalGroups(c.HostConfig.GroupAdd, groupFile) + if err != nil { + return 0, 0, nil, err + } + } + uid := uint32(execUser.Uid) + gid := uint32(execUser.Gid) + sgids := append(execUser.Sgids, addGroups...) 
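`getUser` above defers the actual `user[:group]` resolution to libcontainer's `user.GetExecUser`, feeding it the container's own `/etc/passwd` and `/etc/group` (read through `FollowSymlinkInScope` so the lookup cannot escape the rootfs). A minimal sketch of that call with in-memory files, using the same vendored package; the passwd/group entries are invented for illustration:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/opencontainers/runc/libcontainer/user"
)

func main() {
	// Hypothetical file contents standing in for the container's own files.
	passwd := strings.NewReader("alice:x:1000:1000::/home/alice:/bin/sh\n")
	group := strings.NewReader("staff:x:50:alice\n")

	execUser, err := user.GetExecUser("alice:staff", nil, passwd, group)
	if err != nil {
		fmt.Println("resolve failed:", err)
		return
	}
	fmt.Printf("uid=%d gid=%d sgids=%v\n", execUser.Uid, execUser.Gid, execUser.Sgids)
}
```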
+ var additionalGids []uint32 + for _, g := range sgids { + additionalGids = append(additionalGids, uint32(g)) + } + return uid, gid, additionalGids, nil +} + +func setNamespace(s *specs.Spec, ns specs.LinuxNamespace) { + for i, n := range s.Linux.Namespaces { + if n.Type == ns.Type { + s.Linux.Namespaces[i] = ns + return + } + } + s.Linux.Namespaces = append(s.Linux.Namespaces, ns) +} + +func setCapabilities(s *specs.Spec, c *container.Container) error { + var caplist []string + var err error + if c.HostConfig.Privileged { + caplist = caps.GetAllCapabilities() + } else { + caplist, err = caps.TweakCapabilities(s.Process.Capabilities.Effective, c.HostConfig.CapAdd, c.HostConfig.CapDrop) + if err != nil { + return err + } + } + s.Process.Capabilities.Effective = caplist + s.Process.Capabilities.Bounding = caplist + s.Process.Capabilities.Permitted = caplist + s.Process.Capabilities.Inheritable = caplist + return nil +} + +func setNamespaces(daemon *Daemon, s *specs.Spec, c *container.Container) error { + userNS := false + // user + if c.HostConfig.UsernsMode.IsPrivate() { + uidMap, gidMap := daemon.GetUIDGIDMaps() + if uidMap != nil { + userNS = true + ns := specs.LinuxNamespace{Type: "user"} + setNamespace(s, ns) + s.Linux.UIDMappings = specMapping(uidMap) + s.Linux.GIDMappings = specMapping(gidMap) + } + } + // network + if !c.Config.NetworkDisabled { + ns := specs.LinuxNamespace{Type: "network"} + parts := strings.SplitN(string(c.HostConfig.NetworkMode), ":", 2) + if parts[0] == "container" { + nc, err := daemon.getNetworkedContainer(c.ID, c.HostConfig.NetworkMode.ConnectedContainer()) + if err != nil { + return err + } + ns.Path = fmt.Sprintf("/proc/%d/ns/net", nc.State.GetPID()) + if userNS { + // to share a net namespace, they must also share a user namespace + nsUser := specs.LinuxNamespace{Type: "user"} + nsUser.Path = fmt.Sprintf("/proc/%d/ns/user", nc.State.GetPID()) + setNamespace(s, nsUser) + } + } else if c.HostConfig.NetworkMode.IsHost() { + ns.Path = c.NetworkSettings.SandboxKey + } + setNamespace(s, ns) + } + // ipc + if c.HostConfig.IpcMode.IsContainer() { + ns := specs.LinuxNamespace{Type: "ipc"} + ic, err := daemon.getIpcContainer(c) + if err != nil { + return err + } + ns.Path = fmt.Sprintf("/proc/%d/ns/ipc", ic.State.GetPID()) + setNamespace(s, ns) + if userNS { + // to share an IPC namespace, they must also share a user namespace + nsUser := specs.LinuxNamespace{Type: "user"} + nsUser.Path = fmt.Sprintf("/proc/%d/ns/user", ic.State.GetPID()) + setNamespace(s, nsUser) + } + } else if c.HostConfig.IpcMode.IsHost() { + oci.RemoveNamespace(s, specs.LinuxNamespaceType("ipc")) + } else { + ns := specs.LinuxNamespace{Type: "ipc"} + setNamespace(s, ns) + } + // pid + if c.HostConfig.PidMode.IsContainer() { + ns := specs.LinuxNamespace{Type: "pid"} + pc, err := daemon.getPidContainer(c) + if err != nil { + return err + } + ns.Path = fmt.Sprintf("/proc/%d/ns/pid", pc.State.GetPID()) + setNamespace(s, ns) + if userNS { + // to share a PID namespace, they must also share a user namespace + nsUser := specs.LinuxNamespace{Type: "user"} + nsUser.Path = fmt.Sprintf("/proc/%d/ns/user", pc.State.GetPID()) + setNamespace(s, nsUser) + } + } else if c.HostConfig.PidMode.IsHost() { + oci.RemoveNamespace(s, specs.LinuxNamespaceType("pid")) + } else { + ns := specs.LinuxNamespace{Type: "pid"} + setNamespace(s, ns) + } + // uts + if c.HostConfig.UTSMode.IsHost() { + oci.RemoveNamespace(s, specs.LinuxNamespaceType("uts")) + s.Hostname = "" + } + + return nil +} + +func specMapping(s 
[]idtools.IDMap) []specs.LinuxIDMapping { + var ids []specs.LinuxIDMapping + for _, item := range s { + ids = append(ids, specs.LinuxIDMapping{ + HostID: uint32(item.HostID), + ContainerID: uint32(item.ContainerID), + Size: uint32(item.Size), + }) + } + return ids +} + +func getMountInfo(mountinfo []*mount.Info, dir string) *mount.Info { + for _, m := range mountinfo { + if m.Mountpoint == dir { + return m + } + } + return nil +} + +// Get the source mount point of directory passed in as argument. Also return +// optional fields. +func getSourceMount(source string) (string, string, error) { + // Ensure any symlinks are resolved. + sourcePath, err := filepath.EvalSymlinks(source) + if err != nil { + return "", "", err + } + + mountinfos, err := mount.GetMounts() + if err != nil { + return "", "", err + } + + mountinfo := getMountInfo(mountinfos, sourcePath) + if mountinfo != nil { + return sourcePath, mountinfo.Optional, nil + } + + path := sourcePath + for { + path = filepath.Dir(path) + + mountinfo = getMountInfo(mountinfos, path) + if mountinfo != nil { + return path, mountinfo.Optional, nil + } + + if path == "/" { + break + } + } + + // If we are here, we did not find parent mount. Something is wrong. + return "", "", fmt.Errorf("Could not find source mount of %s", source) +} + +// Ensure mount point on which path is mounted, is shared. +func ensureShared(path string) error { + sharedMount := false + + sourceMount, optionalOpts, err := getSourceMount(path) + if err != nil { + return err + } + // Make sure source mount point is shared. + optsSplit := strings.Split(optionalOpts, " ") + for _, opt := range optsSplit { + if strings.HasPrefix(opt, "shared:") { + sharedMount = true + break + } + } + + if !sharedMount { + return fmt.Errorf("Path %s is mounted on %s but it is not a shared mount.", path, sourceMount) + } + return nil +} + +// Ensure mount point on which path is mounted, is either shared or slave. +func ensureSharedOrSlave(path string) error { + sharedMount := false + slaveMount := false + + sourceMount, optionalOpts, err := getSourceMount(path) + if err != nil { + return err + } + // Make sure source mount point is shared. 
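`getSourceMount` above resolves symlinks and then walks parent directories until it hits an entry from the mount table. Stripped of the daemon types, the walk reduces to something like this sketch (the mount table here is a hard-coded stand-in for what `mount.GetMounts()` returns):

```go
package main

import (
	"fmt"
	"path/filepath"
)

// findSourceMount walks from dir up to "/" and returns the first path that
// appears in the mount table, i.e. the mount point the directory lives on.
func findSourceMount(dir string, mountpoints map[string]bool) (string, bool) {
	for path := dir; ; path = filepath.Dir(path) {
		if mountpoints[path] {
			return path, true
		}
		if path == "/" {
			return "", false
		}
	}
}

func main() {
	// Hypothetical mount table; "/" is always mounted, so the walk terminates.
	table := map[string]bool{"/": true, "/var/lib/docker": true}
	mp, ok := findSourceMount("/var/lib/docker/volumes/v1/_data", table)
	fmt.Println(mp, ok) // /var/lib/docker true
}
```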
+ optsSplit := strings.Split(optionalOpts, " ") + for _, opt := range optsSplit { + if strings.HasPrefix(opt, "shared:") { + sharedMount = true + break + } else if strings.HasPrefix(opt, "master:") { + slaveMount = true + break + } + } + + if !sharedMount && !slaveMount { + return fmt.Errorf("Path %s is mounted on %s but it is not a shared or slave mount.", path, sourceMount) + } + return nil +} + +var ( + mountPropagationMap = map[string]int{ + "private": mount.PRIVATE, + "rprivate": mount.RPRIVATE, + "shared": mount.SHARED, + "rshared": mount.RSHARED, + "slave": mount.SLAVE, + "rslave": mount.RSLAVE, + } + + mountPropagationReverseMap = map[int]string{ + mount.PRIVATE: "private", + mount.RPRIVATE: "rprivate", + mount.SHARED: "shared", + mount.RSHARED: "rshared", + mount.SLAVE: "slave", + mount.RSLAVE: "rslave", + } +) + +func setMounts(daemon *Daemon, s *specs.Spec, c *container.Container, mounts []container.Mount) error { + userMounts := make(map[string]struct{}) + for _, m := range mounts { + userMounts[m.Destination] = struct{}{} + } + + // Filter out mounts that are overridden by user supplied mounts + var defaultMounts []specs.Mount + _, mountDev := userMounts["/dev"] + for _, m := range s.Mounts { + if _, ok := userMounts[m.Destination]; !ok { + if mountDev && strings.HasPrefix(m.Destination, "/dev/") { + continue + } + defaultMounts = append(defaultMounts, m) + } + } + + s.Mounts = defaultMounts + for _, m := range mounts { + for _, cm := range s.Mounts { + if cm.Destination == m.Destination { + return fmt.Errorf("Duplicate mount point '%s'", m.Destination) + } + } + + if m.Source == "tmpfs" { + data := m.Data + options := []string{"noexec", "nosuid", "nodev", string(volume.DefaultPropagationMode)} + if data != "" { + options = append(options, strings.Split(data, ",")...) + } + + merged, err := mount.MergeTmpfsOptions(options) + if err != nil { + return err + } + + s.Mounts = append(s.Mounts, specs.Mount{Destination: m.Destination, Source: m.Source, Type: "tmpfs", Options: merged}) + continue + } + + mt := specs.Mount{Destination: m.Destination, Source: m.Source, Type: "bind"} + + // Determine property of RootPropagation based on volume + // properties. If a volume is shared, then keep root propagation + // shared. This should work for slave and private volumes too. + // + // For slave volumes, it can be either [r]shared/[r]slave. + // + // For private volumes any root propagation value should work. 
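The propagation handling that follows promotes the spec's `RootfsPropagation` just enough for each volume: a shared volume forces a shared root, a slave volume accepts shared or slave. A condensed sketch of that decision table (plain strings stand in for the `mount` package's flag constants):

```go
package main

import "fmt"

// adjustRootPropagation mirrors the promotion logic used for bind mounts:
// a shared volume needs a [r]shared root, a slave volume needs at least
// [r]slave (an [r]shared root is also acceptable).
func adjustRootPropagation(root, volume string) string {
	switch volume {
	case "shared", "rshared":
		if root != "shared" && root != "rshared" {
			return "shared"
		}
	case "slave", "rslave":
		if root != "shared" && root != "rshared" && root != "slave" && root != "rslave" {
			return "rslave"
		}
	}
	return root // private volumes work under any root propagation
}

func main() {
	fmt.Println(adjustRootPropagation("private", "shared")) // shared
	fmt.Println(adjustRootPropagation("private", "rslave")) // rslave
	fmt.Println(adjustRootPropagation("rshared", "slave"))  // rshared
}
```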
+ pFlag := mountPropagationMap[m.Propagation] + if pFlag == mount.SHARED || pFlag == mount.RSHARED { + if err := ensureShared(m.Source); err != nil { + return err + } + rootpg := mountPropagationMap[s.Linux.RootfsPropagation] + if rootpg != mount.SHARED && rootpg != mount.RSHARED { + s.Linux.RootfsPropagation = mountPropagationReverseMap[mount.SHARED] + } + } else if pFlag == mount.SLAVE || pFlag == mount.RSLAVE { + if err := ensureSharedOrSlave(m.Source); err != nil { + return err + } + rootpg := mountPropagationMap[s.Linux.RootfsPropagation] + if rootpg != mount.SHARED && rootpg != mount.RSHARED && rootpg != mount.SLAVE && rootpg != mount.RSLAVE { + s.Linux.RootfsPropagation = mountPropagationReverseMap[mount.RSLAVE] + } + } + + opts := []string{"rbind"} + if !m.Writable { + opts = append(opts, "ro") + } + if pFlag != 0 { + opts = append(opts, mountPropagationReverseMap[pFlag]) + } + + mt.Options = opts + s.Mounts = append(s.Mounts, mt) + } + + if s.Root.Readonly { + for i, m := range s.Mounts { + switch m.Destination { + case "/proc", "/dev/pts", "/dev/mqueue": // /dev is remounted by runc + continue + } + if _, ok := userMounts[m.Destination]; !ok { + if !stringutils.InSlice(m.Options, "ro") { + s.Mounts[i].Options = append(s.Mounts[i].Options, "ro") + } + } + } + } + + if c.HostConfig.Privileged { + if !s.Root.Readonly { + // clear readonly for /sys + for i := range s.Mounts { + if s.Mounts[i].Destination == "/sys" { + clearReadOnly(&s.Mounts[i]) + } + } + } + s.Linux.ReadonlyPaths = nil + s.Linux.MaskedPaths = nil + } + + // TODO: until a kernel/mount solution exists for handling remount in a user namespace, + // we must clear the readonly flag for the cgroups mount (@mrunalp concurs) + if uidMap, _ := daemon.GetUIDGIDMaps(); uidMap != nil || c.HostConfig.Privileged { + for i, m := range s.Mounts { + if m.Type == "cgroup" { + clearReadOnly(&s.Mounts[i]) + } + } + } + + return nil +} + +func (daemon *Daemon) populateCommonSpec(s *specs.Spec, c *container.Container) error { + linkedEnv, err := daemon.setupLinkedContainers(c) + if err != nil { + return err + } + s.Root = specs.Root{ + Path: c.BaseFS, + Readonly: c.HostConfig.ReadonlyRootfs, + } + rootUID, rootGID := daemon.GetRemappedUIDGID() + if err := c.SetupWorkingDirectory(rootUID, rootGID); err != nil { + return err + } + cwd := c.Config.WorkingDir + if len(cwd) == 0 { + cwd = "/" + } + s.Process.Args = append([]string{c.Path}, c.Args...) + + // only add the custom init if it is specified and the container is running in its + // own private pid namespace. It does not make sense to add if it is running in the + // host namespace or another container's pid namespace where we already have an init + if c.HostConfig.PidMode.IsPrivate() { + if (c.HostConfig.Init != nil && *c.HostConfig.Init) || + (c.HostConfig.Init == nil && daemon.configStore.Init) { + s.Process.Args = append([]string{"/dev/init", "--", c.Path}, c.Args...) 
+ var path string
+ if daemon.configStore.InitPath == "" {
+ path, err = exec.LookPath(daemonconfig.DefaultInitBinary)
+ if err != nil {
+ return err
+ }
+ } else {
+ path = daemon.configStore.InitPath
+ }
+ s.Mounts = append(s.Mounts, specs.Mount{
+ Destination: "/dev/init",
+ Type: "bind",
+ Source: path,
+ Options: []string{"bind", "ro"},
+ })
+ }
+ }
+ s.Process.Cwd = cwd
+ s.Process.Env = c.CreateDaemonEnvironment(c.Config.Tty, linkedEnv)
+ s.Process.Terminal = c.Config.Tty
+ s.Hostname = c.FullHostname()
+
+ return nil
+}
+
+func (daemon *Daemon) createSpec(c *container.Container) (*specs.Spec, error) {
+ s := oci.DefaultSpec()
+ if err := daemon.populateCommonSpec(&s, c); err != nil {
+ return nil, err
+ }
+
+ var cgroupsPath string
+ scopePrefix := "docker"
+ parent := "/docker"
+ useSystemd := UsingSystemd(daemon.configStore)
+ if useSystemd {
+ parent = "system.slice"
+ }
+
+ if c.HostConfig.CgroupParent != "" {
+ parent = c.HostConfig.CgroupParent
+ } else if daemon.configStore.CgroupParent != "" {
+ parent = daemon.configStore.CgroupParent
+ }
+
+ if useSystemd {
+ cgroupsPath = parent + ":" + scopePrefix + ":" + c.ID
+ logrus.Debugf("createSpec: cgroupsPath: %s", cgroupsPath)
+ } else {
+ cgroupsPath = filepath.Join(parent, c.ID)
+ }
+ s.Linux.CgroupsPath = cgroupsPath
+
+ if err := setResources(&s, c.HostConfig.Resources); err != nil {
+ return nil, fmt.Errorf("linux runtime spec resources: %v", err)
+ }
+ s.Linux.Resources.OOMScoreAdj = &c.HostConfig.OomScoreAdj
+ s.Linux.Sysctl = c.HostConfig.Sysctls
+
+ p := s.Linux.CgroupsPath
+ if useSystemd {
+ initPath, err := cgroups.GetInitCgroup("cpu")
+ if err != nil {
+ return nil, err
+ }
+ p, err = cgroups.GetOwnCgroup("cpu")
+ if err != nil {
+ return nil, err
+ }
+ p = filepath.Join(initPath, p)
+ }
+
+ // Clean path to guard against things like ../../../BAD
+ parentPath := filepath.Dir(p)
+ if !filepath.IsAbs(parentPath) {
+ parentPath = filepath.Clean("/" + parentPath)
+ }
+
+ if err := daemon.initCgroupsPath(parentPath); err != nil {
+ return nil, fmt.Errorf("linux init cgroups path: %v", err)
+ }
+ if err := setDevices(&s, c); err != nil {
+ return nil, fmt.Errorf("linux runtime spec devices: %v", err)
+ }
+ if err := setRlimits(daemon, &s, c); err != nil {
+ return nil, fmt.Errorf("linux runtime spec rlimits: %v", err)
+ }
+ if err := setUser(&s, c); err != nil {
+ return nil, fmt.Errorf("linux spec user: %v", err)
+ }
+ if err := setNamespaces(daemon, &s, c); err != nil {
+ return nil, fmt.Errorf("linux spec namespaces: %v", err)
+ }
+ if err := setCapabilities(&s, c); err != nil {
+ return nil, fmt.Errorf("linux spec capabilities: %v", err)
+ }
+ if err := setSeccomp(daemon, &s, c); err != nil {
+ return nil, fmt.Errorf("linux seccomp: %v", err)
+ }
+
+ if err := daemon.setupIpcDirs(c); err != nil {
+ return nil, err
+ }
+
+ if err := daemon.setupSecretDir(c); err != nil {
+ return nil, err
+ }
+
+ if err := daemon.setupConfigDir(c); err != nil {
+ return nil, err
+ }
+
+ ms, err := daemon.setupMounts(c)
+ if err != nil {
+ return nil, err
+ }
+
+ ms = append(ms, c.IpcMounts()...)
+
+ tmpfsMounts, err := c.TmpfsMounts()
+ if err != nil {
+ return nil, err
+ }
+ ms = append(ms, tmpfsMounts...)
+
+ if m := c.SecretMounts(); m != nil {
+ ms = append(ms, m...)
+ }
+
+ ms = append(ms, c.ConfigMounts()...)
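`createSpec` above emits two different `CgroupsPath` encodings: a `parent:scope-prefix:id` triple for the systemd cgroup driver, and a plain path join for cgroupfs. A small sketch of the two shapes (container ID shortened for readability):

```go
package main

import (
	"fmt"
	"path/filepath"
)

// cgroupsPath reproduces the two encodings used above: systemd gets a
// "slice:prefix:id" triple, cgroupfs gets a conventional filesystem path.
func cgroupsPath(parent, id string, systemd bool) string {
	if systemd {
		return parent + ":docker:" + id
	}
	return filepath.Join(parent, id)
}

func main() {
	id := "4f66ad9067b3"
	fmt.Println(cgroupsPath("system.slice", id, true)) // system.slice:docker:4f66ad9067b3
	fmt.Println(cgroupsPath("/docker", id, false))     // /docker/4f66ad9067b3
}
```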
+ + sort.Sort(mounts(ms)) + if err := setMounts(daemon, &s, c, ms); err != nil { + return nil, fmt.Errorf("linux mounts: %v", err) + } + + for _, ns := range s.Linux.Namespaces { + if ns.Type == "network" && ns.Path == "" && !c.Config.NetworkDisabled { + target, err := os.Readlink(filepath.Join("/proc", strconv.Itoa(os.Getpid()), "exe")) + if err != nil { + return nil, err + } + + s.Hooks = &specs.Hooks{ + Prestart: []specs.Hook{{ + Path: target, // FIXME: cross-platform + Args: []string{"libnetwork-setkey", c.ID, daemon.netController.ID()}, + }}, + } + } + } + + if apparmor.IsEnabled() { + var appArmorProfile string + if c.AppArmorProfile != "" { + appArmorProfile = c.AppArmorProfile + } else if c.HostConfig.Privileged { + appArmorProfile = "unconfined" + } else { + appArmorProfile = "docker-default" + } + + if appArmorProfile == "docker-default" { + // Unattended upgrades and other fun services can unload AppArmor + // profiles inadvertently. Since we cannot store our profile in + // /etc/apparmor.d, nor can we practically add other ways of + // telling the system to keep our profile loaded, in order to make + // sure that we keep the default profile enabled we dynamically + // reload it if necessary. + if err := ensureDefaultAppArmorProfile(); err != nil { + return nil, err + } + } + + s.Process.ApparmorProfile = appArmorProfile + } + s.Process.SelinuxLabel = c.GetProcessLabel() + s.Process.NoNewPrivileges = c.NoNewPrivileges + s.Linux.MountLabel = c.MountLabel + + return (*specs.Spec)(&s), nil +} + +func clearReadOnly(m *specs.Mount) { + var opt []string + for _, o := range m.Options { + if o != "ro" { + opt = append(opt, o) + } + } + m.Options = opt +} + +// mergeUlimits merge the Ulimits from HostConfig with daemon defaults, and update HostConfig +func (daemon *Daemon) mergeUlimits(c *containertypes.HostConfig) { + ulimits := c.Ulimits + // Merge ulimits with daemon defaults + ulIdx := make(map[string]struct{}) + for _, ul := range ulimits { + ulIdx[ul.Name] = struct{}{} + } + for name, ul := range daemon.configStore.Ulimits { + if _, exists := ulIdx[name]; !exists { + ulimits = append(ulimits, ul) + } + } + c.Ulimits = ulimits +} diff --git a/components/engine/daemon/oci_solaris.go b/components/engine/daemon/oci_solaris.go new file mode 100644 index 0000000000..0c757f9196 --- /dev/null +++ b/components/engine/daemon/oci_solaris.go @@ -0,0 +1,188 @@ +package daemon + +import ( + "fmt" + "path/filepath" + "sort" + "strconv" + + containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/docker/container" + "github.com/docker/docker/oci" + "github.com/docker/libnetwork" + "github.com/opencontainers/runtime-spec/specs-go" +) + +func setResources(s *specs.Spec, r containertypes.Resources) error { + mem := getMemoryResources(r) + s.Solaris.CappedMemory = &mem + + capCPU := getCPUResources(r) + s.Solaris.CappedCPU = &capCPU + + return nil +} + +func setUser(s *specs.Spec, c *container.Container) error { + uid, gid, additionalGids, err := getUser(c, c.Config.User) + if err != nil { + return err + } + s.Process.User.UID = uid + s.Process.User.GID = gid + s.Process.User.AdditionalGids = additionalGids + return nil +} + +func getUser(c *container.Container, username string) (uint32, uint32, []uint32, error) { + return 0, 0, nil, nil +} + +func (daemon *Daemon) getRunzAnet(ep libnetwork.Endpoint) (specs.Anet, error) { + var ( + linkName string + lowerLink string + defRouter string + ) + + epInfo := ep.Info() + if epInfo == nil { + return specs.Anet{}, fmt.Errorf("invalid 
endpoint") + } + + nw, err := daemon.GetNetworkByName(ep.Network()) + if err != nil { + return specs.Anet{}, fmt.Errorf("Failed to get network %s: %v", ep.Network(), err) + } + + // Evaluate default router, linkname and lowerlink for interface endpoint + switch nw.Type() { + case "bridge": + defRouter = epInfo.Gateway().String() + linkName = "net0" // Should always be net0 for a container + + // TODO We construct lowerlink here exactly as done for solaris bridge + // initialization. Need modular code to reuse. + options := nw.Info().DriverOptions() + nwName := options["com.docker.network.bridge.name"] + lastChar := nwName[len(nwName)-1:] + if _, err = strconv.Atoi(lastChar); err != nil { + lowerLink = nwName + "_0" + } else { + lowerLink = nwName + } + + case "overlay": + defRouter = "" + linkName = "net1" + + // TODO Follows generateVxlanName() in solaris overlay. + id := nw.ID() + if len(nw.ID()) > 12 { + id = nw.ID()[:12] + } + lowerLink = "vx_" + id + "_0" + } + + runzanet := specs.Anet{ + Linkname: linkName, + Lowerlink: lowerLink, + Allowedaddr: epInfo.Iface().Address().String(), + Configallowedaddr: "true", + Defrouter: defRouter, + Linkprotection: "mac-nospoof, ip-nospoof", + Macaddress: epInfo.Iface().MacAddress().String(), + } + + return runzanet, nil +} + +func (daemon *Daemon) setNetworkInterface(s *specs.Spec, c *container.Container) error { + var anets []specs.Anet + + sb, err := daemon.netController.SandboxByID(c.NetworkSettings.SandboxID) + if err != nil { + return fmt.Errorf("Could not obtain sandbox for container") + } + + // Populate interfaces required for each endpoint + for _, ep := range sb.Endpoints() { + runzanet, err := daemon.getRunzAnet(ep) + if err != nil { + return fmt.Errorf("Failed to get interface information for endpoint %d: %v", ep.ID(), err) + } + anets = append(anets, runzanet) + } + + s.Solaris.Anet = anets + if anets != nil { + s.Solaris.Milestone = "svc:/milestone/container:default" + } + return nil +} + +func (daemon *Daemon) populateCommonSpec(s *specs.Spec, c *container.Container) error { + linkedEnv, err := daemon.setupLinkedContainers(c) + if err != nil { + return err + } + s.Root = specs.Root{ + Path: filepath.Dir(c.BaseFS), + Readonly: c.HostConfig.ReadonlyRootfs, + } + rootUID, rootGID := daemon.GetRemappedUIDGID() + if err := c.SetupWorkingDirectory(rootUID, rootGID); err != nil { + return err + } + cwd := c.Config.WorkingDir + s.Process.Args = append([]string{c.Path}, c.Args...) + s.Process.Cwd = cwd + s.Process.Env = c.CreateDaemonEnvironment(c.Config.Tty, linkedEnv) + s.Process.Terminal = c.Config.Tty + s.Hostname = c.FullHostname() + + return nil +} + +func (daemon *Daemon) createSpec(c *container.Container) (*specs.Spec, error) { + s := oci.DefaultSpec() + if err := daemon.populateCommonSpec(&s, c); err != nil { + return nil, err + } + + if err := setResources(&s, c.HostConfig.Resources); err != nil { + return nil, fmt.Errorf("runtime spec resources: %v", err) + } + + if err := setUser(&s, c); err != nil { + return nil, fmt.Errorf("spec user: %v", err) + } + + if err := daemon.setNetworkInterface(&s, c); err != nil { + return nil, err + } + + if err := daemon.setupIpcDirs(c); err != nil { + return nil, err + } + + ms, err := daemon.setupMounts(c) + if err != nil { + return nil, err + } + ms = append(ms, c.IpcMounts()...) + tmpfsMounts, err := c.TmpfsMounts() + if err != nil { + return nil, err + } + ms = append(ms, tmpfsMounts...) 
+ sort.Sort(mounts(ms)) + + return (*specs.Spec)(&s), nil +} + +// mergeUlimits merge the Ulimits from HostConfig with daemon defaults, and update HostConfig +// It will do nothing on non-Linux platform +func (daemon *Daemon) mergeUlimits(c *containertypes.HostConfig) { + return +} diff --git a/components/engine/daemon/oci_windows.go b/components/engine/daemon/oci_windows.go new file mode 100644 index 0000000000..0d1d08d263 --- /dev/null +++ b/components/engine/daemon/oci_windows.go @@ -0,0 +1,172 @@ +package daemon + +import ( + "syscall" + + containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/docker/container" + "github.com/docker/docker/oci" + "github.com/docker/docker/pkg/sysinfo" + "github.com/opencontainers/runtime-spec/specs-go" +) + +func (daemon *Daemon) createSpec(c *container.Container) (*specs.Spec, error) { + s := oci.DefaultSpec() + + linkedEnv, err := daemon.setupLinkedContainers(c) + if err != nil { + return nil, err + } + + // Note, unlike Unix, we do NOT call into SetupWorkingDirectory as + // this is done in VMCompute. Further, we couldn't do it for Hyper-V + // containers anyway. + + // In base spec + s.Hostname = c.FullHostname() + + if err := daemon.setupSecretDir(c); err != nil { + return nil, err + } + + if err := daemon.setupConfigDir(c); err != nil { + return nil, err + } + + // In s.Mounts + mounts, err := daemon.setupMounts(c) + if err != nil { + return nil, err + } + + var isHyperV bool + if c.HostConfig.Isolation.IsDefault() { + // Container using default isolation, so take the default from the daemon configuration + isHyperV = daemon.defaultIsolation.IsHyperV() + } else { + // Container may be requesting an explicit isolation mode. + isHyperV = c.HostConfig.Isolation.IsHyperV() + } + + // If the container has not been started, and has configs or secrets + // secrets, create symlinks to each confing and secret. If it has been + // started before, the symlinks should have already been created. Also, it + // is important to not mount a Hyper-V container that has been started + // before, to protect the host from the container; for example, from + // malicious mutation of NTFS data structures. + if !c.HasBeenStartedBefore && (len(c.SecretReferences) > 0 || len(c.ConfigReferences) > 0) { + // The container file system is mounted before this function is called, + // except for Hyper-V containers, so mount it here in that case. + if isHyperV { + if err := daemon.Mount(c); err != nil { + return nil, err + } + defer daemon.Unmount(c) + } + if err := c.CreateSecretSymlinks(); err != nil { + return nil, err + } + if err := c.CreateConfigSymlinks(); err != nil { + return nil, err + } + } + + if m := c.SecretMounts(); m != nil { + mounts = append(mounts, m...) + } + + if m := c.ConfigMounts(); m != nil { + mounts = append(mounts, m...) + } + + for _, mount := range mounts { + m := specs.Mount{ + Source: mount.Source, + Destination: mount.Destination, + } + if !mount.Writable { + m.Options = append(m.Options, "ro") + } + s.Mounts = append(s.Mounts, m) + } + + // In s.Process + s.Process.Args = append([]string{c.Path}, c.Args...) + if !c.Config.ArgsEscaped { + s.Process.Args = escapeArgs(s.Process.Args) + } + s.Process.Cwd = c.Config.WorkingDir + if len(s.Process.Cwd) == 0 { + // We default to C:\ to workaround the oddity of the case that the + // default directory for cmd running as LocalSystem (or + // ContainerAdministrator) is c:\windows\system32. 
Hence docker run + // cmd will by default end in c:\windows\system32, rather + // than 'root' (/) on Linux. The oddity is that if you have a dockerfile + // which has no WORKDIR and has a COPY file ., . will be interpreted + // as c:\. Hence, setting it to default of c:\ makes for consistency. + s.Process.Cwd = `C:\` + } + s.Process.Env = c.CreateDaemonEnvironment(c.Config.Tty, linkedEnv) + s.Process.ConsoleSize.Height = c.HostConfig.ConsoleSize[0] + s.Process.ConsoleSize.Width = c.HostConfig.ConsoleSize[1] + s.Process.Terminal = c.Config.Tty + s.Process.User.Username = c.Config.User + + // In spec.Root. This is not set for Hyper-V containers + if !isHyperV { + s.Root.Path = c.BaseFS + } + s.Root.Readonly = false // Windows does not support a read-only root filesystem + + // In s.Windows.Resources + cpuShares := uint16(c.HostConfig.CPUShares) + cpuPercent := uint8(c.HostConfig.CPUPercent) + cpuCount := uint64(c.HostConfig.CPUCount) + if c.HostConfig.NanoCPUs > 0 { + if isHyperV { + cpuCount = uint64(c.HostConfig.NanoCPUs / 1e9) + leftoverNanoCPUs := c.HostConfig.NanoCPUs % 1e9 + if leftoverNanoCPUs != 0 { + cpuCount++ + cpuPercent = uint8(c.HostConfig.NanoCPUs * 100 / int64(cpuCount) / 1e9) + } + } else { + cpuPercent = uint8(c.HostConfig.NanoCPUs * 100 / int64(sysinfo.NumCPU()) / 1e9) + + if cpuPercent < 1 { + // The requested NanoCPUs is so small that we rounded to 0, use 1 instead + cpuPercent = 1 + } + } + } + memoryLimit := uint64(c.HostConfig.Memory) + s.Windows.Resources = &specs.WindowsResources{ + CPU: &specs.WindowsCPUResources{ + Percent: &cpuPercent, + Shares: &cpuShares, + Count: &cpuCount, + }, + Memory: &specs.WindowsMemoryResources{ + Limit: &memoryLimit, + }, + Storage: &specs.WindowsStorageResources{ + Bps: &c.HostConfig.IOMaximumBandwidth, + Iops: &c.HostConfig.IOMaximumIOps, + }, + } + return (*specs.Spec)(&s), nil +} + +func escapeArgs(args []string) []string { + escapedArgs := make([]string, len(args)) + for i, a := range args { + escapedArgs[i] = syscall.EscapeArg(a) + } + return escapedArgs +} + +// mergeUlimits merge the Ulimits from HostConfig with daemon defaults, and update HostConfig +// It will do nothing on non-Linux platform +func (daemon *Daemon) mergeUlimits(c *containertypes.HostConfig) { + return +} diff --git a/components/engine/daemon/pause.go b/components/engine/daemon/pause.go new file mode 100644 index 0000000000..dbfafbc5fd --- /dev/null +++ b/components/engine/daemon/pause.go @@ -0,0 +1,49 @@ +package daemon + +import ( + "fmt" + + "github.com/docker/docker/container" +) + +// ContainerPause pauses a container +func (daemon *Daemon) ContainerPause(name string) error { + container, err := daemon.GetContainer(name) + if err != nil { + return err + } + + if err := daemon.containerPause(container); err != nil { + return err + } + + return nil +} + +// containerPause pauses the container execution without stopping the process. +// The execution can be resumed by calling containerUnpause. 
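Before moving on to the pause path: the Windows CPU limit conversion above translates a `--cpus` value (NanoCPUs, where 1e9 equals one CPU) into the platform's count/percent knobs. For Hyper-V containers the count is rounded up and the percentage scaled down to compensate. A standalone sketch of that arithmetic:

```go
package main

import "fmt"

// hyperVCPU converts NanoCPUs into the cpuCount/cpuPercent pair used for
// Hyper-V containers above: round the count up, then shrink the percentage
// so count*percent still matches the request. When the request is a whole
// number of CPUs, percent is left at its zero value, as in the code above.
func hyperVCPU(nanoCPUs int64) (count uint64, percent uint8) {
	count = uint64(nanoCPUs / 1e9)
	if leftover := nanoCPUs % 1e9; leftover != 0 {
		count++
		percent = uint8(nanoCPUs * 100 / int64(count) / 1e9)
	}
	return count, percent
}

func main() {
	// --cpus=2.5 => NanoCPUs = 2500000000
	count, percent := hyperVCPU(2500000000)
	fmt.Println(count, percent) // 3 83, i.e. 83% of 3 CPUs ~= 2.5 CPUs
}
```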
+func (daemon *Daemon) containerPause(container *container.Container) error { + container.Lock() + defer container.Unlock() + + // We cannot Pause the container which is not running + if !container.Running { + return errNotRunning{container.ID} + } + + // We cannot Pause the container which is already paused + if container.Paused { + return fmt.Errorf("Container %s is already paused", container.ID) + } + + // We cannot Pause the container which is restarting + if container.Restarting { + return errContainerIsRestarting(container.ID) + } + + if err := daemon.containerd.Pause(container.ID); err != nil { + return fmt.Errorf("Cannot pause container %s: %s", container.ID, err) + } + + return nil +} diff --git a/components/engine/daemon/prune.go b/components/engine/daemon/prune.go new file mode 100644 index 0000000000..c614de2fe1 --- /dev/null +++ b/components/engine/daemon/prune.go @@ -0,0 +1,463 @@ +package daemon + +import ( + "fmt" + "regexp" + "sync/atomic" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + timetypes "github.com/docker/docker/api/types/time" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/directory" + "github.com/docker/docker/runconfig" + "github.com/docker/docker/volume" + "github.com/docker/libnetwork" + digest "github.com/opencontainers/go-digest" + "golang.org/x/net/context" +) + +var ( + // errPruneRunning is returned when a prune request is received while + // one is in progress + errPruneRunning = fmt.Errorf("a prune operation is already running") + + containersAcceptedFilters = map[string]bool{ + "label": true, + "label!": true, + "until": true, + } + volumesAcceptedFilters = map[string]bool{ + "label": true, + "label!": true, + } + imagesAcceptedFilters = map[string]bool{ + "dangling": true, + "label": true, + "label!": true, + "until": true, + } + networksAcceptedFilters = map[string]bool{ + "label": true, + "label!": true, + "until": true, + } +) + +// ContainersPrune removes unused containers +func (daemon *Daemon) ContainersPrune(ctx context.Context, pruneFilters filters.Args) (*types.ContainersPruneReport, error) { + if !atomic.CompareAndSwapInt32(&daemon.pruneRunning, 0, 1) { + return nil, errPruneRunning + } + defer atomic.StoreInt32(&daemon.pruneRunning, 0) + + rep := &types.ContainersPruneReport{} + + // make sure that only accepted filters have been received + err := pruneFilters.Validate(containersAcceptedFilters) + if err != nil { + return nil, err + } + + until, err := getUntilFromPruneFilters(pruneFilters) + if err != nil { + return nil, err + } + + allContainers := daemon.List() + for _, c := range allContainers { + select { + case <-ctx.Done(): + logrus.Warnf("ContainersPrune operation cancelled: %#v", *rep) + return rep, ctx.Err() + default: + } + + if !c.IsRunning() { + if !until.IsZero() && c.Created.After(until) { + continue + } + if !matchLabels(pruneFilters, c.Config.Labels) { + continue + } + cSize, _ := daemon.getSize(c.ID) + // TODO: sets RmLink to true? 
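On the wire, the accepted filters validated above (`label`, `label!`, `until`) arrive as a `filters.Args` set. A hedged sketch of invoking the containers-prune endpoint with the Go client of this era (`NewEnvClient` reads DOCKER_HOST etc.; error handling kept minimal):

```go
package main

import (
	"fmt"

	"github.com/docker/docker/api/types/filters"
	"github.com/docker/docker/client"
	"golang.org/x/net/context"
)

func main() {
	cli, err := client.NewEnvClient()
	if err != nil {
		panic(err)
	}

	// Only the filters validated above are accepted for containers:
	// label, label!, until.
	args := filters.NewArgs()
	args.Add("until", "24h")          // only prune containers created >24h ago
	args.Add("label!", "keep=true")   // never prune containers labeled keep=true

	report, err := cli.ContainersPrune(context.Background(), args)
	if err != nil {
		panic(err)
	}
	fmt.Printf("deleted %d containers, reclaimed %d bytes\n",
		len(report.ContainersDeleted), report.SpaceReclaimed)
}
```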
+ err := daemon.ContainerRm(c.ID, &types.ContainerRmConfig{}) + if err != nil { + logrus.Warnf("failed to prune container %s: %v", c.ID, err) + continue + } + if cSize > 0 { + rep.SpaceReclaimed += uint64(cSize) + } + rep.ContainersDeleted = append(rep.ContainersDeleted, c.ID) + } + } + + return rep, nil +} + +// VolumesPrune removes unused local volumes +func (daemon *Daemon) VolumesPrune(ctx context.Context, pruneFilters filters.Args) (*types.VolumesPruneReport, error) { + if !atomic.CompareAndSwapInt32(&daemon.pruneRunning, 0, 1) { + return nil, errPruneRunning + } + defer atomic.StoreInt32(&daemon.pruneRunning, 0) + + // make sure that only accepted filters have been received + err := pruneFilters.Validate(volumesAcceptedFilters) + if err != nil { + return nil, err + } + + rep := &types.VolumesPruneReport{} + + pruneVols := func(v volume.Volume) error { + select { + case <-ctx.Done(): + logrus.Warnf("VolumesPrune operation cancelled: %#v", *rep) + return ctx.Err() + default: + } + + name := v.Name() + refs := daemon.volumes.Refs(v) + + if len(refs) == 0 { + detailedVolume, ok := v.(volume.DetailedVolume) + if ok { + if !matchLabels(pruneFilters, detailedVolume.Labels()) { + return nil + } + } + vSize, err := directory.Size(v.Path()) + if err != nil { + logrus.Warnf("could not determine size of volume %s: %v", name, err) + } + err = daemon.volumes.Remove(v) + if err != nil { + logrus.Warnf("could not remove volume %s: %v", name, err) + return nil + } + rep.SpaceReclaimed += uint64(vSize) + rep.VolumesDeleted = append(rep.VolumesDeleted, name) + } + + return nil + } + + err = daemon.traverseLocalVolumes(pruneVols) + + return rep, err +} + +// ImagesPrune removes unused images +func (daemon *Daemon) ImagesPrune(ctx context.Context, pruneFilters filters.Args) (*types.ImagesPruneReport, error) { + if !atomic.CompareAndSwapInt32(&daemon.pruneRunning, 0, 1) { + return nil, errPruneRunning + } + defer atomic.StoreInt32(&daemon.pruneRunning, 0) + + // make sure that only accepted filters have been received + err := pruneFilters.Validate(imagesAcceptedFilters) + if err != nil { + return nil, err + } + + rep := &types.ImagesPruneReport{} + + danglingOnly := true + if pruneFilters.Include("dangling") { + if pruneFilters.ExactMatch("dangling", "false") || pruneFilters.ExactMatch("dangling", "0") { + danglingOnly = false + } else if !pruneFilters.ExactMatch("dangling", "true") && !pruneFilters.ExactMatch("dangling", "1") { + return nil, fmt.Errorf("Invalid filter 'dangling=%s'", pruneFilters.Get("dangling")) + } + } + + until, err := getUntilFromPruneFilters(pruneFilters) + if err != nil { + return nil, err + } + + var allImages map[image.ID]*image.Image + if danglingOnly { + allImages = daemon.imageStore.Heads() + } else { + allImages = daemon.imageStore.Map() + } + allContainers := daemon.List() + imageRefs := map[string]bool{} + for _, c := range allContainers { + select { + case <-ctx.Done(): + return nil, ctx.Err() + default: + imageRefs[c.ID] = true + } + } + + // Filter intermediary images and get their unique size + allLayers := daemon.layerStore.Map() + topImages := map[image.ID]*image.Image{} + for id, img := range allImages { + select { + case <-ctx.Done(): + return nil, ctx.Err() + default: + dgst := digest.Digest(id) + if len(daemon.referenceStore.References(dgst)) == 0 && len(daemon.imageStore.Children(id)) != 0 { + continue + } + if !until.IsZero() && img.Created.After(until) { + continue + } + if !matchLabels(pruneFilters, img.Config.Labels) { + continue + } + topImages[id] = img + } 
+ }
+
+ canceled := false
+deleteImagesLoop:
+ for id := range topImages {
+ select {
+ case <-ctx.Done():
+ // we still want to calculate freed size and return the data
+ canceled = true
+ break deleteImagesLoop
+ default:
+ }
+
+ dgst := digest.Digest(id)
+ hex := dgst.Hex()
+ if _, ok := imageRefs[hex]; ok {
+ continue
+ }
+
+ deletedImages := []types.ImageDeleteResponseItem{}
+ refs := daemon.referenceStore.References(dgst)
+ if len(refs) > 0 {
+ shouldDelete := !danglingOnly
+ if !shouldDelete {
+ hasTag := false
+ for _, ref := range refs {
+ if _, ok := ref.(reference.NamedTagged); ok {
+ hasTag = true
+ break
+ }
+ }
+
+ // Only delete if it's untagged (i.e. repo:<none>)
+ shouldDelete = !hasTag
+ }
+
+ if shouldDelete {
+ for _, ref := range refs {
+ imgDel, err := daemon.ImageDelete(ref.String(), false, true)
+ if err != nil {
+ logrus.Warnf("could not delete reference %s: %v", ref.String(), err)
+ continue
+ }
+ deletedImages = append(deletedImages, imgDel...)
+ }
+ }
+ } else {
+ imgDel, err := daemon.ImageDelete(hex, false, true)
+ if err != nil {
+ logrus.Warnf("could not delete image %s: %v", hex, err)
+ continue
+ }
+ deletedImages = append(deletedImages, imgDel...)
+ }
+
+ rep.ImagesDeleted = append(rep.ImagesDeleted, deletedImages...)
+ }
+
+ // Compute how much space was freed
+ for _, d := range rep.ImagesDeleted {
+ if d.Deleted != "" {
+ chid := layer.ChainID(d.Deleted)
+ if l, ok := allLayers[chid]; ok {
+ diffSize, err := l.DiffSize()
+ if err != nil {
+ logrus.Warnf("failed to get layer %s size: %v", chid, err)
+ continue
+ }
+ rep.SpaceReclaimed += uint64(diffSize)
+ }
+ }
+ }
+
+ if canceled {
+ logrus.Warnf("ImagesPrune operation cancelled: %#v", *rep)
+ return nil, ctx.Err()
+ }
+
+ return rep, nil
+}
+
+// localNetworksPrune removes unused local networks
+func (daemon *Daemon) localNetworksPrune(ctx context.Context, pruneFilters filters.Args) *types.NetworksPruneReport {
+ rep := &types.NetworksPruneReport{}
+
+ until, _ := getUntilFromPruneFilters(pruneFilters)
+
+ // When the function returns true, the walk will stop.
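The walker that follows drives libnetwork's `WalkNetworks`, whose visitor returns `true` to stop the walk early; here that happens only on context cancellation, and every other branch returns `false` so all networks get visited. The contract in isolation, with plain strings standing in for networks:

```go
package main

import "fmt"

// walk visits items until the callback returns true: the same early-stop
// contract as libnetwork's Controller.WalkNetworks.
func walk(items []string, fn func(string) bool) {
	for _, it := range items {
		if fn(it) {
			return
		}
	}
}

func main() {
	pruned := []string{}
	walk([]string{"bridge", "app-net", "test-net"}, func(name string) bool {
		if name == "bridge" { // predefined networks are skipped, not pruned
			return false
		}
		pruned = append(pruned, name)
		return false // keep walking; return true only to abort the walk
	})
	fmt.Println(pruned) // [app-net test-net]
}
```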
+ l := func(nw libnetwork.Network) bool { + select { + case <-ctx.Done(): + return true + default: + } + if nw.Info().ConfigOnly() { + return false + } + if !until.IsZero() && nw.Info().Created().After(until) { + return false + } + if !matchLabels(pruneFilters, nw.Info().Labels()) { + return false + } + nwName := nw.Name() + if runconfig.IsPreDefinedNetwork(nwName) { + return false + } + if len(nw.Endpoints()) > 0 { + return false + } + if err := daemon.DeleteNetwork(nw.ID()); err != nil { + logrus.Warnf("could not remove local network %s: %v", nwName, err) + return false + } + rep.NetworksDeleted = append(rep.NetworksDeleted, nwName) + return false + } + daemon.netController.WalkNetworks(l) + return rep +} + +// clusterNetworksPrune removes unused cluster networks +func (daemon *Daemon) clusterNetworksPrune(ctx context.Context, pruneFilters filters.Args) (*types.NetworksPruneReport, error) { + rep := &types.NetworksPruneReport{} + + until, _ := getUntilFromPruneFilters(pruneFilters) + + cluster := daemon.GetCluster() + + if !cluster.IsManager() { + return rep, nil + } + + networks, err := cluster.GetNetworks() + if err != nil { + return rep, err + } + networkIsInUse := regexp.MustCompile(`network ([[:alnum:]]+) is in use`) + for _, nw := range networks { + select { + case <-ctx.Done(): + return rep, ctx.Err() + default: + if nw.Ingress { + // Routing-mesh network removal has to be explicitly invoked by user + continue + } + if !until.IsZero() && nw.Created.After(until) { + continue + } + if !matchLabels(pruneFilters, nw.Labels) { + continue + } + // https://github.com/docker/docker/issues/24186 + // `docker network inspect` unfortunately displays ONLY those containers that are local to that node. + // So we try to remove it anyway and check the error + err = cluster.RemoveNetwork(nw.ID) + if err != nil { + // we can safely ignore the "network .. is in use" error + match := networkIsInUse.FindStringSubmatch(err.Error()) + if len(match) != 2 || match[1] != nw.ID { + logrus.Warnf("could not remove cluster network %s: %v", nw.Name, err) + } + continue + } + rep.NetworksDeleted = append(rep.NetworksDeleted, nw.Name) + } + } + return rep, nil +} + +// NetworksPrune removes unused networks +func (daemon *Daemon) NetworksPrune(ctx context.Context, pruneFilters filters.Args) (*types.NetworksPruneReport, error) { + if !atomic.CompareAndSwapInt32(&daemon.pruneRunning, 0, 1) { + return nil, errPruneRunning + } + defer atomic.StoreInt32(&daemon.pruneRunning, 0) + + // make sure that only accepted filters have been received + err := pruneFilters.Validate(networksAcceptedFilters) + if err != nil { + return nil, err + } + + if _, err := getUntilFromPruneFilters(pruneFilters); err != nil { + return nil, err + } + + rep := &types.NetworksPruneReport{} + if clusterRep, err := daemon.clusterNetworksPrune(ctx, pruneFilters); err == nil { + rep.NetworksDeleted = append(rep.NetworksDeleted, clusterRep.NetworksDeleted...) + } + + localRep := daemon.localNetworksPrune(ctx, pruneFilters) + rep.NetworksDeleted = append(rep.NetworksDeleted, localRep.NetworksDeleted...) 
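`clusterNetworksPrune` above deliberately attempts every removal and then pattern-matches the failure, treating only a `network <id> is in use` error for the same network ID as benign (see the linked issue: `docker network inspect` only shows containers local to the node, so in-use networks cannot be filtered out up front). The matching in isolation:

```go
package main

import (
	"errors"
	"fmt"
	"regexp"
)

var networkIsInUse = regexp.MustCompile(`network ([[:alnum:]]+) is in use`)

// isInUseError reports whether err is the benign "still has endpoints"
// failure for the given network ID, which prune silently skips.
func isInUseError(err error, nwID string) bool {
	match := networkIsInUse.FindStringSubmatch(err.Error())
	return len(match) == 2 && match[1] == nwID
}

func main() {
	err := errors.New("rpc error: network abc123 is in use by task xyz")
	fmt.Println(isInUseError(err, "abc123")) // true
	fmt.Println(isInUseError(err, "def456")) // false, gets logged as a warning
}
```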
+ + select { + case <-ctx.Done(): + logrus.Warnf("NetworksPrune operation cancelled: %#v", *rep) + return nil, ctx.Err() + default: + } + + return rep, nil +} + +func getUntilFromPruneFilters(pruneFilters filters.Args) (time.Time, error) { + until := time.Time{} + if !pruneFilters.Include("until") { + return until, nil + } + untilFilters := pruneFilters.Get("until") + if len(untilFilters) > 1 { + return until, fmt.Errorf("more than one until filter specified") + } + ts, err := timetypes.GetTimestamp(untilFilters[0], time.Now()) + if err != nil { + return until, err + } + seconds, nanoseconds, err := timetypes.ParseTimestamps(ts, 0) + if err != nil { + return until, err + } + until = time.Unix(seconds, nanoseconds) + return until, nil +} + +func matchLabels(pruneFilters filters.Args, labels map[string]string) bool { + if !pruneFilters.MatchKVList("label", labels) { + return false + } + // By default MatchKVList will return true if field (like 'label!') does not exist + // So we have to add additional Include("label!") check + if pruneFilters.Include("label!") { + if pruneFilters.MatchKVList("label!", labels) { + return false + } + } + return true +} diff --git a/components/engine/daemon/reload.go b/components/engine/daemon/reload.go new file mode 100644 index 0000000000..e47455946e --- /dev/null +++ b/components/engine/daemon/reload.go @@ -0,0 +1,312 @@ +package daemon + +import ( + "encoding/json" + "fmt" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/daemon/config" + "github.com/docker/docker/daemon/discovery" + "github.com/docker/docker/libcontainerd" +) + +// Reload reads configuration changes and modifies the +// daemon according to those changes. +// These are the settings that Reload changes: +// - Platform runtime +// - Daemon debug log level +// - Daemon max concurrent downloads +// - Daemon max concurrent uploads +// - Daemon shutdown timeout (in seconds) +// - Cluster discovery (reconfigure and restart) +// - Daemon labels +// - Insecure registries +// - Registry mirrors +// - Daemon live restore +func (daemon *Daemon) Reload(conf *config.Config) (err error) { + daemon.configStore.Lock() + attributes := map[string]string{} + + defer func() { + // we're unlocking here, because + // LogDaemonEventWithAttributes() -> SystemInfo() -> GetAllRuntimes() + // holds that lock too. 
+ daemon.configStore.Unlock()
+ if err == nil {
+ daemon.LogDaemonEventWithAttributes("reload", attributes)
+ }
+ }()
+
+ daemon.reloadPlatform(conf, attributes)
+ daemon.reloadDebug(conf, attributes)
+ daemon.reloadMaxConcurrentDownloadsAndUploads(conf, attributes)
+ daemon.reloadShutdownTimeout(conf, attributes)
+
+ if err := daemon.reloadClusterDiscovery(conf, attributes); err != nil {
+ return err
+ }
+ if err := daemon.reloadLabels(conf, attributes); err != nil {
+ return err
+ }
+ if err := daemon.reloadAllowNondistributableArtifacts(conf, attributes); err != nil {
+ return err
+ }
+ if err := daemon.reloadInsecureRegistries(conf, attributes); err != nil {
+ return err
+ }
+ if err := daemon.reloadRegistryMirrors(conf, attributes); err != nil {
+ return err
+ }
+ if err := daemon.reloadLiveRestore(conf, attributes); err != nil {
+ return err
+ }
+ return nil
+}
+
+// reloadDebug updates configuration with Debug option
+// and updates the passed attributes
+func (daemon *Daemon) reloadDebug(conf *config.Config, attributes map[string]string) {
+ // update corresponding configuration
+ if conf.IsValueSet("debug") {
+ daemon.configStore.Debug = conf.Debug
+ }
+ // prepare reload event attributes with updatable configurations
+ attributes["debug"] = fmt.Sprintf("%t", daemon.configStore.Debug)
+}
+
+// reloadMaxConcurrentDownloadsAndUploads updates configuration with max concurrent
+// download and upload options and updates the passed attributes
+func (daemon *Daemon) reloadMaxConcurrentDownloadsAndUploads(conf *config.Config, attributes map[string]string) {
+ // If no value is set for max-concurrent-downloads we assume it is the default value
+ // We always "reset" as the cost is lightweight and easy to maintain.
+ if conf.IsValueSet("max-concurrent-downloads") && conf.MaxConcurrentDownloads != nil {
+ *daemon.configStore.MaxConcurrentDownloads = *conf.MaxConcurrentDownloads
+ } else {
+ maxConcurrentDownloads := config.DefaultMaxConcurrentDownloads
+ daemon.configStore.MaxConcurrentDownloads = &maxConcurrentDownloads
+ }
+ logrus.Debugf("Reset Max Concurrent Downloads: %d", *daemon.configStore.MaxConcurrentDownloads)
+ if daemon.downloadManager != nil {
+ daemon.downloadManager.SetConcurrency(*daemon.configStore.MaxConcurrentDownloads)
+ }
+
+ // prepare reload event attributes with updatable configurations
+ attributes["max-concurrent-downloads"] = fmt.Sprintf("%d", *daemon.configStore.MaxConcurrentDownloads)
+
+ // If no value is set for max-concurrent-uploads we assume it is the default value
+ // We always "reset" as the cost is lightweight and easy to maintain.
+ if conf.IsValueSet("max-concurrent-uploads") && conf.MaxConcurrentUploads != nil { + *daemon.configStore.MaxConcurrentUploads = *conf.MaxConcurrentUploads + } else { + maxConcurrentUploads := config.DefaultMaxConcurrentUploads + daemon.configStore.MaxConcurrentUploads = &maxConcurrentUploads + } + logrus.Debugf("Reset Max Concurrent Uploads: %d", *daemon.configStore.MaxConcurrentUploads) + if daemon.uploadManager != nil { + daemon.uploadManager.SetConcurrency(*daemon.configStore.MaxConcurrentUploads) + } + + // prepare reload event attributes with updatable configurations + attributes["max-concurrent-uploads"] = fmt.Sprintf("%d", *daemon.configStore.MaxConcurrentUploads) +} + +// reloadShutdownTimeout updates configuration with daemon shutdown timeout option +// and updates the passed attributes +func (daemon *Daemon) reloadShutdownTimeout(conf *config.Config, attributes map[string]string) { + // update corresponding configuration + if conf.IsValueSet("shutdown-timeout") { + daemon.configStore.ShutdownTimeout = conf.ShutdownTimeout + logrus.Debugf("Reset Shutdown Timeout: %d", daemon.configStore.ShutdownTimeout) + } + + // prepare reload event attributes with updatable configurations + attributes["shutdown-timeout"] = fmt.Sprintf("%d", daemon.configStore.ShutdownTimeout) +} + +// reloadClusterDiscovery updates configuration with cluster discovery options +// and updates the passed attributes +func (daemon *Daemon) reloadClusterDiscovery(conf *config.Config, attributes map[string]string) (err error) { + defer func() { + // prepare reload event attributes with updatable configurations + attributes["cluster-store"] = conf.ClusterStore + attributes["cluster-advertise"] = conf.ClusterAdvertise + + attributes["cluster-store-opts"] = "{}" + if daemon.configStore.ClusterOpts != nil { + opts, err2 := json.Marshal(conf.ClusterOpts) + if err != nil { + err = err2 + } + attributes["cluster-store-opts"] = string(opts) + } + }() + + newAdvertise := conf.ClusterAdvertise + newClusterStore := daemon.configStore.ClusterStore + if conf.IsValueSet("cluster-advertise") { + if conf.IsValueSet("cluster-store") { + newClusterStore = conf.ClusterStore + } + newAdvertise, err = config.ParseClusterAdvertiseSettings(newClusterStore, conf.ClusterAdvertise) + if err != nil && err != discovery.ErrDiscoveryDisabled { + return err + } + } + + if daemon.clusterProvider != nil { + if err := conf.IsSwarmCompatible(); err != nil { + return err + } + } + + // check discovery modifications + if !config.ModifiedDiscoverySettings(daemon.configStore, newClusterStore, newAdvertise, conf.ClusterOpts) { + return nil + } + + // enable discovery for the first time if it was not previously enabled + if daemon.discoveryWatcher == nil { + discoveryWatcher, err := discovery.Init(newClusterStore, newAdvertise, conf.ClusterOpts) + if err != nil { + return fmt.Errorf("failed to initialize discovery: %v", err) + } + daemon.discoveryWatcher = discoveryWatcher + } else if err == discovery.ErrDiscoveryDisabled { + // disable discovery if it was previously enabled and it's disabled now + daemon.discoveryWatcher.Stop() + } else if err = daemon.discoveryWatcher.Reload(conf.ClusterStore, newAdvertise, conf.ClusterOpts); err != nil { + // reload discovery + return err + } + + daemon.configStore.ClusterStore = newClusterStore + daemon.configStore.ClusterOpts = conf.ClusterOpts + daemon.configStore.ClusterAdvertise = newAdvertise + + if daemon.netController == nil { + return nil + } + netOptions, err := daemon.networkOptions(daemon.configStore, 
daemon.PluginStore, nil) + if err != nil { + logrus.WithError(err).Warnf("failed to get options with network controller") + return nil + } + err = daemon.netController.ReloadConfiguration(netOptions...) + if err != nil { + logrus.Warnf("Failed to reload configuration with network controller: %v", err) + } + return nil +} + +// reloadLabels updates configuration with engine labels +// and updates the passed attributes +func (daemon *Daemon) reloadLabels(conf *config.Config, attributes map[string]string) error { + // update corresponding configuration + if conf.IsValueSet("labels") { + daemon.configStore.Labels = conf.Labels + } + + // prepare reload event attributes with updatable configurations + if daemon.configStore.Labels != nil { + labels, err := json.Marshal(daemon.configStore.Labels) + if err != nil { + return err + } + attributes["labels"] = string(labels) + } else { + attributes["labels"] = "[]" + } + + return nil +} + +// reloadAllowNondistributableArtifacts updates the configuration with allow-nondistributable-artifacts options +// and updates the passed attributes. +func (daemon *Daemon) reloadAllowNondistributableArtifacts(conf *config.Config, attributes map[string]string) error { + // Update corresponding configuration. + if conf.IsValueSet("allow-nondistributable-artifacts") { + daemon.configStore.AllowNondistributableArtifacts = conf.AllowNondistributableArtifacts + if err := daemon.RegistryService.LoadAllowNondistributableArtifacts(conf.AllowNondistributableArtifacts); err != nil { + return err + } + } + + // Prepare reload event attributes with updatable configurations. + if daemon.configStore.AllowNondistributableArtifacts != nil { + v, err := json.Marshal(daemon.configStore.AllowNondistributableArtifacts) + if err != nil { + return err + } + attributes["allow-nondistributable-artifacts"] = string(v) + } else { + attributes["allow-nondistributable-artifacts"] = "[]" + } + + return nil +} + +// reloadInsecureRegistries updates configuration with insecure registry option +// and updates the passed attributes +func (daemon *Daemon) reloadInsecureRegistries(conf *config.Config, attributes map[string]string) error { + // update corresponding configuration + if conf.IsValueSet("insecure-registries") { + daemon.configStore.InsecureRegistries = conf.InsecureRegistries + if err := daemon.RegistryService.LoadInsecureRegistries(conf.InsecureRegistries); err != nil { + return err + } + } + + // prepare reload event attributes with updatable configurations + if daemon.configStore.InsecureRegistries != nil { + insecureRegistries, err := json.Marshal(daemon.configStore.InsecureRegistries) + if err != nil { + return err + } + attributes["insecure-registries"] = string(insecureRegistries) + } else { + attributes["insecure-registries"] = "[]" + } + + return nil +} + +// reloadRegistryMirrors updates configuration with registry mirror options +// and updates the passed attributes +func (daemon *Daemon) reloadRegistryMirrors(conf *config.Config, attributes map[string]string) error { + // update corresponding configuration + if conf.IsValueSet("registry-mirrors") { + daemon.configStore.Mirrors = conf.Mirrors + if err := daemon.RegistryService.LoadMirrors(conf.Mirrors); err != nil { + return err + } + } + + // prepare reload event attributes with updatable configurations + if daemon.configStore.Mirrors != nil { + mirrors, err := json.Marshal(daemon.configStore.Mirrors) + if err != nil { + return err + } + attributes["registry-mirrors"] = string(mirrors) + } else { + 
attributes["registry-mirrors"] = "[]" + } + + return nil +} + +// reloadLiveRestore updates configuration with live retore option +// and updates the passed attributes +func (daemon *Daemon) reloadLiveRestore(conf *config.Config, attributes map[string]string) error { + // update corresponding configuration + if conf.IsValueSet("live-restore") { + daemon.configStore.LiveRestoreEnabled = conf.LiveRestoreEnabled + if err := daemon.containerdRemote.UpdateOptions(libcontainerd.WithLiveRestore(conf.LiveRestoreEnabled)); err != nil { + return err + } + } + + // prepare reload event attributes with updatable configurations + attributes["live-restore"] = fmt.Sprintf("%t", daemon.configStore.LiveRestoreEnabled) + return nil +} diff --git a/components/engine/daemon/reload_test.go b/components/engine/daemon/reload_test.go new file mode 100644 index 0000000000..bf11b6bd56 --- /dev/null +++ b/components/engine/daemon/reload_test.go @@ -0,0 +1,474 @@ +// +build !solaris + +package daemon + +import ( + "reflect" + "sort" + "testing" + "time" + + "github.com/docker/docker/daemon/config" + "github.com/docker/docker/pkg/discovery" + _ "github.com/docker/docker/pkg/discovery/memory" + "github.com/docker/docker/registry" +) + +func TestDaemonReloadLabels(t *testing.T) { + daemon := &Daemon{} + daemon.configStore = &config.Config{ + CommonConfig: config.CommonConfig{ + Labels: []string{"foo:bar"}, + }, + } + + valuesSets := make(map[string]interface{}) + valuesSets["labels"] = "foo:baz" + newConfig := &config.Config{ + CommonConfig: config.CommonConfig{ + Labels: []string{"foo:baz"}, + ValuesSet: valuesSets, + }, + } + + if err := daemon.Reload(newConfig); err != nil { + t.Fatal(err) + } + + label := daemon.configStore.Labels[0] + if label != "foo:baz" { + t.Fatalf("Expected daemon label `foo:baz`, got %s", label) + } +} + +func TestDaemonReloadAllowNondistributableArtifacts(t *testing.T) { + daemon := &Daemon{ + configStore: &config.Config{}, + } + + // Initialize daemon with some registries. + daemon.RegistryService = registry.NewService(registry.ServiceOptions{ + AllowNondistributableArtifacts: []string{ + "127.0.0.0/8", + "10.10.1.11:5000", + "10.10.1.22:5000", // This will be removed during reload. + "docker1.com", + "docker2.com", // This will be removed during reload. + }, + }) + + registries := []string{ + "127.0.0.0/8", + "10.10.1.11:5000", + "10.10.1.33:5000", // This will be added during reload. + "docker1.com", + "docker3.com", // This will be added during reload. 
+ } + + newConfig := &config.Config{ + CommonConfig: config.CommonConfig{ + ServiceOptions: registry.ServiceOptions{ + AllowNondistributableArtifacts: registries, + }, + ValuesSet: map[string]interface{}{ + "allow-nondistributable-artifacts": registries, + }, + }, + } + + if err := daemon.Reload(newConfig); err != nil { + t.Fatal(err) + } + + actual := []string{} + serviceConfig := daemon.RegistryService.ServiceConfig() + for _, value := range serviceConfig.AllowNondistributableArtifactsCIDRs { + actual = append(actual, value.String()) + } + for _, value := range serviceConfig.AllowNondistributableArtifactsHostnames { + actual = append(actual, value) + } + + sort.Strings(registries) + sort.Strings(actual) + if !reflect.DeepEqual(registries, actual) { + t.Fatalf("expected %v, got %v\n", registries, actual) + } +} + +func TestDaemonReloadMirrors(t *testing.T) { + daemon := &Daemon{} + daemon.RegistryService = registry.NewService(registry.ServiceOptions{ + InsecureRegistries: []string{}, + Mirrors: []string{ + "https://mirror.test1.com", + "https://mirror.test2.com", // this will be removed when reloading + "https://mirror.test3.com", // this will be removed when reloading + }, + }) + + daemon.configStore = &config.Config{} + + type pair struct { + valid bool + mirrors []string + after []string + } + + loadMirrors := []pair{ + { + valid: false, + mirrors: []string{"10.10.1.11:5000"}, // this mirror is invalid + after: []string{}, + }, + { + valid: false, + mirrors: []string{"mirror.test1.com"}, // this mirror is invalid + after: []string{}, + }, + { + valid: false, + mirrors: []string{"10.10.1.11:5000", "mirror.test1.com"}, // mirrors are invalid + after: []string{}, + }, + { + valid: true, + mirrors: []string{"https://mirror.test1.com", "https://mirror.test4.com"}, + after: []string{"https://mirror.test1.com/", "https://mirror.test4.com/"}, + }, + } + + for _, value := range loadMirrors { + valuesSets := make(map[string]interface{}) + valuesSets["registry-mirrors"] = value.mirrors + + newConfig := &config.Config{ + CommonConfig: config.CommonConfig{ + ServiceOptions: registry.ServiceOptions{ + Mirrors: value.mirrors, + }, + ValuesSet: valuesSets, + }, + } + + err := daemon.Reload(newConfig) + if !value.valid && err == nil { + // mirrors should be invalid, should be a non-nil error + t.Fatalf("Expected daemon reload error with invalid mirrors: %s, while get nil", value.mirrors) + } + + if value.valid { + if err != nil { + // mirrors should be valid, should be no error + t.Fatal(err) + } + registryService := daemon.RegistryService.ServiceConfig() + + if len(registryService.Mirrors) != len(value.after) { + t.Fatalf("Expected %d daemon mirrors %s while get %d with %s", + len(value.after), + value.after, + len(registryService.Mirrors), + registryService.Mirrors) + } + + dataMap := map[string]struct{}{} + + for _, mirror := range registryService.Mirrors { + if _, exist := dataMap[mirror]; !exist { + dataMap[mirror] = struct{}{} + } + } + + for _, address := range value.after { + if _, exist := dataMap[address]; !exist { + t.Fatalf("Expected %s in daemon mirrors, while get none", address) + } + } + } + } +} + +func TestDaemonReloadInsecureRegistries(t *testing.T) { + daemon := &Daemon{} + // initialize daemon with existing insecure registries: "127.0.0.0/8", "10.10.1.11:5000", "10.10.1.22:5000" + daemon.RegistryService = registry.NewService(registry.ServiceOptions{ + InsecureRegistries: []string{ + "127.0.0.0/8", + "10.10.1.11:5000", + "10.10.1.22:5000", // this will be removed when reloading + 
"docker1.com", + "docker2.com", // this will be removed when reloading + }, + }) + + daemon.configStore = &config.Config{} + + insecureRegistries := []string{ + "127.0.0.0/8", // this will be kept + "10.10.1.11:5000", // this will be kept + "10.10.1.33:5000", // this will be newly added + "docker1.com", // this will be kept + "docker3.com", // this will be newly added + } + + valuesSets := make(map[string]interface{}) + valuesSets["insecure-registries"] = insecureRegistries + + newConfig := &config.Config{ + CommonConfig: config.CommonConfig{ + ServiceOptions: registry.ServiceOptions{ + InsecureRegistries: insecureRegistries, + }, + ValuesSet: valuesSets, + }, + } + + if err := daemon.Reload(newConfig); err != nil { + t.Fatal(err) + } + + // After Reload, daemon.RegistryService will be changed which is useful + // for registry communication in daemon. + registries := daemon.RegistryService.ServiceConfig() + + // After Reload(), newConfig has come to registries.InsecureRegistryCIDRs and registries.IndexConfigs in daemon. + // Then collect registries.InsecureRegistryCIDRs in dataMap. + // When collecting, we need to convert CIDRS into string as a key, + // while the times of key appears as value. + dataMap := map[string]int{} + for _, value := range registries.InsecureRegistryCIDRs { + if _, ok := dataMap[value.String()]; !ok { + dataMap[value.String()] = 1 + } else { + dataMap[value.String()]++ + } + } + + for _, value := range registries.IndexConfigs { + if _, ok := dataMap[value.Name]; !ok { + dataMap[value.Name] = 1 + } else { + dataMap[value.Name]++ + } + } + + // Finally compare dataMap with the original insecureRegistries. + // Each value in insecureRegistries should appear in daemon's insecure registries, + // and each can only appear exactly ONCE. 
+ for _, r := range insecureRegistries { + if value, ok := dataMap[r]; !ok { + t.Fatalf("Expected daemon insecure registry %s, got none", r) + } else if value != 1 { + t.Fatalf("Expected only 1 daemon insecure registry %s, got %d", r, value) + } + } + + // assert if "10.10.1.22:5000" is removed when reloading + if value, ok := dataMap["10.10.1.22:5000"]; ok { + t.Fatalf("Expected no insecure registry of 10.10.1.22:5000, got %d", value) + } + + // assert if "docker2.com" is removed when reloading + if value, ok := dataMap["docker2.com"]; ok { + t.Fatalf("Expected no insecure registry of docker2.com, got %d", value) + } +} + +func TestDaemonReloadNotAffectOthers(t *testing.T) { + daemon := &Daemon{} + daemon.configStore = &config.Config{ + CommonConfig: config.CommonConfig{ + Labels: []string{"foo:bar"}, + Debug: true, + }, + } + + valuesSets := make(map[string]interface{}) + valuesSets["labels"] = "foo:baz" + newConfig := &config.Config{ + CommonConfig: config.CommonConfig{ + Labels: []string{"foo:baz"}, + ValuesSet: valuesSets, + }, + } + + if err := daemon.Reload(newConfig); err != nil { + t.Fatal(err) + } + + label := daemon.configStore.Labels[0] + if label != "foo:baz" { + t.Fatalf("Expected daemon label `foo:baz`, got %s", label) + } + debug := daemon.configStore.Debug + if !debug { + t.Fatal("Expected debug 'enabled', got 'disabled'") + } +} + +func TestDaemonDiscoveryReload(t *testing.T) { + daemon := &Daemon{} + daemon.configStore = &config.Config{ + CommonConfig: config.CommonConfig{ + ClusterStore: "memory://127.0.0.1", + ClusterAdvertise: "127.0.0.1:3333", + }, + } + + if err := daemon.initDiscovery(daemon.configStore); err != nil { + t.Fatal(err) + } + + expected := discovery.Entries{ + &discovery.Entry{Host: "127.0.0.1", Port: "3333"}, + } + + select { + case <-time.After(10 * time.Second): + t.Fatal("timeout waiting for discovery") + case <-daemon.discoveryWatcher.ReadyCh(): + } + + stopCh := make(chan struct{}) + defer close(stopCh) + ch, errCh := daemon.discoveryWatcher.Watch(stopCh) + + select { + case <-time.After(1 * time.Second): + t.Fatal("failed to get discovery advertisements in time") + case e := <-ch: + if !reflect.DeepEqual(e, expected) { + t.Fatalf("expected %v, got %v\n", expected, e) + } + case e := <-errCh: + t.Fatal(e) + } + + valuesSets := make(map[string]interface{}) + valuesSets["cluster-store"] = "memory://127.0.0.1:2222" + valuesSets["cluster-advertise"] = "127.0.0.1:5555" + newConfig := &config.Config{ + CommonConfig: config.CommonConfig{ + ClusterStore: "memory://127.0.0.1:2222", + ClusterAdvertise: "127.0.0.1:5555", + ValuesSet: valuesSets, + }, + } + + expected = discovery.Entries{ + &discovery.Entry{Host: "127.0.0.1", Port: "5555"}, + } + + if err := daemon.Reload(newConfig); err != nil { + t.Fatal(err) + } + + select { + case <-time.After(10 * time.Second): + t.Fatal("timeout waiting for discovery") + case <-daemon.discoveryWatcher.ReadyCh(): + } + + ch, errCh = daemon.discoveryWatcher.Watch(stopCh) + + select { + case <-time.After(1 * time.Second): + t.Fatal("failed to get discovery advertisements in time") + case e := <-ch: + if !reflect.DeepEqual(e, expected) { + t.Fatalf("expected %v, got %v\n", expected, e) + } + case e := <-errCh: + t.Fatal(e) + } +} + +func TestDaemonDiscoveryReloadFromEmptyDiscovery(t *testing.T) { + daemon := &Daemon{} + daemon.configStore = &config.Config{} + + valuesSet := make(map[string]interface{}) + valuesSet["cluster-store"] = "memory://127.0.0.1:2222" + valuesSet["cluster-advertise"] = "127.0.0.1:5555" + newConfig 
:= &config.Config{ + CommonConfig: config.CommonConfig{ + ClusterStore: "memory://127.0.0.1:2222", + ClusterAdvertise: "127.0.0.1:5555", + ValuesSet: valuesSet, + }, + } + + expected := discovery.Entries{ + &discovery.Entry{Host: "127.0.0.1", Port: "5555"}, + } + + if err := daemon.Reload(newConfig); err != nil { + t.Fatal(err) + } + + select { + case <-time.After(10 * time.Second): + t.Fatal("timeout waiting for discovery") + case <-daemon.discoveryWatcher.ReadyCh(): + } + + stopCh := make(chan struct{}) + defer close(stopCh) + ch, errCh := daemon.discoveryWatcher.Watch(stopCh) + + select { + case <-time.After(1 * time.Second): + t.Fatal("failed to get discovery advertisements in time") + case e := <-ch: + if !reflect.DeepEqual(e, expected) { + t.Fatalf("expected %v, got %v\n", expected, e) + } + case e := <-errCh: + t.Fatal(e) + } +} + +func TestDaemonDiscoveryReloadOnlyClusterAdvertise(t *testing.T) { + daemon := &Daemon{} + daemon.configStore = &config.Config{ + CommonConfig: config.CommonConfig{ + ClusterStore: "memory://127.0.0.1", + }, + } + valuesSets := make(map[string]interface{}) + valuesSets["cluster-advertise"] = "127.0.0.1:5555" + newConfig := &config.Config{ + CommonConfig: config.CommonConfig{ + ClusterAdvertise: "127.0.0.1:5555", + ValuesSet: valuesSets, + }, + } + expected := discovery.Entries{ + &discovery.Entry{Host: "127.0.0.1", Port: "5555"}, + } + + if err := daemon.Reload(newConfig); err != nil { + t.Fatal(err) + } + + select { + case <-daemon.discoveryWatcher.ReadyCh(): + case <-time.After(10 * time.Second): + t.Fatal("Timeout waiting for discovery") + } + stopCh := make(chan struct{}) + defer close(stopCh) + ch, errCh := daemon.discoveryWatcher.Watch(stopCh) + + select { + case <-time.After(1 * time.Second): + t.Fatal("failed to get discovery advertisements in time") + case e := <-ch: + if !reflect.DeepEqual(e, expected) { + t.Fatalf("expected %v, got %v\n", expected, e) + } + case e := <-errCh: + t.Fatal(e) + } +} diff --git a/components/engine/daemon/rename.go b/components/engine/daemon/rename.go new file mode 100644 index 0000000000..2770ead80c --- /dev/null +++ b/components/engine/daemon/rename.go @@ -0,0 +1,123 @@ +package daemon + +import ( + "errors" + "fmt" + "strings" + + "github.com/Sirupsen/logrus" + dockercontainer "github.com/docker/docker/container" + "github.com/docker/libnetwork" +) + +// ContainerRename changes the name of a container, using the oldName +// to find the container. An error is returned if newName is already +// reserved. 
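The rename path below reserves the new name before releasing the old one, and a deferred block rolls the reservation back if any later step fails. A minimal standalone sketch of that pattern, assuming a hypothetical in-memory name registry (reg) rather than the daemon's real name index:

package main

import (
	"errors"
	"fmt"
)

// renameKey sketches the reserve-then-release shape used by ContainerRename:
// claim the new name first, then drop the old one, rolling back the claim
// if a later step reports failure through the named return value.
func renameKey(reg map[string]string, oldName, newName, id string) (err error) {
	if _, taken := reg[newName]; taken {
		return errors.New("name already reserved")
	}
	reg[newName] = id // reserve the new name up front
	defer func() {
		if err != nil {
			delete(reg, newName) // roll back the reservation on failure
		}
	}()
	delete(reg, oldName) // release the old name only after the reservation
	return nil
}

func main() {
	reg := map[string]string{"/old": "c1"}
	fmt.Println(renameKey(reg, "/old", "/new", "c1"), reg)
}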
+func (daemon *Daemon) ContainerRename(oldName, newName string) error { + var ( + sid string + sb libnetwork.Sandbox + ) + + if oldName == "" || newName == "" { + return errors.New("Neither old nor new names may be empty") + } + + if newName[0] != '/' { + newName = "/" + newName + } + + container, err := daemon.GetContainer(oldName) + if err != nil { + return err + } + + oldName = container.Name + oldIsAnonymousEndpoint := container.NetworkSettings.IsAnonymousEndpoint + + if oldName == newName { + return errors.New("Renaming a container with the same name as its current name") + } + + container.Lock() + defer container.Unlock() + + links := map[string]*dockercontainer.Container{} + for k, v := range daemon.linkIndex.children(container) { + if !strings.HasPrefix(k, oldName) { + return fmt.Errorf("Linked container %s does not match parent %s", k, oldName) + } + links[strings.TrimPrefix(k, oldName)] = v + } + + if newName, err = daemon.reserveName(container.ID, newName); err != nil { + return fmt.Errorf("Error when allocating new name: %v", err) + } + + for k, v := range links { + daemon.nameIndex.Reserve(newName+k, v.ID) + daemon.linkIndex.link(container, v, newName+k) + } + + container.Name = newName + container.NetworkSettings.IsAnonymousEndpoint = false + + defer func() { + if err != nil { + container.Name = oldName + container.NetworkSettings.IsAnonymousEndpoint = oldIsAnonymousEndpoint + daemon.reserveName(container.ID, oldName) + for k, v := range links { + daemon.nameIndex.Reserve(oldName+k, v.ID) + daemon.linkIndex.link(container, v, oldName+k) + daemon.linkIndex.unlink(newName+k, v, container) + daemon.nameIndex.Release(newName + k) + } + daemon.releaseName(newName) + } + }() + + for k, v := range links { + daemon.linkIndex.unlink(oldName+k, v, container) + daemon.nameIndex.Release(oldName + k) + } + daemon.releaseName(oldName) + if err = container.ToDisk(); err != nil { + return err + } + + attributes := map[string]string{ + "oldName": oldName, + } + + if !container.Running { + daemon.LogContainerEventWithAttributes(container, "rename", attributes) + return nil + } + + defer func() { + if err != nil { + container.Name = oldName + container.NetworkSettings.IsAnonymousEndpoint = oldIsAnonymousEndpoint + if e := container.ToDisk(); e != nil { + logrus.Errorf("%s: Failed in writing to Disk on rename failure: %v", container.ID, e) + } + } + }() + + sid = container.NetworkSettings.SandboxID + if sid != "" && daemon.netController != nil { + sb, err = daemon.netController.SandboxByID(sid) + if err != nil { + return err + } + + err = sb.Rename(strings.TrimPrefix(container.Name, "/")) + if err != nil { + return err + } + } + + daemon.LogContainerEventWithAttributes(container, "rename", attributes) + return nil +} diff --git a/components/engine/daemon/resize.go b/components/engine/daemon/resize.go new file mode 100644 index 0000000000..747353852e --- /dev/null +++ b/components/engine/daemon/resize.go @@ -0,0 +1,40 @@ +package daemon + +import ( + "fmt" + + "github.com/docker/docker/libcontainerd" +) + +// ContainerResize changes the size of the TTY of the process running +// in the container with the given name to the given height and width. 
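A hedged usage sketch for the resize path that follows: the official Go client's ContainerResize call, which reaches this daemon method via the container resize endpoint. The client constructor is the era-appropriate one, and "my-container" is a placeholder name:

package main

import (
	"context"
	"log"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
)

func main() {
	// NewEnvClient reads DOCKER_HOST and friends from the environment.
	cli, err := client.NewEnvClient()
	if err != nil {
		log.Fatal(err)
	}
	// Resize the container's TTY to 40 rows by 120 columns; the container
	// must be running, mirroring the IsRunning check in the daemon method.
	err = cli.ContainerResize(context.Background(), "my-container",
		types.ResizeOptions{Height: 40, Width: 120})
	if err != nil {
		log.Fatal(err)
	}
}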
+func (daemon *Daemon) ContainerResize(name string, height, width int) error { + container, err := daemon.GetContainer(name) + if err != nil { + return err + } + + if !container.IsRunning() { + return errNotRunning{container.ID} + } + + if err = daemon.containerd.Resize(container.ID, libcontainerd.InitFriendlyName, width, height); err == nil { + attributes := map[string]string{ + "height": fmt.Sprintf("%d", height), + "width": fmt.Sprintf("%d", width), + } + daemon.LogContainerEventWithAttributes(container, "resize", attributes) + } + return err +} + +// ContainerExecResize changes the size of the TTY of the process +// running in the exec with the given name to the given height and +// width. +func (daemon *Daemon) ContainerExecResize(name string, height, width int) error { + ec, err := daemon.getExecConfig(name) + if err != nil { + return err + } + return daemon.containerd.Resize(ec.ContainerID, ec.ID, width, height) +} diff --git a/components/engine/daemon/restart.go b/components/engine/daemon/restart.go new file mode 100644 index 0000000000..79292f3752 --- /dev/null +++ b/components/engine/daemon/restart.go @@ -0,0 +1,70 @@ +package daemon + +import ( + "fmt" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/container" +) + +// ContainerRestart stops and starts a container. It attempts to +// gracefully stop the container within the given timeout, forcefully +// stopping it if the timeout is exceeded. If given a negative +// timeout, ContainerRestart will wait forever until a graceful +// stop. Returns an error if the container cannot be found, or if +// there is an underlying error at any stage of the restart. +func (daemon *Daemon) ContainerRestart(name string, seconds *int) error { + container, err := daemon.GetContainer(name) + if err != nil { + return err + } + if seconds == nil { + stopTimeout := container.StopTimeout() + seconds = &stopTimeout + } + if err := daemon.containerRestart(container, *seconds); err != nil { + return fmt.Errorf("Cannot restart container %s: %v", name, err) + } + return nil + +} + +// containerRestart attempts to gracefully stop and then start the +// container. When stopping, wait for the given duration in seconds to +// gracefully stop, before forcefully terminating the container. If +// given a negative duration, wait forever for a graceful stop. 
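The stop-then-start semantics described above hinge on the timeout: wait up to the given number of seconds for a graceful exit, force-kill on expiry, and wait forever when the value is negative. A self-contained sketch of just that decision, where exited and kill are stand-ins for the container's wait channel and kill path, not daemon APIs:

package main

import (
	"fmt"
	"time"
)

// stopWithTimeout waits up to `seconds` for a graceful exit, then forces
// termination; a negative value means wait indefinitely.
func stopWithTimeout(exited <-chan struct{}, kill func(), seconds int) {
	if seconds < 0 {
		<-exited // negative timeout: wait forever for a graceful stop
		return
	}
	select {
	case <-exited: // stopped within the grace period
	case <-time.After(time.Duration(seconds) * time.Second):
		kill() // grace period exceeded: terminate forcefully
		<-exited
	}
}

func main() {
	exited := make(chan struct{})
	go func() { time.Sleep(50 * time.Millisecond); close(exited) }()
	stopWithTimeout(exited, func() { fmt.Println("killed") }, 1)
	fmt.Println("stopped")
}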
+func (daemon *Daemon) containerRestart(container *container.Container, seconds int) error { + // Avoid unnecessarily unmounting and then directly mounting + // the container when the container stops and then starts + // again + if err := daemon.Mount(container); err == nil { + defer daemon.Unmount(container) + } + + if container.IsRunning() { + // set AutoRemove flag to false before stop so the container won't be + // removed during the restart process + autoRemove := container.HostConfig.AutoRemove + + container.HostConfig.AutoRemove = false + err := daemon.containerStop(container, seconds) + // restore AutoRemove irrespective of whether the stop worked or not + container.HostConfig.AutoRemove = autoRemove + // containerStop writes HostConfig to disk, so restore AutoRemove + // on disk as well + if toDiskErr := container.ToDiskLocking(); toDiskErr != nil { + logrus.Errorf("Write container to disk error: %v", toDiskErr) + } + + if err != nil { + return err + } + } + + if err := daemon.containerStart(container, "", "", true); err != nil { + return err + } + + daemon.LogContainerEvent(container, "restart") + return nil +} diff --git a/components/engine/daemon/search.go b/components/engine/daemon/search.go new file mode 100644 index 0000000000..5d2ac5d222 --- /dev/null +++ b/components/engine/daemon/search.go @@ -0,0 +1,94 @@ +package daemon + +import ( + "fmt" + "strconv" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + registrytypes "github.com/docker/docker/api/types/registry" + "github.com/docker/docker/dockerversion" +) + +var acceptedSearchFilterTags = map[string]bool{ + "is-automated": true, + "is-official": true, + "stars": true, +} + +// SearchRegistryForImages queries the registry for images matching +// term. authConfig is used to login.
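The filtersArgs string parsed below is the Engine API's JSON filter encoding: a map from filter name to a set of values. A small sketch of producing that encoding with the same filters package the daemon uses (key order in the printed JSON may vary):

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/filters"
)

func main() {
	// Build the filter set a client would send: stars >= 3, official only.
	args := filters.NewArgs()
	args.Add("stars", "3")
	args.Add("is-official", "true")

	// ToParam serializes to the wire format consumed by filters.FromParam
	// in SearchRegistryForImages, e.g.
	// {"is-official":{"true":true},"stars":{"3":true}}
	encoded, err := filters.ToParam(args)
	if err != nil {
		panic(err)
	}
	fmt.Println(encoded)
}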
+func (daemon *Daemon) SearchRegistryForImages(ctx context.Context, filtersArgs string, term string, limit int, + authConfig *types.AuthConfig, + headers map[string][]string) (*registrytypes.SearchResults, error) { + + searchFilters, err := filters.FromParam(filtersArgs) + if err != nil { + return nil, err + } + if err := searchFilters.Validate(acceptedSearchFilterTags); err != nil { + return nil, err + } + + var isAutomated, isOfficial bool + var hasStarFilter = 0 + if searchFilters.Include("is-automated") { + if searchFilters.UniqueExactMatch("is-automated", "true") { + isAutomated = true + } else if !searchFilters.UniqueExactMatch("is-automated", "false") { + return nil, fmt.Errorf("Invalid filter 'is-automated=%s'", searchFilters.Get("is-automated")) + } + } + if searchFilters.Include("is-official") { + if searchFilters.UniqueExactMatch("is-official", "true") { + isOfficial = true + } else if !searchFilters.UniqueExactMatch("is-official", "false") { + return nil, fmt.Errorf("Invalid filter 'is-official=%s'", searchFilters.Get("is-official")) + } + } + if searchFilters.Include("stars") { + hasStars := searchFilters.Get("stars") + for _, hasStar := range hasStars { + iHasStar, err := strconv.Atoi(hasStar) + if err != nil { + return nil, fmt.Errorf("Invalid filter 'stars=%s'", hasStar) + } + if iHasStar > hasStarFilter { + hasStarFilter = iHasStar + } + } + } + + unfilteredResult, err := daemon.RegistryService.Search(ctx, term, limit, authConfig, dockerversion.DockerUserAgent(ctx), headers) + if err != nil { + return nil, err + } + + filteredResults := []registrytypes.SearchResult{} + for _, result := range unfilteredResult.Results { + if searchFilters.Include("is-automated") { + if isAutomated != result.IsAutomated { + continue + } + } + if searchFilters.Include("is-official") { + if isOfficial != result.IsOfficial { + continue + } + } + if searchFilters.Include("stars") { + if result.StarCount < hasStarFilter { + continue + } + } + filteredResults = append(filteredResults, result) + } + + return ®istrytypes.SearchResults{ + Query: unfilteredResult.Query, + NumResults: len(filteredResults), + Results: filteredResults, + }, nil +} diff --git a/components/engine/daemon/search_test.go b/components/engine/daemon/search_test.go new file mode 100644 index 0000000000..85237192e9 --- /dev/null +++ b/components/engine/daemon/search_test.go @@ -0,0 +1,358 @@ +package daemon + +import ( + "errors" + "strings" + "testing" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + registrytypes "github.com/docker/docker/api/types/registry" + "github.com/docker/docker/registry" +) + +type FakeService struct { + registry.DefaultService + + shouldReturnError bool + + term string + results []registrytypes.SearchResult +} + +func (s *FakeService) Search(ctx context.Context, term string, limit int, authConfig *types.AuthConfig, userAgent string, headers map[string][]string) (*registrytypes.SearchResults, error) { + if s.shouldReturnError { + return nil, errors.New("Search unknown error") + } + return ®istrytypes.SearchResults{ + Query: s.term, + NumResults: len(s.results), + Results: s.results, + }, nil +} + +func TestSearchRegistryForImagesErrors(t *testing.T) { + errorCases := []struct { + filtersArgs string + shouldReturnError bool + expectedError string + }{ + { + expectedError: "Search unknown error", + shouldReturnError: true, + }, + { + filtersArgs: "invalid json", + expectedError: "invalid character 'i' looking for beginning of value", + }, + { + filtersArgs: 
`{"type":{"custom":true}}`, + expectedError: "Invalid filter 'type'", + }, + { + filtersArgs: `{"is-automated":{"invalid":true}}`, + expectedError: "Invalid filter 'is-automated=[invalid]'", + }, + { + filtersArgs: `{"is-automated":{"true":true,"false":true}}`, + expectedError: "Invalid filter 'is-automated", + }, + { + filtersArgs: `{"is-official":{"invalid":true}}`, + expectedError: "Invalid filter 'is-official=[invalid]'", + }, + { + filtersArgs: `{"is-official":{"true":true,"false":true}}`, + expectedError: "Invalid filter 'is-official", + }, + { + filtersArgs: `{"stars":{"invalid":true}}`, + expectedError: "Invalid filter 'stars=invalid'", + }, + { + filtersArgs: `{"stars":{"1":true,"invalid":true}}`, + expectedError: "Invalid filter 'stars=invalid'", + }, + } + for index, e := range errorCases { + daemon := &Daemon{ + RegistryService: &FakeService{ + shouldReturnError: e.shouldReturnError, + }, + } + _, err := daemon.SearchRegistryForImages(context.Background(), e.filtersArgs, "term", 25, nil, map[string][]string{}) + if err == nil { + t.Errorf("%d: expected an error, got nothing", index) + } + if !strings.Contains(err.Error(), e.expectedError) { + t.Errorf("%d: expected error to contain %s, got %s", index, e.expectedError, err.Error()) + } + } +} + +func TestSearchRegistryForImages(t *testing.T) { + term := "term" + successCases := []struct { + filtersArgs string + registryResults []registrytypes.SearchResult + expectedResults []registrytypes.SearchResult + }{ + { + filtersArgs: "", + registryResults: []registrytypes.SearchResult{}, + expectedResults: []registrytypes.SearchResult{}, + }, + { + filtersArgs: "", + registryResults: []registrytypes.SearchResult{ + { + Name: "name", + Description: "description", + }, + }, + expectedResults: []registrytypes.SearchResult{ + { + Name: "name", + Description: "description", + }, + }, + }, + { + filtersArgs: `{"is-automated":{"true":true}}`, + registryResults: []registrytypes.SearchResult{ + { + Name: "name", + Description: "description", + }, + }, + expectedResults: []registrytypes.SearchResult{}, + }, + { + filtersArgs: `{"is-automated":{"true":true}}`, + registryResults: []registrytypes.SearchResult{ + { + Name: "name", + Description: "description", + IsAutomated: true, + }, + }, + expectedResults: []registrytypes.SearchResult{ + { + Name: "name", + Description: "description", + IsAutomated: true, + }, + }, + }, + { + filtersArgs: `{"is-automated":{"false":true}}`, + registryResults: []registrytypes.SearchResult{ + { + Name: "name", + Description: "description", + IsAutomated: true, + }, + }, + expectedResults: []registrytypes.SearchResult{}, + }, + { + filtersArgs: `{"is-automated":{"false":true}}`, + registryResults: []registrytypes.SearchResult{ + { + Name: "name", + Description: "description", + IsAutomated: false, + }, + }, + expectedResults: []registrytypes.SearchResult{ + { + Name: "name", + Description: "description", + IsAutomated: false, + }, + }, + }, + { + filtersArgs: `{"is-official":{"true":true}}`, + registryResults: []registrytypes.SearchResult{ + { + Name: "name", + Description: "description", + }, + }, + expectedResults: []registrytypes.SearchResult{}, + }, + { + filtersArgs: `{"is-official":{"true":true}}`, + registryResults: []registrytypes.SearchResult{ + { + Name: "name", + Description: "description", + IsOfficial: true, + }, + }, + expectedResults: []registrytypes.SearchResult{ + { + Name: "name", + Description: "description", + IsOfficial: true, + }, + }, + }, + { + filtersArgs: `{"is-official":{"false":true}}`, + 
registryResults: []registrytypes.SearchResult{ + { + Name: "name", + Description: "description", + IsOfficial: true, + }, + }, + expectedResults: []registrytypes.SearchResult{}, + }, + { + filtersArgs: `{"is-official":{"false":true}}`, + registryResults: []registrytypes.SearchResult{ + { + Name: "name", + Description: "description", + IsOfficial: false, + }, + }, + expectedResults: []registrytypes.SearchResult{ + { + Name: "name", + Description: "description", + IsOfficial: false, + }, + }, + }, + { + filtersArgs: `{"stars":{"0":true}}`, + registryResults: []registrytypes.SearchResult{ + { + Name: "name", + Description: "description", + StarCount: 0, + }, + }, + expectedResults: []registrytypes.SearchResult{ + { + Name: "name", + Description: "description", + StarCount: 0, + }, + }, + }, + { + filtersArgs: `{"stars":{"1":true}}`, + registryResults: []registrytypes.SearchResult{ + { + Name: "name", + Description: "description", + StarCount: 0, + }, + }, + expectedResults: []registrytypes.SearchResult{}, + }, + { + filtersArgs: `{"stars":{"1":true}}`, + registryResults: []registrytypes.SearchResult{ + { + Name: "name0", + Description: "description0", + StarCount: 0, + }, + { + Name: "name1", + Description: "description1", + StarCount: 1, + }, + }, + expectedResults: []registrytypes.SearchResult{ + { + Name: "name1", + Description: "description1", + StarCount: 1, + }, + }, + }, + { + filtersArgs: `{"stars":{"1":true}, "is-official":{"true":true}, "is-automated":{"true":true}}`, + registryResults: []registrytypes.SearchResult{ + { + Name: "name0", + Description: "description0", + StarCount: 0, + IsOfficial: true, + IsAutomated: true, + }, + { + Name: "name1", + Description: "description1", + StarCount: 1, + IsOfficial: false, + IsAutomated: true, + }, + { + Name: "name2", + Description: "description2", + StarCount: 1, + IsOfficial: true, + IsAutomated: false, + }, + { + Name: "name3", + Description: "description3", + StarCount: 2, + IsOfficial: true, + IsAutomated: true, + }, + }, + expectedResults: []registrytypes.SearchResult{ + { + Name: "name3", + Description: "description3", + StarCount: 2, + IsOfficial: true, + IsAutomated: true, + }, + }, + }, + } + for index, s := range successCases { + daemon := &Daemon{ + RegistryService: &FakeService{ + term: term, + results: s.registryResults, + }, + } + results, err := daemon.SearchRegistryForImages(context.Background(), s.filtersArgs, term, 25, nil, map[string][]string{}) + if err != nil { + t.Errorf("%d: %v", index, err) + } + if results.Query != term { + t.Errorf("%d: expected Query to be %s, got %s", index, term, results.Query) + } + if results.NumResults != len(s.expectedResults) { + t.Errorf("%d: expected NumResults to be %d, got %d", index, len(s.expectedResults), results.NumResults) + } + for _, result := range results.Results { + found := false + for _, expectedResult := range s.expectedResults { + if expectedResult.Name == result.Name && + expectedResult.Description == result.Description && + expectedResult.IsAutomated == result.IsAutomated && + expectedResult.IsOfficial == result.IsOfficial && + expectedResult.StarCount == result.StarCount { + found = true + break + } + } + if !found { + t.Errorf("%d: expected results %v, got %v", index, s.expectedResults, results.Results) + } + } + } +} diff --git a/components/engine/daemon/seccomp_disabled.go b/components/engine/daemon/seccomp_disabled.go new file mode 100644 index 0000000000..ff1127b6c2 --- /dev/null +++ b/components/engine/daemon/seccomp_disabled.go @@ -0,0 +1,19 @@ +// +build 
linux,!seccomp + +package daemon + +import ( + "fmt" + + "github.com/docker/docker/container" + "github.com/opencontainers/runtime-spec/specs-go" +) + +var supportsSeccomp = false + +func setSeccomp(daemon *Daemon, rs *specs.Spec, c *container.Container) error { + if c.SeccompProfile != "" && c.SeccompProfile != "unconfined" { + return fmt.Errorf("seccomp profiles are not supported on this daemon, you cannot specify a custom seccomp profile") + } + return nil +} diff --git a/components/engine/daemon/seccomp_linux.go b/components/engine/daemon/seccomp_linux.go new file mode 100644 index 0000000000..472e3133c0 --- /dev/null +++ b/components/engine/daemon/seccomp_linux.go @@ -0,0 +1,55 @@ +// +build linux,seccomp + +package daemon + +import ( + "fmt" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/container" + "github.com/docker/docker/profiles/seccomp" + "github.com/opencontainers/runtime-spec/specs-go" +) + +var supportsSeccomp = true + +func setSeccomp(daemon *Daemon, rs *specs.Spec, c *container.Container) error { + var profile *specs.LinuxSeccomp + var err error + + if c.HostConfig.Privileged { + return nil + } + + if !daemon.seccompEnabled { + if c.SeccompProfile != "" && c.SeccompProfile != "unconfined" { + return fmt.Errorf("Seccomp is not enabled in your kernel, cannot run a custom seccomp profile.") + } + logrus.Warn("Seccomp is not enabled in your kernel, running container without default profile.") + c.SeccompProfile = "unconfined" + } + if c.SeccompProfile == "unconfined" { + return nil + } + if c.SeccompProfile != "" { + profile, err = seccomp.LoadProfile(c.SeccompProfile, rs) + if err != nil { + return err + } + } else { + if daemon.seccompProfile != nil { + profile, err = seccomp.LoadProfile(string(daemon.seccompProfile), rs) + if err != nil { + return err + } + } else { + profile, err = seccomp.GetDefaultProfile(rs) + if err != nil { + return err + } + } + } + + rs.Linux.Seccomp = profile + return nil +} diff --git a/components/engine/daemon/seccomp_unsupported.go b/components/engine/daemon/seccomp_unsupported.go new file mode 100644 index 0000000000..b3691e96af --- /dev/null +++ b/components/engine/daemon/seccomp_unsupported.go @@ -0,0 +1,5 @@ +// +build !linux + +package daemon + +var supportsSeccomp = false diff --git a/components/engine/daemon/secrets.go b/components/engine/daemon/secrets.go new file mode 100644 index 0000000000..90fa99e987 --- /dev/null +++ b/components/engine/daemon/secrets.go @@ -0,0 +1,23 @@ +package daemon + +import ( + "github.com/Sirupsen/logrus" + swarmtypes "github.com/docker/docker/api/types/swarm" +) + +// SetContainerSecretReferences sets the container secret references needed +func (daemon *Daemon) SetContainerSecretReferences(name string, refs []*swarmtypes.SecretReference) error { + if !secretsSupported() && len(refs) > 0 { + logrus.Warn("secrets are not supported on this platform") + return nil + } + + c, err := daemon.GetContainer(name) + if err != nil { + return err + } + + c.SecretReferences = refs + + return nil +} diff --git a/components/engine/daemon/secrets_linux.go b/components/engine/daemon/secrets_linux.go new file mode 100644 index 0000000000..fca4e12598 --- /dev/null +++ b/components/engine/daemon/secrets_linux.go @@ -0,0 +1,7 @@ +// +build linux + +package daemon + +func secretsSupported() bool { + return true +} diff --git a/components/engine/daemon/secrets_unsupported.go b/components/engine/daemon/secrets_unsupported.go new file mode 100644 index 0000000000..d55e8624d7 --- /dev/null +++ 
b/components/engine/daemon/secrets_unsupported.go @@ -0,0 +1,7 @@ +// +build !linux,!windows + +package daemon + +func secretsSupported() bool { + return false +} diff --git a/components/engine/daemon/secrets_windows.go b/components/engine/daemon/secrets_windows.go new file mode 100644 index 0000000000..9054354c8d --- /dev/null +++ b/components/engine/daemon/secrets_windows.go @@ -0,0 +1,7 @@ +// +build windows + +package daemon + +func secretsSupported() bool { + return true +} diff --git a/components/engine/daemon/selinux_linux.go b/components/engine/daemon/selinux_linux.go new file mode 100644 index 0000000000..fb2578bf4d --- /dev/null +++ b/components/engine/daemon/selinux_linux.go @@ -0,0 +1,17 @@ +// +build linux + +package daemon + +import "github.com/opencontainers/selinux/go-selinux" + +func selinuxSetDisabled() { + selinux.SetDisabled() +} + +func selinuxFreeLxcContexts(label string) { + selinux.ReleaseLabel(label) +} + +func selinuxEnabled() bool { + return selinux.GetEnabled() +} diff --git a/components/engine/daemon/selinux_unsupported.go b/components/engine/daemon/selinux_unsupported.go new file mode 100644 index 0000000000..25a56ad157 --- /dev/null +++ b/components/engine/daemon/selinux_unsupported.go @@ -0,0 +1,13 @@ +// +build !linux + +package daemon + +func selinuxSetDisabled() { +} + +func selinuxFreeLxcContexts(label string) { +} + +func selinuxEnabled() bool { + return false +} diff --git a/components/engine/daemon/start.go b/components/engine/daemon/start.go new file mode 100644 index 0000000000..eddb5d3d50 --- /dev/null +++ b/components/engine/daemon/start.go @@ -0,0 +1,229 @@ +package daemon + +import ( + "fmt" + "net/http" + "runtime" + "strings" + "syscall" + "time" + + "google.golang.org/grpc" + + "github.com/Sirupsen/logrus" + apierrors "github.com/docker/docker/api/errors" + "github.com/docker/docker/api/types" + containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/docker/container" +) + +// ContainerStart starts a container. +func (daemon *Daemon) ContainerStart(name string, hostConfig *containertypes.HostConfig, checkpoint string, checkpointDir string) error { + if checkpoint != "" && !daemon.HasExperimental() { + return apierrors.NewBadRequestError(fmt.Errorf("checkpoint is only supported in experimental mode")) + } + + container, err := daemon.GetContainer(name) + if err != nil { + return err + } + + if container.IsPaused() { + return fmt.Errorf("Cannot start a paused container, try unpause instead.") + } + + if container.IsRunning() { + err := fmt.Errorf("Container already started") + return apierrors.NewErrorWithStatusCode(err, http.StatusNotModified) + } + + // Windows does not have the backwards compatibility issue here. + if runtime.GOOS != "windows" { + // This is kept for backward compatibility - hostconfig should be passed when + // creating a container, not during start. 
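For contrast with the deprecated start-time override noted above, a client-side sketch of the supported flow: HostConfig goes in at create time and start carries none. This assumes the era-appropriate client signatures; the image and options are placeholders:

package main

import (
	"context"
	"log"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewEnvClient()
	if err != nil {
		log.Fatal(err)
	}
	ctx := context.Background()

	// Host-level options (here AutoRemove) belong on the create call.
	resp, err := cli.ContainerCreate(ctx,
		&container.Config{Image: "busybox", Cmd: []string{"true"}},
		&container.HostConfig{AutoRemove: true},
		nil, "")
	if err != nil {
		log.Fatal(err)
	}

	// Start carries no host configuration at all.
	if err := cli.ContainerStart(ctx, resp.ID, types.ContainerStartOptions{}); err != nil {
		log.Fatal(err)
	}
}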
+ if hostConfig != nil { + logrus.Warn("DEPRECATED: Setting host configuration options when the container starts is deprecated and has been removed in Docker 1.12") + oldNetworkMode := container.HostConfig.NetworkMode + if err := daemon.setSecurityOptions(container, hostConfig); err != nil { + return err + } + if err := daemon.mergeAndVerifyLogConfig(&hostConfig.LogConfig); err != nil { + return err + } + if err := daemon.setHostConfig(container, hostConfig); err != nil { + return err + } + newNetworkMode := container.HostConfig.NetworkMode + if string(oldNetworkMode) != string(newNetworkMode) { + // if the user has changed the network mode on start, clean up the + // old networks. This is a deprecated feature and was removed in Docker 1.12 + container.NetworkSettings.Networks = nil + if err := container.ToDisk(); err != nil { + return err + } + } + container.InitDNSHostConfig() + } + } else { + if hostConfig != nil { + return fmt.Errorf("Supplying a hostconfig on start is not supported. It should be supplied on create") + } + } + + // check if hostConfig is in line with the current system settings. + // It may happen that cgroups were unmounted, or the like. + if _, err = daemon.verifyContainerSettings(container.HostConfig, nil, false); err != nil { + return err + } + // Adapt for old containers in case we have updates in this function and + // old containers never had a chance to call the new function at create time. + if hostConfig != nil { + if err := daemon.adaptContainerSettings(container.HostConfig, false); err != nil { + return err + } + } + + return daemon.containerStart(container, checkpoint, checkpointDir, true) +} + +// Start starts a container +func (daemon *Daemon) Start(container *container.Container) error { + return daemon.containerStart(container, "", "", true) +} + +// containerStart prepares the container to run by setting up everything the +// container needs, such as storage and networking, as well as links +// between containers. The container is left waiting for a signal to +// begin running.
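containerStart below funnels every failure through one deferred block that inspects the named return value and unwinds whatever setup already happened. A minimal sketch of that shape, with placeholder steps standing in for mounting, networking, and spec creation:

package main

import (
	"errors"
	"fmt"
)

// start runs setup steps in order; a single deferred check on the named
// return value triggers cleanup if any step failed.
func start(steps []func() error, cleanup func()) (err error) {
	defer func() {
		if err != nil {
			cleanup() // release whatever the earlier steps set up
		}
	}()
	for i, step := range steps {
		if err = step(); err != nil {
			return fmt.Errorf("setup step %d: %v", i, err)
		}
	}
	return nil
}

func main() {
	err := start(
		[]func() error{
			func() error { return nil },                            // mount: ok
			func() error { return errors.New("no such network") }, // networking: fails
		},
		func() { fmt.Println("cleaned up") },
	)
	fmt.Println(err)
}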
+func (daemon *Daemon) containerStart(container *container.Container, checkpoint string, checkpointDir string, resetRestartManager bool) (err error) { + start := time.Now() + container.Lock() + defer container.Unlock() + + if resetRestartManager && container.Running { // skip this check if already in restarting step and resetRestartManager==false + return nil + } + + if container.RemovalInProgress || container.Dead { + return fmt.Errorf("Container is marked for removal and cannot be started.") + } + + // if we encounter an error during start we need to ensure that any other + // setup has been cleaned up properly + defer func() { + if err != nil { + container.SetError(err) + // if no one else has set it, make sure we don't leave it at zero + if container.ExitCode() == 0 { + container.SetExitCode(128) + } + container.ToDisk() + + container.Reset(false) + + daemon.Cleanup(container) + // if the container's AutoRemove flag is set, remove it after cleanup + if container.HostConfig.AutoRemove { + container.Unlock() + if err := daemon.ContainerRm(container.ID, &types.ContainerRmConfig{ForceRemove: true, RemoveVolume: true}); err != nil { + logrus.Errorf("can't remove container %s: %v", container.ID, err) + } + container.Lock() + } + } + }() + + if err := daemon.conditionalMountOnStart(container); err != nil { + return err + } + + if err := daemon.initializeNetworking(container); err != nil { + return err + } + + spec, err := daemon.createSpec(container) + if err != nil { + return err + } + + createOptions, err := daemon.getLibcontainerdCreateOptions(container) + if err != nil { + return err + } + + if resetRestartManager { + container.ResetRestartManager(true) + } + + if checkpointDir == "" { + checkpointDir = container.CheckpointDir() + } + + if err := daemon.saveApparmorConfig(container); err != nil { + return err + } + + if err := daemon.containerd.Create(container.ID, checkpoint, checkpointDir, *spec, container.InitializeStdio, createOptions...); err != nil { + errDesc := grpc.ErrorDesc(err) + contains := func(s1, s2 string) bool { + return strings.Contains(strings.ToLower(s1), s2) + } + logrus.Errorf("Create container failed with error: %s", errDesc) + // if we receive an internal error from the initial start of a container then let's + // return it instead of entering the restart loop + // (set to 127 for container cmd not found/does not exist) + if contains(errDesc, container.Path) && + (contains(errDesc, "executable file not found") || + contains(errDesc, "no such file or directory") || + contains(errDesc, "system cannot find the file specified")) { + container.SetExitCode(127) + } + // set to 126 for container cmd can't be invoked errors + if contains(errDesc, syscall.EACCES.Error()) { + container.SetExitCode(126) + } + + // attempted to mount a file onto a directory, or a directory onto a file, maybe from user specified bind mounts + if contains(errDesc, syscall.ENOTDIR.Error()) { + errDesc += ": Are you trying to mount a directory onto a file (or vice-versa)? Check if the specified host path exists and is the expected type" + container.SetExitCode(127) + } + + return fmt.Errorf("%s", errDesc) + } + + containerActions.WithValues("start").UpdateSince(start) + + return nil +} + +// Cleanup releases any network resources allocated to the container along with any rules +// around how containers are linked together. It also unmounts the container's root filesystem.
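Cleanup, described above, is deliberately best-effort: every teardown step runs even when earlier ones fail, and errors are logged rather than returned. A compact sketch of that policy with placeholder step names:

package main

import "log"

type step struct {
	name string
	run  func() error
}

// bestEffort runs every step regardless of earlier failures, logging
// errors instead of returning them, mirroring Cleanup's style.
func bestEffort(steps []step) {
	for _, s := range steps {
		if err := s.run(); err != nil {
			log.Printf("cleanup: %s failed: %v", s.name, err)
		}
	}
}

func main() {
	bestEffort([]step{
		{"release network", func() error { return nil }},
		{"unmount rootfs", func() error { return nil }},
	})
}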
+func (daemon *Daemon) Cleanup(container *container.Container) { + daemon.releaseNetwork(container) + + container.UnmountIpcMounts(detachMounted) + + if err := daemon.conditionalUnmountOnCleanup(container); err != nil { + // FIXME: remove once reference counting for graphdrivers has been refactored + // Ensure that all the mounts are gone + if mountid, err := daemon.layerStore.GetMountID(container.ID); err == nil { + daemon.cleanupMountsByID(mountid) + } + } + + if err := container.UnmountSecrets(); err != nil { + logrus.Warnf("%s cleanup: failed to unmount secrets: %s", container.ID, err) + } + + for _, eConfig := range container.ExecCommands.Commands() { + daemon.unregisterExecCommand(container, eConfig) + } + + if container.BaseFS != "" { + if err := container.UnmountVolumes(daemon.LogVolumeEvent); err != nil { + logrus.Warnf("%s cleanup: Failed to umount volumes: %v", container.ID, err) + } + } + container.CancelAttachContext() +} diff --git a/components/engine/daemon/start_unix.go b/components/engine/daemon/start_unix.go new file mode 100644 index 0000000000..103cc73b86 --- /dev/null +++ b/components/engine/daemon/start_unix.go @@ -0,0 +1,31 @@ +// +build !windows + +package daemon + +import ( + "fmt" + + "github.com/docker/docker/container" + "github.com/docker/docker/libcontainerd" +) + +func (daemon *Daemon) getLibcontainerdCreateOptions(container *container.Container) ([]libcontainerd.CreateOption, error) { + createOptions := []libcontainerd.CreateOption{} + + // Ensure a runtime has been assigned to this container + if container.HostConfig.Runtime == "" { + container.HostConfig.Runtime = daemon.configStore.GetDefaultRuntimeName() + container.ToDisk() + } + + rt := daemon.configStore.GetRuntime(container.HostConfig.Runtime) + if rt == nil { + return nil, fmt.Errorf("no such runtime '%s'", container.HostConfig.Runtime) + } + if UsingSystemd(daemon.configStore) { + rt.Args = append(rt.Args, "--systemd-cgroup=true") + } + createOptions = append(createOptions, libcontainerd.WithRuntime(rt.Path, rt.Args)) + + return createOptions, nil +} diff --git a/components/engine/daemon/start_windows.go b/components/engine/daemon/start_windows.go new file mode 100644 index 0000000000..0f9739091a --- /dev/null +++ b/components/engine/daemon/start_windows.go @@ -0,0 +1,221 @@ +package daemon + +import ( + "fmt" + "io/ioutil" + "path/filepath" + "strings" + + "github.com/docker/docker/container" + "github.com/docker/docker/layer" + "github.com/docker/docker/libcontainerd" + "github.com/docker/docker/pkg/system" + "golang.org/x/sys/windows/registry" +) + +const ( + credentialSpecRegistryLocation = `SOFTWARE\Microsoft\Windows NT\CurrentVersion\Virtualization\Containers\CredentialSpecs` + credentialSpecFileLocation = "CredentialSpecs" +) + +func (daemon *Daemon) getLibcontainerdCreateOptions(container *container.Container) ([]libcontainerd.CreateOption, error) { + createOptions := []libcontainerd.CreateOption{} + + // Are we going to run as a Hyper-V container? + hvOpts := &libcontainerd.HyperVIsolationOption{} + if container.HostConfig.Isolation.IsDefault() { + // Container is set to use the default, so take the default from the daemon configuration + hvOpts.IsHyperV = daemon.defaultIsolation.IsHyperV() + } else { + // Container is requesting an isolation mode. Honour it. 
+ hvOpts.IsHyperV = container.HostConfig.Isolation.IsHyperV() + } + + dnsSearch := daemon.getDNSSearchSettings(container) + if dnsSearch != nil { + osv := system.GetOSVersion() + if osv.Build < 14997 { + return nil, fmt.Errorf("dns-search option is not supported on the current platform") + } + } + + // Generate the layer folder of the layer options + layerOpts := &libcontainerd.LayerOption{} + m, err := container.RWLayer.Metadata() + if err != nil { + return nil, fmt.Errorf("failed to get layer metadata - %s", err) + } + if hvOpts.IsHyperV { + hvOpts.SandboxPath = filepath.Dir(m["dir"]) + } + + layerOpts.LayerFolderPath = m["dir"] + + // Generate the layer paths of the layer options + img, err := daemon.imageStore.Get(container.ImageID) + if err != nil { + return nil, fmt.Errorf("failed to graph.Get on ImageID %s - %s", container.ImageID, err) + } + // Get the layer path for each layer. + max := len(img.RootFS.DiffIDs) + for i := 1; i <= max; i++ { + img.RootFS.DiffIDs = img.RootFS.DiffIDs[:i] + layerPath, err := layer.GetLayerPath(daemon.layerStore, img.RootFS.ChainID()) + if err != nil { + return nil, fmt.Errorf("failed to get layer path from graphdriver %s for ImageID %s - %s", daemon.layerStore, img.RootFS.ChainID(), err) + } + // Reverse order, expecting parent most first + layerOpts.LayerPaths = append([]string{layerPath}, layerOpts.LayerPaths...) + } + + // Get endpoints for the libnetwork allocated networks to the container + var epList []string + AllowUnqualifiedDNSQuery := false + gwHNSID := "" + if container.NetworkSettings != nil { + for n := range container.NetworkSettings.Networks { + sn, err := daemon.FindNetwork(n) + if err != nil { + continue + } + + ep, err := container.GetEndpointInNetwork(sn) + if err != nil { + continue + } + + data, err := ep.DriverInfo() + if err != nil { + continue + } + + if data["GW_INFO"] != nil { + gwInfo := data["GW_INFO"].(map[string]interface{}) + if gwInfo["hnsid"] != nil { + gwHNSID = gwInfo["hnsid"].(string) + } + } + + if data["hnsid"] != nil { + epList = append(epList, data["hnsid"].(string)) + } + + if data["AllowUnqualifiedDNSQuery"] != nil { + AllowUnqualifiedDNSQuery = true + } + } + } + + if gwHNSID != "" { + epList = append(epList, gwHNSID) + } + + // Read and add credentials from the security options if a credential spec has been provided. 
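The block that follows accepts security options of the form credentialspec=file://NAME or credentialspec=registry://NAME. A standalone sketch of just the parse, simplified relative to the real loop (which also loads the spec contents):

package main

import (
	"fmt"
	"strings"
)

// parseCredSpec splits a credentialspec security option into its source
// ("file" or "registry") and bare value, rejecting anything else.
func parseCredSpec(opt string) (source, value string, err error) {
	kv := strings.SplitN(strings.ToLower(opt), "=", 2)
	if len(kv) != 2 || kv[0] != "credentialspec" {
		return "", "", fmt.Errorf("invalid security option: %s", opt)
	}
	for _, prefix := range []string{"file://", "registry://"} {
		if strings.HasPrefix(kv[1], prefix) {
			return strings.TrimSuffix(prefix, "://"), strings.TrimPrefix(kv[1], prefix), nil
		}
	}
	return "", "", fmt.Errorf("credential spec value must be prefixed with file:// or registry://")
}

func main() {
	fmt.Println(parseCredSpec("credentialspec=file://spec.json"))
	fmt.Println(parseCredSpec("credentialspec=registry://MyApp"))
}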
+ if container.HostConfig.SecurityOpt != nil { + for _, sOpt := range container.HostConfig.SecurityOpt { + sOpt = strings.ToLower(sOpt) + if !strings.Contains(sOpt, "=") { + return nil, fmt.Errorf("invalid security option: no equals sign in supplied value %s", sOpt) + } + splitsOpt := strings.SplitN(sOpt, "=", 2) + if len(splitsOpt) != 2 { + return nil, fmt.Errorf("invalid security option: %s", sOpt) + } + if splitsOpt[0] != "credentialspec" { + return nil, fmt.Errorf("security option not supported: %s", splitsOpt[0]) + } + + credentialsOpts := &libcontainerd.CredentialsOption{} + var ( + match bool + csValue string + err error + ) + if match, csValue = getCredentialSpec("file://", splitsOpt[1]); match { + if csValue == "" { + return nil, fmt.Errorf("no value supplied for file:// credential spec security option") + } + if credentialsOpts.Credentials, err = readCredentialSpecFile(container.ID, daemon.root, filepath.Clean(csValue)); err != nil { + return nil, err + } + } else if match, csValue = getCredentialSpec("registry://", splitsOpt[1]); match { + if csValue == "" { + return nil, fmt.Errorf("no value supplied for registry:// credential spec security option") + } + if credentialsOpts.Credentials, err = readCredentialSpecRegistry(container.ID, csValue); err != nil { + return nil, err + } + } else { + return nil, fmt.Errorf("invalid credential spec security option - value must be prefixed with file:// or registry://") + } + createOptions = append(createOptions, credentialsOpts) + } + } + + // Now add the remaining options. + createOptions = append(createOptions, &libcontainerd.FlushOption{IgnoreFlushesDuringBoot: !container.HasBeenStartedBefore}) + createOptions = append(createOptions, hvOpts) + createOptions = append(createOptions, layerOpts) + + var networkSharedContainerID string + if container.HostConfig.NetworkMode.IsContainer() { + networkSharedContainerID = container.NetworkSharedContainerID + } + createOptions = append(createOptions, &libcontainerd.NetworkEndpointsOption{ + Endpoints: epList, + AllowUnqualifiedDNSQuery: AllowUnqualifiedDNSQuery, + DNSSearchList: dnsSearch, + NetworkSharedContainerID: networkSharedContainerID, + }) + return createOptions, nil +} + +// getCredentialSpec is a helper function to get the value of a credential spec supplied +// on the CLI, stripping the prefix +func getCredentialSpec(prefix, value string) (bool, string) { + if strings.HasPrefix(value, prefix) { + return true, strings.TrimPrefix(value, prefix) + } + return false, "" +} + +// readCredentialSpecRegistry is a helper function to read a credential spec from +// the registry. An error is returned if the registry key cannot be opened or +// the named value is missing or unreadable.
+func readCredentialSpecRegistry(id, name string) (string, error) { + var ( + k registry.Key + err error + val string + ) + if k, err = registry.OpenKey(registry.LOCAL_MACHINE, credentialSpecRegistryLocation, registry.QUERY_VALUE); err != nil { + return "", fmt.Errorf("failed handling spec %q for container %s - %s could not be opened", name, id, credentialSpecRegistryLocation) + } + if val, _, err = k.GetStringValue(name); err != nil { + if err == registry.ErrNotExist { + return "", fmt.Errorf("credential spec %q for container %s was not found", name, id) + } + return "", fmt.Errorf("error %v reading credential spec %q from registry for container %s", err, name, id) + } + return val, nil +} + +// readCredentialSpecFile is a helper function to read a credential spec from +// a file under the daemon root. An error is returned if the path is absolute, +// escapes that location, or cannot be read. +func readCredentialSpecFile(id, root, location string) (string, error) { + if filepath.IsAbs(location) { + return "", fmt.Errorf("invalid credential spec - file:// path cannot be absolute") + } + base := filepath.Join(root, credentialSpecFileLocation) + full := filepath.Join(base, location) + if !strings.HasPrefix(full, base) { + return "", fmt.Errorf("invalid credential spec - file:// path must be under %s", base) + } + bcontents, err := ioutil.ReadFile(full) + if err != nil { + return "", fmt.Errorf("credential spec '%s' for container %s could not be read: %q", full, id, err) + } + return string(bcontents), nil +} diff --git a/components/engine/daemon/stats.go b/components/engine/daemon/stats.go new file mode 100644 index 0000000000..926f32efdb --- /dev/null +++ b/components/engine/daemon/stats.go @@ -0,0 +1,160 @@ +package daemon + +import ( + "encoding/json" + "errors" + "fmt" + "runtime" + "time" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/api/types/versions" + "github.com/docker/docker/api/types/versions/v1p20" + "github.com/docker/docker/container" + "github.com/docker/docker/pkg/ioutils" +) + +// ContainerStats writes information about the container to the stream +// given in the config object. +func (daemon *Daemon) ContainerStats(ctx context.Context, prefixOrName string, config *backend.ContainerStatsConfig) error { + if runtime.GOOS == "solaris" { + return fmt.Errorf("%s does not support stats", runtime.GOOS) + } + // Engine API version (used for backwards compatibility) + apiVersion := config.Version + + container, err := daemon.GetContainer(prefixOrName) + if err != nil { + return err + } + + // If the container is either not running or restarting and no stream is required, return empty stats.
+ if (!container.IsRunning() || container.IsRestarting()) && !config.Stream { + return json.NewEncoder(config.OutStream).Encode(&types.StatsJSON{ + Name: container.Name, + ID: container.ID}) + } + + outStream := config.OutStream + if config.Stream { + wf := ioutils.NewWriteFlusher(outStream) + defer wf.Close() + wf.Flush() + outStream = wf + } + + var preCPUStats types.CPUStats + var preRead time.Time + getStatJSON := func(v interface{}) *types.StatsJSON { + ss := v.(types.StatsJSON) + ss.Name = container.Name + ss.ID = container.ID + ss.PreCPUStats = preCPUStats + ss.PreRead = preRead + preCPUStats = ss.CPUStats + preRead = ss.Read + return &ss + } + + enc := json.NewEncoder(outStream) + + updates := daemon.subscribeToContainerStats(container) + defer daemon.unsubscribeToContainerStats(container, updates) + + noStreamFirstFrame := true + for { + select { + case v, ok := <-updates: + if !ok { + return nil + } + + var statsJSON interface{} + statsJSONPost120 := getStatJSON(v) + if versions.LessThan(apiVersion, "1.21") { + if runtime.GOOS == "windows" { + return errors.New("API versions pre v1.21 do not support stats on Windows") + } + var ( + rxBytes uint64 + rxPackets uint64 + rxErrors uint64 + rxDropped uint64 + txBytes uint64 + txPackets uint64 + txErrors uint64 + txDropped uint64 + ) + for _, v := range statsJSONPost120.Networks { + rxBytes += v.RxBytes + rxPackets += v.RxPackets + rxErrors += v.RxErrors + rxDropped += v.RxDropped + txBytes += v.TxBytes + txPackets += v.TxPackets + txErrors += v.TxErrors + txDropped += v.TxDropped + } + statsJSON = &v1p20.StatsJSON{ + Stats: statsJSONPost120.Stats, + Network: types.NetworkStats{ + RxBytes: rxBytes, + RxPackets: rxPackets, + RxErrors: rxErrors, + RxDropped: rxDropped, + TxBytes: txBytes, + TxPackets: txPackets, + TxErrors: txErrors, + TxDropped: txDropped, + }, + } + } else { + statsJSON = statsJSONPost120 + } + + if !config.Stream && noStreamFirstFrame { + // prime the cpu stats so they aren't 0 in the final output + noStreamFirstFrame = false + continue + } + + if err := enc.Encode(statsJSON); err != nil { + return err + } + + if !config.Stream { + return nil + } + case <-ctx.Done(): + return nil + } + } +} + +func (daemon *Daemon) subscribeToContainerStats(c *container.Container) chan interface{} { + return daemon.statsCollector.Collect(c) +} + +func (daemon *Daemon) unsubscribeToContainerStats(c *container.Container, ch chan interface{}) { + daemon.statsCollector.Unsubscribe(c, ch) +} + +// GetContainerStats collects all the stats published by a container +func (daemon *Daemon) GetContainerStats(container *container.Container) (*types.StatsJSON, error) { + stats, err := daemon.stats(container) + if err != nil { + return nil, err + } + + // We already have the network stats on Windows directly from HCS. 
+ if !container.Config.NetworkDisabled && runtime.GOOS != "windows" { + if stats.Networks, err = daemon.getNetworkStats(container); err != nil { + return nil, err + } + } + + return stats, nil +} diff --git a/components/engine/daemon/stats/collector.go b/components/engine/daemon/stats/collector.go new file mode 100644 index 0000000000..71bcff446e --- /dev/null +++ b/components/engine/daemon/stats/collector.go @@ -0,0 +1,116 @@ +// +build !solaris + +package stats + +import ( + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types" + "github.com/docker/docker/container" + "github.com/docker/docker/pkg/pubsub" +) + +// Collect registers the container with the collector and adds it to +// the event loop for collection on the specified interval returning +// a channel for the subscriber to receive on. +func (s *Collector) Collect(c *container.Container) chan interface{} { + s.m.Lock() + defer s.m.Unlock() + publisher, exists := s.publishers[c] + if !exists { + publisher = pubsub.NewPublisher(100*time.Millisecond, 1024) + s.publishers[c] = publisher + } + return publisher.Subscribe() +} + +// StopCollection closes the channels for all subscribers and removes +// the container from metrics collection. +func (s *Collector) StopCollection(c *container.Container) { + s.m.Lock() + if publisher, exists := s.publishers[c]; exists { + publisher.Close() + delete(s.publishers, c) + } + s.m.Unlock() +} + +// Unsubscribe removes a specific subscriber from receiving updates for a container's stats. +func (s *Collector) Unsubscribe(c *container.Container, ch chan interface{}) { + s.m.Lock() + publisher := s.publishers[c] + if publisher != nil { + publisher.Evict(ch) + if publisher.Len() == 0 { + delete(s.publishers, c) + } + } + s.m.Unlock() +} + +// Run starts the collectors and will indefinitely collect stats from the supervisor +func (s *Collector) Run() { + type publishersPair struct { + container *container.Container + publisher *pubsub.Publisher + } + // we cannot determine the capacity here. 
+ // it will grow enough in the first iteration + var pairs []publishersPair + + for range time.Tick(s.interval) { + // it does not make sense in the first iteration, + // but saves allocations in further iterations + pairs = pairs[:0] + + s.m.Lock() + for container, publisher := range s.publishers { + // copy pointers here to release the lock ASAP + pairs = append(pairs, publishersPair{container, publisher}) + } + s.m.Unlock() + if len(pairs) == 0 { + continue + } + + systemUsage, err := s.getSystemCPUUsage() + if err != nil { + logrus.Errorf("collecting system cpu usage: %v", err) + continue + } + + onlineCPUs, err := s.getNumberOnlineCPUs() + if err != nil { + logrus.Errorf("collecting system online cpu count: %v", err) + continue + } + + for _, pair := range pairs { + stats, err := s.supervisor.GetContainerStats(pair.container) + if err != nil { + if _, ok := err.(notRunningErr); !ok { + logrus.Errorf("collecting stats for %s: %v", pair.container.ID, err) + continue + } + + // publish empty stats containing only name and ID if not running + pair.publisher.Publish(types.StatsJSON{ + Name: pair.container.Name, + ID: pair.container.ID, + }) + continue + } + // FIXME: move to containerd on Linux (not Windows) + stats.CPUStats.SystemUsage = systemUsage + stats.CPUStats.OnlineCPUs = onlineCPUs + + pair.publisher.Publish(*stats) + } + } +} + +type notRunningErr interface { + error + ContainerIsRunning() bool +} diff --git a/components/engine/daemon/stats/collector_solaris.go b/components/engine/daemon/stats/collector_solaris.go new file mode 100644 index 0000000000..3699d08c10 --- /dev/null +++ b/components/engine/daemon/stats/collector_solaris.go @@ -0,0 +1,29 @@ +package stats + +import ( + "github.com/docker/docker/container" +) + +// platformNewStatsCollector performs platform specific initialisation of the +// Collector structure. This is a no-op on Solaris. +func platformNewStatsCollector(s *Collector) { +} + +// Collect registers the container with the collector and adds it to +// the event loop for collection on the specified interval returning +// a channel for the subscriber to receive on. +// Currently not supported on Solaris +func (s *Collector) Collect(c *container.Container) chan interface{} { + return nil +} + +// StopCollection closes the channels for all subscribers and removes +// the container from metrics collection. +// Currently not supported on Solaris +func (s *Collector) StopCollection(c *container.Container) { +} + +// Unsubscribe removes a specific subscriber from receiving updates for a container's stats. +// Currently not supported on Solaris +func (s *Collector) Unsubscribe(c *container.Container, ch chan interface{}) { +} diff --git a/components/engine/daemon/stats/collector_unix.go b/components/engine/daemon/stats/collector_unix.go new file mode 100644 index 0000000000..5ad9658690 --- /dev/null +++ b/components/engine/daemon/stats/collector_unix.go @@ -0,0 +1,79 @@ +// +build !windows,!solaris + +package stats + +import ( + "fmt" + "os" + "strconv" + "strings" + + "github.com/opencontainers/runc/libcontainer/system" +) + +/* +#include <unistd.h> +*/ +import "C" + +// platformNewStatsCollector performs platform specific initialisation of the +// Collector structure. +func platformNewStatsCollector(s *Collector) { + s.clockTicksPerSecond = uint64(system.GetClockTicks()) +} + +const nanoSecondsPerSecond = 1e9 + +// getSystemCPUUsage returns the host system's cpu usage in +// nanoseconds. An error is returned if the format of the underlying +// file does not match.
+// getSystemCPUUsage returns the host system's cpu usage in
+// nanoseconds. An error is returned if the format of the underlying
+// file does not match.
+//
+// Uses /proc/stat as defined by Linux. Looks for the cpu
+// statistics line and then sums up the first seven fields
+// provided. See `man 5 proc` for details on specific field
+// information.
+func (s *Collector) getSystemCPUUsage() (uint64, error) {
+	var line string
+	f, err := os.Open("/proc/stat")
+	if err != nil {
+		return 0, err
+	}
+	defer func() {
+		s.bufReader.Reset(nil)
+		f.Close()
+	}()
+	s.bufReader.Reset(f)
+	err = nil
+	for err == nil {
+		line, err = s.bufReader.ReadString('\n')
+		if err != nil {
+			break
+		}
+		parts := strings.Fields(line)
+		switch parts[0] {
+		case "cpu":
+			if len(parts) < 8 {
+				return 0, fmt.Errorf("invalid number of cpu fields")
+			}
+			var totalClockTicks uint64
+			for _, i := range parts[1:8] {
+				v, err := strconv.ParseUint(i, 10, 64)
+				if err != nil {
+					return 0, fmt.Errorf("unable to convert value %s to int: %s", i, err)
+				}
+				totalClockTicks += v
+			}
+			return (totalClockTicks * nanoSecondsPerSecond) /
+				s.clockTicksPerSecond, nil
+		}
+	}
+	return 0, fmt.Errorf("invalid stat format: error trying to parse the '/proc/stat' file")
+}
+
+func (s *Collector) getNumberOnlineCPUs() (uint32, error) {
+	i, err := C.sysconf(C._SC_NPROCESSORS_ONLN)
+	if err != nil {
+		return 0, err
+	}
+	return uint32(i), nil
+}
diff --git a/components/engine/daemon/stats/collector_windows.go b/components/engine/daemon/stats/collector_windows.go
new file mode 100644
index 0000000000..5fb27ced66
--- /dev/null
+++ b/components/engine/daemon/stats/collector_windows.go
@@ -0,0 +1,19 @@
+// +build windows
+
+package stats
+
+// platformNewStatsCollector performs platform specific initialisation of the
+// Collector structure. This is a no-op on Windows.
+func platformNewStatsCollector(s *Collector) {
+}
+
+// getSystemCPUUsage returns the host system's cpu usage in
+// nanoseconds. This is a no-op on Windows.
+func (s *Collector) getSystemCPUUsage() (uint64, error) {
+	return 0, nil
+}
+
+func (s *Collector) getNumberOnlineCPUs() (uint32, error) {
+	return 0, nil
+}
diff --git a/components/engine/daemon/stats/types.go b/components/engine/daemon/stats/types.go
new file mode 100644
index 0000000000..e48783c9a9
--- /dev/null
+++ b/components/engine/daemon/stats/types.go
@@ -0,0 +1,42 @@
+package stats
+
+import (
+	"bufio"
+	"sync"
+	"time"
+
+	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/container"
+	"github.com/docker/docker/pkg/pubsub"
+)
+
+type supervisor interface {
+	// GetContainerStats collects all the stats related to a container
+	GetContainerStats(container *container.Container) (*types.StatsJSON, error)
+}
+
+// NewCollector creates a stats collector that will poll the supervisor with the specified interval
+func NewCollector(supervisor supervisor, interval time.Duration) *Collector {
+	s := &Collector{
+		interval:   interval,
+		supervisor: supervisor,
+		publishers: make(map[*container.Container]*pubsub.Publisher),
+		bufReader:  bufio.NewReaderSize(nil, 128),
+	}
+
+	platformNewStatsCollector(s)
+
+	return s
+}
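Because the supervisor dependency is a single-method interface, the daemon is easy to stand in for. A minimal sketch of a test double, assuming it lives alongside the package's tests; the fakeSupervisor type and its canned response are hypothetical, not part of this change:

	type fakeSupervisor struct{}

	func (fakeSupervisor) GetContainerStats(c *container.Container) (*types.StatsJSON, error) {
		s := &types.StatsJSON{}
		s.ID = c.ID // a canned, empty sample keyed to the container
		return s, nil
	}

	// c := NewCollector(fakeSupervisor{}, time.Second)
	// go c.Run()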
+// Collector manages and provides container resource stats
+type Collector struct {
+	m          sync.Mutex
+	supervisor supervisor
+	interval   time.Duration
+	publishers map[*container.Container]*pubsub.Publisher
+	bufReader  *bufio.Reader
+
+	// The following fields are not set on Windows currently.
+	clockTicksPerSecond uint64
+}
diff --git a/components/engine/daemon/stats_collector.go b/components/engine/daemon/stats_collector.go
new file mode 100644
index 0000000000..7daf26f9f7
--- /dev/null
+++ b/components/engine/daemon/stats_collector.go
@@ -0,0 +1,26 @@
+package daemon
+
+import (
+	"runtime"
+	"time"
+
+	"github.com/docker/docker/daemon/stats"
+	"github.com/docker/docker/pkg/system"
+)
+
+// newStatsCollector returns a new statsCollector that collects
+// stats for a registered container at the specified interval.
+// The collector allows non-running containers to be added
+// and will start processing stats when they are started.
+func (daemon *Daemon) newStatsCollector(interval time.Duration) *stats.Collector {
+	// FIXME(vdemeester) move this elsewhere
+	if runtime.GOOS == "linux" {
+		meminfo, err := system.ReadMemInfo()
+		if err == nil && meminfo.MemTotal > 0 {
+			daemon.machineMemory = uint64(meminfo.MemTotal)
+		}
+	}
+	s := stats.NewCollector(daemon, interval)
+	go s.Run()
+	return s
+}
diff --git a/components/engine/daemon/stats_unix.go b/components/engine/daemon/stats_unix.go
new file mode 100644
index 0000000000..d875607b3a
--- /dev/null
+++ b/components/engine/daemon/stats_unix.go
@@ -0,0 +1,58 @@
+// +build !windows
+
+package daemon
+
+import (
+	"fmt"
+
+	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/container"
+)
+
+// Resolve the network SandboxID in case the container reuses another container's network stack
+func (daemon *Daemon) getNetworkSandboxID(c *container.Container) (string, error) {
+	curr := c
+	for curr.HostConfig.NetworkMode.IsContainer() {
+		containerID := curr.HostConfig.NetworkMode.ConnectedContainer()
+		connected, err := daemon.GetContainer(containerID)
+		if err != nil {
+			return "", fmt.Errorf("Could not get container for %s", containerID)
+		}
+		curr = connected
+	}
+	return curr.NetworkSettings.SandboxID, nil
+}
+
+func (daemon *Daemon) getNetworkStats(c *container.Container) (map[string]types.NetworkStats, error) {
+	sandboxID, err := daemon.getNetworkSandboxID(c)
+	if err != nil {
+		return nil, err
+	}
+
+	sb, err := daemon.netController.SandboxByID(sandboxID)
+	if err != nil {
+		return nil, err
+	}
+
+	lnstats, err := sb.Statistics()
+	if err != nil {
+		return nil, err
+	}
+
+	stats := make(map[string]types.NetworkStats)
+	// Convert libnetwork nw stats into api stats
+	for ifName, ifStats := range lnstats {
+		stats[ifName] = types.NetworkStats{
+			RxBytes:   ifStats.RxBytes,
+			RxPackets: ifStats.RxPackets,
+			RxErrors:  ifStats.RxErrors,
+			RxDropped: ifStats.RxDropped,
+			TxBytes:   ifStats.TxBytes,
+			TxPackets: ifStats.TxPackets,
+			TxErrors:  ifStats.TxErrors,
+			TxDropped: ifStats.TxDropped,
+		}
+	}
+
+	return stats, nil
+}
diff --git a/components/engine/daemon/stats_windows.go b/components/engine/daemon/stats_windows.go
new file mode 100644
index 0000000000..f8e6f6f84a
--- /dev/null
+++ b/components/engine/daemon/stats_windows.go
@@ -0,0 +1,11 @@
+package daemon
+
+import (
+	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/container"
+)
+
+// Windows network stats are obtained directly through HCS, hence this is a no-op.
+func (daemon *Daemon) getNetworkStats(c *container.Container) (map[string]types.NetworkStats, error) { + return make(map[string]types.NetworkStats), nil +} diff --git a/components/engine/daemon/stop.go b/components/engine/daemon/stop.go new file mode 100644 index 0000000000..6a4776d155 --- /dev/null +++ b/components/engine/daemon/stop.go @@ -0,0 +1,91 @@ +package daemon + +import ( + "context" + "fmt" + "net/http" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/errors" + containerpkg "github.com/docker/docker/container" +) + +// ContainerStop looks for the given container and terminates it, +// waiting the given number of seconds before forcefully killing the +// container. If a negative number of seconds is given, ContainerStop +// will wait for a graceful termination. An error is returned if the +// container is not found, is already stopped, or if there is a +// problem stopping the container. +func (daemon *Daemon) ContainerStop(name string, seconds *int) error { + container, err := daemon.GetContainer(name) + if err != nil { + return err + } + if !container.IsRunning() { + err := fmt.Errorf("Container %s is already stopped", name) + return errors.NewErrorWithStatusCode(err, http.StatusNotModified) + } + if seconds == nil { + stopTimeout := container.StopTimeout() + seconds = &stopTimeout + } + if err := daemon.containerStop(container, *seconds); err != nil { + return fmt.Errorf("Cannot stop container %s: %v", name, err) + } + return nil +} + +// containerStop halts a container by sending a stop signal, waiting for the given +// duration in seconds, and then calling SIGKILL and waiting for the +// process to exit. If a negative duration is given, Stop will wait +// for the initial signal forever. If the container is not running Stop returns +// immediately. +func (daemon *Daemon) containerStop(container *containerpkg.Container, seconds int) error { + if !container.IsRunning() { + return nil + } + + daemon.stopHealthchecks(container) + + stopSignal := container.StopSignal() + // 1. Send a stop signal + if err := daemon.killPossiblyDeadProcess(container, stopSignal); err != nil { + // While normally we might "return err" here we're not going to + // because if we can't stop the container by this point then + // it's probably because it's already stopped. Meaning, between + // the time of the IsRunning() call above and now it stopped. + // Also, since the err return will be environment specific we can't + // look for any particular (common) error that would indicate + // that the process is already dead vs something else going wrong. + // So, instead we'll give it up to 2 more seconds to complete and if + // by that time the container is still running, then the error + // we got is probably valid and so we force kill it. + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) + defer cancel() + + if status := <-container.Wait(ctx, containerpkg.WaitConditionNotRunning); status.Err() != nil { + logrus.Infof("Container failed to stop after sending signal %d to the process, force killing", stopSignal) + if err := daemon.killPossiblyDeadProcess(container, 9); err != nil { + return err + } + } + } + + // 2. 
Wait for the process to exit on its own + ctx, cancel := context.WithTimeout(context.Background(), time.Duration(seconds)*time.Second) + defer cancel() + + if status := <-container.Wait(ctx, containerpkg.WaitConditionNotRunning); status.Err() != nil { + logrus.Infof("Container %v failed to exit within %d seconds of signal %d - using the force", container.ID, seconds, stopSignal) + // 3. If it doesn't, then send SIGKILL + if err := daemon.Kill(container); err != nil { + // Wait without a timeout, ignore result. + _ = <-container.Wait(context.Background(), containerpkg.WaitConditionNotRunning) + logrus.Warn(err) // Don't return error because we only care that container is stopped, not what function stopped it + } + } + + daemon.LogContainerEvent(container, "stop") + return nil +} diff --git a/components/engine/daemon/top_unix.go b/components/engine/daemon/top_unix.go new file mode 100644 index 0000000000..4d94d4a62f --- /dev/null +++ b/components/engine/daemon/top_unix.go @@ -0,0 +1,155 @@ +//+build !windows + +package daemon + +import ( + "fmt" + "os/exec" + "regexp" + "strconv" + "strings" + + "github.com/docker/docker/api/types/container" +) + +func validatePSArgs(psArgs string) error { + // NOTE: \\s does not detect unicode whitespaces. + // So we use fieldsASCII instead of strings.Fields in parsePSOutput. + // See https://github.com/docker/docker/pull/24358 + re := regexp.MustCompile("\\s+([^\\s]*)=\\s*(PID[^\\s]*)") + for _, group := range re.FindAllStringSubmatch(psArgs, -1) { + if len(group) >= 3 { + k := group[1] + v := group[2] + if k != "pid" { + return fmt.Errorf("specifying \"%s=%s\" is not allowed", k, v) + } + } + } + return nil +} + +// fieldsASCII is similar to strings.Fields but only allows ASCII whitespaces +func fieldsASCII(s string) []string { + fn := func(r rune) bool { + switch r { + case '\t', '\n', '\f', '\r', ' ': + return true + } + return false + } + return strings.FieldsFunc(s, fn) +} + +func appendProcess2ProcList(procList *container.ContainerTopOKBody, fields []string) { + // Make sure number of fields equals number of header titles + // merging "overhanging" fields + process := fields[:len(procList.Titles)-1] + process = append(process, strings.Join(fields[len(procList.Titles)-1:], " ")) + procList.Processes = append(procList.Processes, process) +} + +func hasPid(pids []int, pid int) bool { + for _, i := range pids { + if i == pid { + return true + } + } + return false +} + +func parsePSOutput(output []byte, pids []int) (*container.ContainerTopOKBody, error) { + procList := &container.ContainerTopOKBody{} + + lines := strings.Split(string(output), "\n") + procList.Titles = fieldsASCII(lines[0]) + + pidIndex := -1 + for i, name := range procList.Titles { + if name == "PID" { + pidIndex = i + } + } + if pidIndex == -1 { + return nil, fmt.Errorf("Couldn't find PID field in ps output") + } + + // loop through the output and extract the PID from each line + // fixing #30580, be able to display thread line also when "m" option used + // in "docker top" client command + preContainedPidFlag := false + for _, line := range lines[1:] { + if len(line) == 0 { + continue + } + fields := fieldsASCII(line) + + var ( + p int + err error + ) + + if fields[pidIndex] == "-" { + if preContainedPidFlag { + appendProcess2ProcList(procList, fields) + } + continue + } + p, err = strconv.Atoi(fields[pidIndex]) + if err != nil { + return nil, fmt.Errorf("Unexpected pid '%s': %s", fields[pidIndex], err) + } + + if hasPid(pids, p) { + preContainedPidFlag = true + 
appendProcess2ProcList(procList, fields) + continue + } + preContainedPidFlag = false + } + return procList, nil +} + +// ContainerTop lists the processes running inside of the given +// container by calling ps with the given args, or with the flags +// "-ef" if no args are given. An error is returned if the container +// is not found, or is not running, or if there are any problems +// running ps, or parsing the output. +func (daemon *Daemon) ContainerTop(name string, psArgs string) (*container.ContainerTopOKBody, error) { + if psArgs == "" { + psArgs = "-ef" + } + + if err := validatePSArgs(psArgs); err != nil { + return nil, err + } + + container, err := daemon.GetContainer(name) + if err != nil { + return nil, err + } + + if !container.IsRunning() { + return nil, errNotRunning{container.ID} + } + + if container.IsRestarting() { + return nil, errContainerIsRestarting(container.ID) + } + + pids, err := daemon.containerd.GetPidsForContainer(container.ID) + if err != nil { + return nil, err + } + + output, err := exec.Command("ps", strings.Split(psArgs, " ")...).Output() + if err != nil { + return nil, fmt.Errorf("Error running ps: %v", err) + } + procList, err := parsePSOutput(output, pids) + if err != nil { + return nil, err + } + daemon.LogContainerEvent(container, "top") + return procList, nil +} diff --git a/components/engine/daemon/top_unix_test.go b/components/engine/daemon/top_unix_test.go new file mode 100644 index 0000000000..9a3749f711 --- /dev/null +++ b/components/engine/daemon/top_unix_test.go @@ -0,0 +1,79 @@ +//+build !windows + +package daemon + +import ( + "testing" +) + +func TestContainerTopValidatePSArgs(t *testing.T) { + tests := map[string]bool{ + "ae -o uid=PID": true, + "ae -o \"uid= PID\"": true, // ascii space (0x20) + "ae -o \"uid= PID\"": false, // unicode space (U+2003, 0xe2 0x80 0x83) + "ae o uid=PID": true, + "aeo uid=PID": true, + "ae -O uid=PID": true, + "ae -o pid=PID2 -o uid=PID": true, + "ae -o pid=PID": false, + "ae -o pid=PID -o uid=PIDX": true, // FIXME: we do not need to prohibit this + "aeo pid=PID": false, + "ae": false, + "": false, + } + for psArgs, errExpected := range tests { + err := validatePSArgs(psArgs) + t.Logf("tested %q, got err=%v", psArgs, err) + if errExpected && err == nil { + t.Fatalf("expected error, got %v (%q)", err, psArgs) + } + if !errExpected && err != nil { + t.Fatalf("expected nil, got %v (%q)", err, psArgs) + } + } +} + +func TestContainerTopParsePSOutput(t *testing.T) { + tests := []struct { + output []byte + pids []int + errExpected bool + }{ + {[]byte(` PID COMMAND + 42 foo + 43 bar + - - + 100 baz +`), []int{42, 43}, false}, + {[]byte(` UID COMMAND + 42 foo + 43 bar + - - + 100 baz +`), []int{42, 43}, true}, + // unicode space (U+2003, 0xe2 0x80 0x83) + {[]byte(` PID COMMAND + 42 foo + 43 bar + - - + 100 baz +`), []int{42, 43}, true}, + // the first space is U+2003, the second one is ascii. 
+		{[]byte(`  PID COMMAND
+   42 foo
+   43 bar
+  100 baz
+`), []int{42, 43}, true},
+	}
+
+	for _, f := range tests {
+		_, err := parsePSOutput(f.output, f.pids)
+		t.Logf("tested %q, got err=%v", string(f.output), err)
+		if f.errExpected && err == nil {
+			t.Fatalf("expected error, got %v (%q)", err, string(f.output))
+		}
+		if !f.errExpected && err != nil {
+			t.Fatalf("expected nil, got %v (%q)", err, string(f.output))
+		}
+	}
+}
diff --git a/components/engine/daemon/top_windows.go b/components/engine/daemon/top_windows.go
new file mode 100644
index 0000000000..000720b004
--- /dev/null
+++ b/components/engine/daemon/top_windows.go
@@ -0,0 +1,53 @@
+package daemon
+
+import (
+	"errors"
+	"fmt"
+	"time"
+
+	containertypes "github.com/docker/docker/api/types/container"
+	"github.com/docker/go-units"
+)
+
+// ContainerTop handles `docker top` client requests.
+// Future considerations:
+// -- Windows users are far more familiar with CPU% total.
+//    Further, users on Windows rarely see user/kernel CPU stats split.
+//    The kernel returns everything in terms of 100ns. To obtain
+//    CPU%, we could do something like docker stats does, which takes two
+//    samples, computes the difference and does the maths. Unfortunately this
+//    would slow the stat call down and require two kernel calls. So instead,
+//    we do something similar to Linux and display the CPU as combined HH:MM:SS.mmm.
+// -- Perhaps we could add an argument to display "raw" stats
+// -- "Memory" is an extremely overloaded term in Windows. Hence we do what
+//    task manager does and use the private working set as the memory counter.
+//    We could return more info for those who really understand how memory
+//    management works in Windows if we introduced a "raw" stats (above).
+func (daemon *Daemon) ContainerTop(name string, psArgs string) (*containertypes.ContainerTopOKBody, error) {
+	// There is no Windows equivalent to the Linux 'ps' command
+	if psArgs != "" {
+		return nil, errors.New("Windows does not support arguments to top")
+	}
+
+	container, err := daemon.GetContainer(name)
+	if err != nil {
+		return nil, err
+	}
+
+	s, err := daemon.containerd.Summary(container.ID)
+	if err != nil {
+		return nil, err
+	}
+	procList := &containertypes.ContainerTopOKBody{}
+	procList.Titles = []string{"Name", "PID", "CPU", "Private Working Set"}
+
+	for _, j := range s {
+		d := time.Duration((j.KernelTime100ns + j.UserTime100ns) * 100) // Combined time in nanoseconds
+		procList.Processes = append(procList.Processes, []string{
+			j.ImageName,
+			fmt.Sprint(j.ProcessId),
+			fmt.Sprintf("%02d:%02d:%02d.%03d", int(d.Hours()), int(d.Minutes())%60, int(d.Seconds())%60, int(d.Nanoseconds()/1000000)%1000),
+			units.HumanSize(float64(j.MemoryWorkingSetPrivateBytes))})
+	}
+	return procList, nil
+}
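The 100ns-to-duration conversion above is compact; a worked sketch with a concrete value makes it easier to verify (the sample tick count is illustrative):

	// 36000000000 units of 100ns = 3.6e12ns = one hour of combined CPU time
	d := time.Duration(uint64(36000000000) * 100)
	out := fmt.Sprintf("%02d:%02d:%02d.%03d",
		int(d.Hours()), int(d.Minutes())%60, int(d.Seconds())%60,
		int(d.Nanoseconds()/1000000)%1000)
	// out == "01:00:00.000"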
diff --git a/components/engine/daemon/unpause.go b/components/engine/daemon/unpause.go
new file mode 100644
index 0000000000..e66b3868dc
--- /dev/null
+++ b/components/engine/daemon/unpause.go
@@ -0,0 +1,38 @@
+package daemon
+
+import (
+	"fmt"
+
+	"github.com/docker/docker/container"
+)
+
+// ContainerUnpause unpauses a container
+func (daemon *Daemon) ContainerUnpause(name string) error {
+	container, err := daemon.GetContainer(name)
+	if err != nil {
+		return err
+	}
+
+	if err := daemon.containerUnpause(container); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// containerUnpause resumes the container execution after the container is paused.
+func (daemon *Daemon) containerUnpause(container *container.Container) error {
+	container.Lock()
+	defer container.Unlock()
+
+	// We cannot unpause a container that is not paused
+	if !container.Paused {
+		return fmt.Errorf("Container %s is not paused", container.ID)
+	}
+
+	if err := daemon.containerd.Resume(container.ID); err != nil {
+		return fmt.Errorf("Cannot unpause container %s: %s", container.ID, err)
+	}
+
+	return nil
+}
diff --git a/components/engine/daemon/update.go b/components/engine/daemon/update.go
new file mode 100644
index 0000000000..76e4a3f93f
--- /dev/null
+++ b/components/engine/daemon/update.go
@@ -0,0 +1,78 @@
+package daemon
+
+import (
+	"fmt"
+
+	"github.com/docker/docker/api/types/container"
+)
+
+// ContainerUpdate updates configuration of the container
+func (daemon *Daemon) ContainerUpdate(name string, hostConfig *container.HostConfig) (container.ContainerUpdateOKBody, error) {
+	var warnings []string
+
+	warnings, err := daemon.verifyContainerSettings(hostConfig, nil, true)
+	if err != nil {
+		return container.ContainerUpdateOKBody{Warnings: warnings}, err
+	}
+
+	if err := daemon.update(name, hostConfig); err != nil {
+		return container.ContainerUpdateOKBody{Warnings: warnings}, err
+	}
+
+	return container.ContainerUpdateOKBody{Warnings: warnings}, nil
+}
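A short sketch of how a caller might drive this entry point, for instance to raise the memory limit of a container; the container name and values are illustrative:

	hc := &container.HostConfig{}
	hc.Resources.Memory = 512 * 1024 * 1024 // 512 MiB
	hc.Resources.MemorySwap = -1            // unlimited swap
	body, err := daemon.ContainerUpdate("web", hc)
	// body.Warnings may carry advisory messages from verification
	// even when err is nil
	_, _ = body, err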
+func (daemon *Daemon) update(name string, hostConfig *container.HostConfig) error {
+	if hostConfig == nil {
+		return nil
+	}
+
+	container, err := daemon.GetContainer(name)
+	if err != nil {
+		return err
+	}
+
+	restoreConfig := false
+	backupHostConfig := *container.HostConfig
+	defer func() {
+		if restoreConfig {
+			container.Lock()
+			container.HostConfig = &backupHostConfig
+			container.ToDisk()
+			container.Unlock()
+		}
+	}()
+
+	if container.RemovalInProgress || container.Dead {
+		return errCannotUpdate(container.ID, fmt.Errorf("Container is marked for removal and cannot be updated"))
+	}
+
+	if err := container.UpdateContainer(hostConfig); err != nil {
+		restoreConfig = true
+		return errCannotUpdate(container.ID, err)
+	}
+
+	// if the restart policy changed, we need to update the container monitor
+	if hostConfig.RestartPolicy.Name != "" {
+		container.UpdateMonitor(hostConfig.RestartPolicy)
+	}
+
+	// If the container is not running, updating the hostConfig struct is enough;
+	// resources will be updated when the container is started again.
+	// If the container is running (including paused), we need to push the updated
+	// configs to the real world.
+	if container.IsRunning() && !container.IsRestarting() {
+		if err := daemon.containerd.UpdateResources(container.ID, toContainerdResources(hostConfig.Resources)); err != nil {
+			restoreConfig = true
+			return errCannotUpdate(container.ID, err)
+		}
+	}
+
+	daemon.LogContainerEvent(container, "update")
+
+	return nil
+}
+
+func errCannotUpdate(containerID string, err error) error {
+	return fmt.Errorf("Cannot update container %s: %v", containerID, err)
+}
diff --git a/components/engine/daemon/update_linux.go b/components/engine/daemon/update_linux.go
new file mode 100644
index 0000000000..c128967218
--- /dev/null
+++ b/components/engine/daemon/update_linux.go
@@ -0,0 +1,32 @@
+// +build linux
+
+package daemon
+
+import (
+	"time"
+
+	"github.com/docker/docker/api/types/container"
+	"github.com/docker/docker/libcontainerd"
+)
+
+func toContainerdResources(resources container.Resources) libcontainerd.Resources {
+	var r libcontainerd.Resources
+	r.BlkioWeight = uint64(resources.BlkioWeight)
+	r.CpuShares = uint64(resources.CPUShares)
+	if resources.NanoCPUs != 0 {
+		r.CpuPeriod = uint64(100 * time.Millisecond / time.Microsecond)
+		r.CpuQuota = uint64(resources.NanoCPUs) * r.CpuPeriod / 1e9
+	} else {
+		r.CpuPeriod = uint64(resources.CPUPeriod)
+		r.CpuQuota = uint64(resources.CPUQuota)
+	}
+	r.CpusetCpus = resources.CpusetCpus
+	r.CpusetMems = resources.CpusetMems
+	r.MemoryLimit = uint64(resources.Memory)
+	if resources.MemorySwap > 0 {
+		r.MemorySwap = uint64(resources.MemorySwap)
+	}
+	r.MemoryReservation = uint64(resources.MemoryReservation)
+	r.KernelMemoryLimit = uint64(resources.KernelMemory)
+	return r
+}
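The NanoCPUs branch above is easiest to follow with numbers. A small sketch, assuming a request for 2.5 CPUs (NanoCPUs is expressed in billionths of a CPU):

	nanoCPUs := int64(2500000000) // 2.5 CPUs
	period := uint64(100 * time.Millisecond / time.Microsecond) // 100000µs
	quota := uint64(nanoCPUs) * period / 1e9                    // 250000µs
	// i.e. the container may consume 250ms of CPU time per 100ms period,
	// which the kernel satisfies by scheduling 2.5 CPUs' worth of time.
	_ = quota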
diff --git a/components/engine/daemon/update_solaris.go b/components/engine/daemon/update_solaris.go
new file mode 100644
index 0000000000..f3b545c5f0
--- /dev/null
+++ b/components/engine/daemon/update_solaris.go
@@ -0,0 +1,11 @@
+package daemon
+
+import (
+	"github.com/docker/docker/api/types/container"
+	"github.com/docker/docker/libcontainerd"
+)
+
+func toContainerdResources(resources container.Resources) libcontainerd.Resources {
+	var r libcontainerd.Resources
+	return r
+}
diff --git a/components/engine/daemon/update_windows.go b/components/engine/daemon/update_windows.go
new file mode 100644
index 0000000000..01466260bb
--- /dev/null
+++ b/components/engine/daemon/update_windows.go
@@ -0,0 +1,13 @@
+// +build windows
+
+package daemon
+
+import (
+	"github.com/docker/docker/api/types/container"
+	"github.com/docker/docker/libcontainerd"
+)
+
+func toContainerdResources(resources container.Resources) libcontainerd.Resources {
+	var r libcontainerd.Resources
+	return r
+}
diff --git a/components/engine/daemon/volumes.go b/components/engine/daemon/volumes.go
new file mode 100644
index 0000000000..26204338eb
--- /dev/null
+++ b/components/engine/daemon/volumes.go
@@ -0,0 +1,392 @@
+package daemon
+
+import (
+	"errors"
+	"fmt"
+	"os"
+	"path/filepath"
+	"reflect"
+	"strings"
+
+	"github.com/Sirupsen/logrus"
+	dockererrors "github.com/docker/docker/api/errors"
+	"github.com/docker/docker/api/types"
+	containertypes "github.com/docker/docker/api/types/container"
+	mounttypes "github.com/docker/docker/api/types/mount"
+	"github.com/docker/docker/container"
+	"github.com/docker/docker/volume"
+	"github.com/docker/docker/volume/drivers"
+)
+
+var (
+	// ErrVolumeReadonly is used to signal an error when trying to copy data into
+	// a volume mount that is not writable.
+	ErrVolumeReadonly = errors.New("mounted volume is marked read-only")
+)
+
+type mounts []container.Mount
+
+// volumeToAPIType converts a volume.Volume to the type used by the Engine API
+func volumeToAPIType(v volume.Volume) *types.Volume {
+	tv := &types.Volume{
+		Name:   v.Name(),
+		Driver: v.DriverName(),
+	}
+	if v, ok := v.(volume.DetailedVolume); ok {
+		tv.Labels = v.Labels()
+		tv.Options = v.Options()
+		tv.Scope = v.Scope()
+	}
+
+	return tv
+}
+
+// Len returns the number of mounts. Used in sorting.
+func (m mounts) Len() int {
+	return len(m)
+}
+
+// Less returns true if the number of parts (a/b/c would be 3 parts) in the
+// mount indexed by parameter 1 is less than that of the mount indexed by
+// parameter 2. Used in sorting.
+func (m mounts) Less(i, j int) bool {
+	return m.parts(i) < m.parts(j)
+}
+
+// Swap swaps two items in an array of mounts. Used in sorting
+func (m mounts) Swap(i, j int) {
+	m[i], m[j] = m[j], m[i]
+}
+
+// parts returns the number of parts in the destination of a mount. Used in sorting.
+func (m mounts) parts(i int) int {
+	return strings.Count(filepath.Clean(m[i].Destination), string(os.PathSeparator))
+}
+
+// registerMountPoints initializes the container mount points with the configured volumes and bind mounts.
+// It follows this sequence to decide what to mount in each final destination:
+//
+// 1. Select the previously configured mount points for the containers, if any.
+// 2. Select the volumes mounted from other containers. Overrides previously configured mount point destinations.
+// 3. Select the bind mounts set by the client. Overrides previously configured mount point destinations.
+// 4. Cleanup old volumes that are about to be reassigned.
+func (daemon *Daemon) registerMountPoints(container *container.Container, hostConfig *containertypes.HostConfig) (retErr error) {
+	binds := map[string]bool{}
+	mountPoints := map[string]*volume.MountPoint{}
+	defer func() {
+		// clean up the container mount points if we return with an error
+		if retErr != nil {
+			for _, m := range mountPoints {
+				if m.Volume == nil {
+					continue
+				}
+				daemon.volumes.Dereference(m.Volume, container.ID)
+			}
+		}
+	}()
+
+	dereferenceIfExists := func(destination string) {
+		if v, ok := mountPoints[destination]; ok {
+			logrus.Debugf("Duplicate mount point '%s'", destination)
+			if v.Volume != nil {
+				daemon.volumes.Dereference(v.Volume, container.ID)
+			}
+		}
+	}
+
+	// 1. Read already configured mount points.
+	for destination, point := range container.MountPoints {
+		mountPoints[destination] = point
+	}
+
+	// 2. Read volumes from other containers.
+	for _, v := range hostConfig.VolumesFrom {
+		containerID, mode, err := volume.ParseVolumesFrom(v)
+		if err != nil {
+			return err
+		}
+
+		c, err := daemon.GetContainer(containerID)
+		if err != nil {
+			return err
+		}
+
+		for _, m := range c.MountPoints {
+			cp := &volume.MountPoint{
+				Type:        m.Type,
+				Name:        m.Name,
+				Source:      m.Source,
+				RW:          m.RW && volume.ReadWrite(mode),
+				Driver:      m.Driver,
+				Destination: m.Destination,
+				Propagation: m.Propagation,
+				Spec:        m.Spec,
+				CopyData:    false,
+			}
+
+			if len(cp.Source) == 0 {
+				v, err := daemon.volumes.GetWithRef(cp.Name, cp.Driver, container.ID)
+				if err != nil {
+					return err
+				}
+				cp.Volume = v
+			}
+			dereferenceIfExists(cp.Destination)
+			mountPoints[cp.Destination] = cp
+		}
+	}
+	// 3. Read bind mounts
+	for _, b := range hostConfig.Binds {
+		bind, err := volume.ParseMountRaw(b, hostConfig.VolumeDriver)
+		if err != nil {
+			return err
+		}
+
+		// #10618
+		_, tmpfsExists := hostConfig.Tmpfs[bind.Destination]
+		if binds[bind.Destination] || tmpfsExists {
+			return fmt.Errorf("Duplicate mount point '%s'", bind.Destination)
+		}
+
+		if bind.Type == mounttypes.TypeVolume {
+			// create the volume
+			v, err := daemon.volumes.CreateWithRef(bind.Name, bind.Driver, container.ID, nil, nil)
+			if err != nil {
+				return err
+			}
+			bind.Volume = v
+			bind.Source = v.Path()
+			// bind.Name refers to an existing volume; use its actual driver here
+			bind.Driver = v.DriverName()
+			if bind.Driver == volume.DefaultDriverName {
+				setBindModeIfNull(bind)
+			}
+		}
+
+		binds[bind.Destination] = true
+		dereferenceIfExists(bind.Destination)
+		mountPoints[bind.Destination] = bind
+	}
+
+	for _, cfg := range hostConfig.Mounts {
+		mp, err := volume.ParseMountSpec(cfg)
+		if err != nil {
+			return dockererrors.NewBadRequestError(err)
+		}
+
+		if binds[mp.Destination] {
+			return fmt.Errorf("Duplicate mount point '%s'", cfg.Target)
+		}
+
+		if mp.Type == mounttypes.TypeVolume {
+			var v volume.Volume
+			if cfg.VolumeOptions != nil {
+				var driverOpts map[string]string
+				if cfg.VolumeOptions.DriverConfig != nil {
+					driverOpts = cfg.VolumeOptions.DriverConfig.Options
+				}
+				v, err = daemon.volumes.CreateWithRef(mp.Name, mp.Driver, container.ID, driverOpts, cfg.VolumeOptions.Labels)
+			} else {
+				v, err = daemon.volumes.CreateWithRef(mp.Name, mp.Driver, container.ID, nil, nil)
+			}
+			if err != nil {
+				return err
+			}
+
+			mp.Volume = v
+			mp.Name = v.Name()
+			mp.Driver = v.DriverName()
+
+			// only use the cached path here since getting the path is not necessary right now and calling `Path()` may be slow
+			if cv, ok := v.(interface {
+				CachedPath() string
+			}); ok {
+				mp.Source = cv.CachedPath()
+			}
+		}
+
+		binds[mp.Destination] = true
+		dereferenceIfExists(mp.Destination)
+		mountPoints[mp.Destination] = mp
+	}
+
+	container.Lock()
+
+	// 4. Cleanup old volumes that are about to be reassigned.
+	for _, m := range mountPoints {
+		if m.BackwardsCompatible() {
+			if mp, exists := container.MountPoints[m.Destination]; exists && mp.Volume != nil {
+				daemon.volumes.Dereference(mp.Volume, container.ID)
+			}
+		}
+	}
+	container.MountPoints = mountPoints
+
+	container.Unlock()
+
+	return nil
+}
+
+// lazyInitializeVolume initializes a mountpoint's volume if needed.
+// This happens after a daemon restart.
+func (daemon *Daemon) lazyInitializeVolume(containerID string, m *volume.MountPoint) error {
+	if len(m.Driver) > 0 && m.Volume == nil {
+		v, err := daemon.volumes.GetWithRef(m.Name, m.Driver, containerID)
+		if err != nil {
+			return err
+		}
+		m.Volume = v
+	}
+	return nil
+}
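The precedence order that registerMountPoints implements above can be made concrete with a host configuration; a sketch (container names and paths are illustrative):

	hc := &containertypes.HostConfig{
		// step 2: inherit every mount point of container "web", read-only
		VolumesFrom: []string{"web:ro"},
		// step 3: a bind for /data overrides any /data inherited from "web"
		Binds: []string{"/srv/data:/data"},
	}
	// After registerMountPoints, container.MountPoints["/data"] holds the
	// bind mount, and any volume previously referenced there is dereferenced.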
+// backportMountSpec resolves mount specs (introduced in 1.13) from pre-1.13
+// mount configurations.
+// The container lock should not be held when calling this function.
+// Changes are only made in-memory and may make changes to containers referenced
+// by `container.HostConfig.VolumesFrom`.
+func (daemon *Daemon) backportMountSpec(container *container.Container) {
+	container.Lock()
+	defer container.Unlock()
+
+	maybeUpdate := make(map[string]bool)
+	for _, mp := range container.MountPoints {
+		if mp.Spec.Source != "" && mp.Type != "" {
+			continue
+		}
+		maybeUpdate[mp.Destination] = true
+	}
+	if len(maybeUpdate) == 0 {
+		return
+	}
+
+	mountSpecs := make(map[string]bool, len(container.HostConfig.Mounts))
+	for _, m := range container.HostConfig.Mounts {
+		mountSpecs[m.Target] = true
+	}
+
+	binds := make(map[string]*volume.MountPoint, len(container.HostConfig.Binds))
+	for _, rawSpec := range container.HostConfig.Binds {
+		mp, err := volume.ParseMountRaw(rawSpec, container.HostConfig.VolumeDriver)
+		if err != nil {
+			logrus.WithError(err).Error("Got unexpected error while re-parsing raw volume spec during spec backport")
+			continue
+		}
+		binds[mp.Destination] = mp
+	}
+
+	volumesFrom := make(map[string]volume.MountPoint)
+	for _, fromSpec := range container.HostConfig.VolumesFrom {
+		from, _, err := volume.ParseVolumesFrom(fromSpec)
+		if err != nil {
+			logrus.WithError(err).WithField("id", container.ID).Error("Error reading volumes-from spec during mount spec backport")
+			continue
+		}
+		fromC, err := daemon.GetContainer(from)
+		if err != nil {
+			logrus.WithError(err).WithField("from-container", from).Error("Error looking up volumes-from container")
+			continue
+		}
+
+		// make sure the from-container's specs have been backported
+		daemon.backportMountSpec(fromC)
+
+		fromC.Lock()
+		for t, mp := range fromC.MountPoints {
+			volumesFrom[t] = *mp
+		}
+		fromC.Unlock()
+	}
+
+	needsUpdate := func(containerMount, other *volume.MountPoint) bool {
+		if containerMount.Type != other.Type || !reflect.DeepEqual(containerMount.Spec, other.Spec) {
+			return true
+		}
+		return false
+	}
+
+	// main
+	for _, cm := range container.MountPoints {
+		if !maybeUpdate[cm.Destination] {
+			continue
+		}
+		// nothing to backport if from hostconfig.Mounts
+		if mountSpecs[cm.Destination] {
+			continue
+		}
+
+		if mp, exists := binds[cm.Destination]; exists {
+			if needsUpdate(cm, mp) {
+				cm.Spec = mp.Spec
+				cm.Type = mp.Type
+			}
+			continue
+		}
+
+		if cm.Name != "" {
+			if mp, exists := volumesFrom[cm.Destination]; exists {
+				if needsUpdate(cm, &mp) {
+					cm.Spec = mp.Spec
+					cm.Type = mp.Type
+				}
+				continue
+			}
+
+			if cm.Type != "" {
+				// probably specified via the hostconfig.Mounts
+				continue
+			}
+
+			// anon volume
+			cm.Type = mounttypes.TypeVolume
+			cm.Spec.Type = mounttypes.TypeVolume
+		} else {
+			if cm.Type != "" {
+				// already updated
+				continue
+			}
+
+			cm.Type = mounttypes.TypeBind
+			cm.Spec.Type = mounttypes.TypeBind
+			cm.Spec.Source = cm.Source
+			if cm.Propagation != "" {
+				cm.Spec.BindOptions = &mounttypes.BindOptions{
+					Propagation: cm.Propagation,
+				}
+			}
+		}
+
+		cm.Spec.Target = cm.Destination
+		cm.Spec.ReadOnly = !cm.RW
+	}
+}
+
+func (daemon *Daemon) traverseLocalVolumes(fn func(volume.Volume) error) error {
+	localVolumeDriver, err := volumedrivers.GetDriver(volume.DefaultDriverName)
+	if err != nil {
+		return fmt.Errorf("can't retrieve local volume driver: %v", err)
+	}
+	vols, err := localVolumeDriver.List()
+	if err != nil {
+		return fmt.Errorf("can't retrieve local volumes: %v", err)
+	}
+
+	for _, v := range vols {
+		name := v.Name()
+		vol, err := daemon.volumes.Get(name)
+		if err != nil {
+			logrus.Warnf("failed to retrieve volume %s from store: %v", name, err)
+		} else {
+			// daemon.volumes.Get will return a DetailedVolume
+			v = vol
+		}
+
+		err = fn(v)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
diff --git a/components/engine/daemon/volumes_unit_test.go b/components/engine/daemon/volumes_unit_test.go
new file mode 100644
index 0000000000..450d17f978
--- /dev/null
+++ b/components/engine/daemon/volumes_unit_test.go
@@ -0,0 +1,39 @@
+package daemon
+
+import (
+	"testing"
+
+	"github.com/docker/docker/volume"
+)
+
+func TestParseVolumesFrom(t *testing.T) {
+	cases := []struct {
+		spec    string
+		expID   string
+		expMode string
+		fail    bool
+	}{
+		{"", "", "", true},
+		{"foobar", "foobar", "rw", false},
+		{"foobar:rw", "foobar", "rw", false},
+		{"foobar:ro", "foobar", "ro", false},
+		{"foobar:baz", "", "", true},
+	}
+
+	for _, c := range cases {
+		id, mode, err := volume.ParseVolumesFrom(c.spec)
+		if c.fail {
+			if err == nil {
+				t.Fatalf("Expected error, was nil, for spec %s\n", c.spec)
+			}
+			continue
+		}
+
+		if id != c.expID {
+			t.Fatalf("Expected id %s, was %s, for spec %s\n", c.expID, id, c.spec)
+		}
+		if mode != c.expMode {
+			t.Fatalf("Expected mode %s, was %s for spec %s\n", c.expMode, mode, c.spec)
+		}
+	}
+}
diff --git a/components/engine/daemon/volumes_unix.go b/components/engine/daemon/volumes_unix.go
new file mode 100644
index 0000000000..29dffa9ea0
--- /dev/null
+++ b/components/engine/daemon/volumes_unix.go
@@ -0,0 +1,219 @@
+// +build !windows
+
+// TODO(amitkris): We need to split this file for solaris.
+
+package daemon
+
+import (
+	"encoding/json"
+	"os"
+	"path/filepath"
+	"sort"
+	"strconv"
+	"strings"
+
+	"github.com/docker/docker/container"
+	"github.com/docker/docker/pkg/fileutils"
+	"github.com/docker/docker/pkg/mount"
+	"github.com/docker/docker/volume"
+	"github.com/docker/docker/volume/drivers"
+	"github.com/docker/docker/volume/local"
+	"github.com/pkg/errors"
+)
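The ordering enforced by sortMounts below matters for correctness, not just tidiness; a sketch of the effect (the paths are illustrative):

	m := mounts{
		{Destination: "/etc/resolv.conf"},
		{Destination: "/etc"},
	}
	sort.Sort(m)
	// m[0].Destination == "/etc", m[1].Destination == "/etc/resolv.conf":
	// the parent path mounts first, so the nested mount is not shadowed.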
+// setupMounts iterates through each of the mount points for a container and
+// calls Setup() on each. It also looks to see if it is a network mount such as
+// /etc/resolv.conf, and if it is not, appends it to the array of mounts.
+func (daemon *Daemon) setupMounts(c *container.Container) ([]container.Mount, error) {
+	var mounts []container.Mount
+	// TODO: tmpfs mounts should be part of Mountpoints
+	tmpfsMounts := make(map[string]bool)
+	tmpfsMountInfo, err := c.TmpfsMounts()
+	if err != nil {
+		return nil, err
+	}
+	for _, m := range tmpfsMountInfo {
+		tmpfsMounts[m.Destination] = true
+	}
+	for _, m := range c.MountPoints {
+		if tmpfsMounts[m.Destination] {
+			continue
+		}
+		if err := daemon.lazyInitializeVolume(c.ID, m); err != nil {
+			return nil, err
+		}
+		rootUID, rootGID := daemon.GetRemappedUIDGID()
+		path, err := m.Setup(c.MountLabel, rootUID, rootGID)
+		if err != nil {
+			return nil, err
+		}
+		if !c.TrySetNetworkMount(m.Destination, path) {
+			mnt := container.Mount{
+				Source:      path,
+				Destination: m.Destination,
+				Writable:    m.RW,
+				Propagation: string(m.Propagation),
+			}
+			if m.Volume != nil {
+				attributes := map[string]string{
+					"driver":      m.Volume.DriverName(),
+					"container":   c.ID,
+					"destination": m.Destination,
+					"read/write":  strconv.FormatBool(m.RW),
+					"propagation": string(m.Propagation),
+				}
+				daemon.LogVolumeEvent(m.Volume.Name(), "mount", attributes)
+			}
+			mounts = append(mounts, mnt)
+		}
+	}
+
+	mounts = sortMounts(mounts)
+	netMounts := c.NetworkMounts()
+	// if we are going to mount any of the network files from container
+	// metadata, the ownership must be set properly for potential container
+	// remapped root (user namespaces)
+	rootUID, rootGID := daemon.GetRemappedUIDGID()
+	for _, mount := range netMounts {
+		if err := os.Chown(mount.Source, rootUID, rootGID); err != nil {
+			return nil, err
+		}
+	}
+	return append(mounts, netMounts...), nil
+}
+
+// sortMounts sorts an array of mounts by the number of path components in
+// their destinations. This ensures that when mounting, the mounts don't
+// shadow other mounts. For example, if mounting /etc and /etc/resolv.conf,
+// /etc/resolv.conf must not be mounted first.
+func sortMounts(m []container.Mount) []container.Mount {
+	sort.Sort(mounts(m))
+	return m
+}
+
+// setBindModeIfNull is platform specific processing to ensure the
+// shared mode is set to 'z' if it is null. This is called in the case
+// of processing a named volume and not a typical bind.
+func setBindModeIfNull(bind *volume.MountPoint) {
+	if bind.Mode == "" {
+		bind.Mode = "z"
+	}
+}
+
+// migrateVolume links the contents of a volume created pre Docker 1.7
+// into the location expected by the local driver.
+// It creates a symlink from DOCKER_ROOT/vfs/dir/VOLUME_ID to DOCKER_ROOT/volumes/VOLUME_ID/_container_data.
+// It preserves the volume json configuration generated pre Docker 1.7 to be able to
+// downgrade from Docker 1.7 to Docker 1.6 without losing volume compatibility.
+func migrateVolume(id, vfs string) error {
+	l, err := volumedrivers.GetDriver(volume.DefaultDriverName)
+	if err != nil {
+		return err
+	}
+
+	newDataPath := l.(*local.Root).DataPath(id)
+	fi, err := os.Stat(newDataPath)
+	if err != nil && !os.IsNotExist(err) {
+		return err
+	}
+
+	if fi != nil && fi.IsDir() {
+		return nil
+	}
+
+	return os.Symlink(vfs, newDataPath)
+}
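verifyVolumesInfo below decodes the legacy configuration shape directly; a sketch of what such a pre-1.7 config carries, using the same shape as the local type that function defines (the host path and ID are illustrative):

	legacy := struct {
		Volumes   map[string]string
		VolumesRW map[string]bool
	}{
		Volumes:   map[string]string{"/data": "/var/lib/docker/vfs/dir/0123456789abcdef"},
		VolumesRW: map[string]bool{"/data": true},
	}
	// A host path under DOCKER_ROOT/vfs/dir becomes a named volume plus a
	// migration symlink; any other host path becomes a plain bind mount.
	_ = legacy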
+// verifyVolumesInfo ports volumes configured for containers pre docker 1.7.
+// It reads the container configuration and creates valid mount points for the old volumes.
+func (daemon *Daemon) verifyVolumesInfo(container *container.Container) error {
+	// Inspect old structures only when we're upgrading from old versions
+	// to versions >= 1.7 and the MountPoints map has not been populated with volumes data.
+	type volumes struct {
+		Volumes   map[string]string
+		VolumesRW map[string]bool
+	}
+	cfgPath, err := container.ConfigPath()
+	if err != nil {
+		return err
+	}
+	f, err := os.Open(cfgPath)
+	if err != nil {
+		return errors.Wrap(err, "could not open container config")
+	}
+	defer f.Close()
+	var cv volumes
+	if err := json.NewDecoder(f).Decode(&cv); err != nil {
+		return errors.Wrap(err, "could not decode container config")
+	}
+
+	if len(container.MountPoints) == 0 && len(cv.Volumes) > 0 {
+		for destination, hostPath := range cv.Volumes {
+			vfsPath := filepath.Join(daemon.root, "vfs", "dir")
+			rw := cv.VolumesRW != nil && cv.VolumesRW[destination]
+
+			if strings.HasPrefix(hostPath, vfsPath) {
+				id := filepath.Base(hostPath)
+				v, err := daemon.volumes.CreateWithRef(id, volume.DefaultDriverName, container.ID, nil, nil)
+				if err != nil {
+					return err
+				}
+				if err := migrateVolume(id, hostPath); err != nil {
+					return err
+				}
+				container.AddMountPointWithVolume(destination, v, true)
+			} else { // Bind mount
+				m := volume.MountPoint{Source: hostPath, Destination: destination, RW: rw}
+				container.MountPoints[destination] = &m
+			}
+		}
+		return container.ToDisk()
+	}
+	return nil
+}
+
+func (daemon *Daemon) mountVolumes(container *container.Container) error {
+	mounts, err := daemon.setupMounts(container)
+	if err != nil {
+		return err
+	}
+
+	for _, m := range mounts {
+		dest, err := container.GetResourcePath(m.Destination)
+		if err != nil {
+			return err
+		}
+
+		var stat os.FileInfo
+		stat, err = os.Stat(m.Source)
+		if err != nil {
+			return err
+		}
+		if err = fileutils.CreateIfNotExists(dest, stat.IsDir()); err != nil {
+			return err
+		}
+
+		opts := "rbind,ro"
+		if m.Writable {
+			opts = "rbind,rw"
+		}
+
+		if err := mount.Mount(m.Source, dest, bindMountType, opts); err != nil {
+			return err
+		}
+
+		// mountVolumes() seems to be called for temporary mounts
+		// outside the container. Soon these will be unmounted with
+		// the lazy unmount option and, given we have mounted with rbind,
+		// all the submounts will propagate if these are shared. If the
+		// daemon is running in the host namespace and has / as shared,
+		// then these unmounts will propagate and unmount the original
+		// mount as well. So make all these mounts rprivate.
+		// Do not use the propagation property of the volume, as that should
+		// apply only when mounting happens inside the container.
+ if err := mount.MakeRPrivate(dest); err != nil { + return err + } + } + + return nil +} diff --git a/components/engine/daemon/volumes_unix_test.go b/components/engine/daemon/volumes_unix_test.go new file mode 100644 index 0000000000..4be7719fe7 --- /dev/null +++ b/components/engine/daemon/volumes_unix_test.go @@ -0,0 +1,259 @@ +// +build !windows + +package daemon + +import ( + "encoding/json" + "fmt" + "reflect" + "testing" + + containertypes "github.com/docker/docker/api/types/container" + mounttypes "github.com/docker/docker/api/types/mount" + "github.com/docker/docker/container" + "github.com/docker/docker/volume" +) + +func TestBackportMountSpec(t *testing.T) { + d := Daemon{containers: container.NewMemoryStore()} + + c := &container.Container{ + CommonContainer: container.CommonContainer{ + State: &container.State{}, + MountPoints: map[string]*volume.MountPoint{ + "/apple": {Destination: "/apple", Source: "/var/lib/docker/volumes/12345678", Name: "12345678", RW: true, CopyData: true}, // anonymous volume + "/banana": {Destination: "/banana", Source: "/var/lib/docker/volumes/data", Name: "data", RW: true, CopyData: true}, // named volume + "/cherry": {Destination: "/cherry", Source: "/var/lib/docker/volumes/data", Name: "data", CopyData: true}, // RO named volume + "/dates": {Destination: "/dates", Source: "/var/lib/docker/volumes/data", Name: "data"}, // named volume nocopy + "/elderberry": {Destination: "/elderberry", Source: "/var/lib/docker/volumes/data", Name: "data"}, // masks anon vol + "/fig": {Destination: "/fig", Source: "/data", RW: true}, // RW bind + "/guava": {Destination: "/guava", Source: "/data", RW: false, Propagation: "shared"}, // RO bind + propagation + "/kumquat": {Destination: "/kumquat", Name: "data", RW: false, CopyData: true}, // volumes-from + + // partially configured mountpoint due to #32613 + // specifically, `mp.Spec.Source` is not set + "/honeydew": { + Type: mounttypes.TypeVolume, + Destination: "/honeydew", + Name: "data", + Source: "/var/lib/docker/volumes/data", + Spec: mounttypes.Mount{Type: mounttypes.TypeVolume, Target: "/honeydew", VolumeOptions: &mounttypes.VolumeOptions{NoCopy: true}}, + }, + + // from hostconfig.Mounts + "/jambolan": { + Type: mounttypes.TypeVolume, + Destination: "/jambolan", + Source: "/var/lib/docker/volumes/data", + RW: true, + Name: "data", + Spec: mounttypes.Mount{Type: mounttypes.TypeVolume, Target: "/jambolan", Source: "data"}, + }, + }, + HostConfig: &containertypes.HostConfig{ + Binds: []string{ + "data:/banana", + "data:/cherry:ro", + "data:/dates:ro,nocopy", + "data:/elderberry:ro,nocopy", + "/data:/fig", + "/data:/guava:ro,shared", + "data:/honeydew:nocopy", + }, + VolumesFrom: []string{"1:ro"}, + Mounts: []mounttypes.Mount{ + {Type: mounttypes.TypeVolume, Target: "/jambolan"}, + }, + }, + Config: &containertypes.Config{Volumes: map[string]struct{}{ + "/apple": {}, + "/elderberry": {}, + }}, + }} + + d.containers.Add("1", &container.Container{ + CommonContainer: container.CommonContainer{ + State: &container.State{}, + ID: "1", + MountPoints: map[string]*volume.MountPoint{ + "/kumquat": {Destination: "/kumquat", Name: "data", RW: false, CopyData: true}, + }, + HostConfig: &containertypes.HostConfig{ + Binds: []string{ + "data:/kumquat:ro", + }, + }, + }, + }) + + type expected struct { + mp *volume.MountPoint + comment string + } + + pretty := func(mp *volume.MountPoint) string { + b, err := json.MarshalIndent(mp, "\t", " ") + if err != nil { + return fmt.Sprintf("%#v", mp) + } + return string(b) + } + + for _, 
x := range []expected{
		{
			mp: &volume.MountPoint{
				Type:        mounttypes.TypeVolume,
				Destination: "/apple",
				RW:          true,
				Name:        "12345678",
				Source:      "/var/lib/docker/volumes/12345678",
				CopyData:    true,
				Spec: mounttypes.Mount{
					Type:   mounttypes.TypeVolume,
					Source: "",
					Target: "/apple",
				},
			},
			comment: "anonymous volume",
		},
		{
			mp: &volume.MountPoint{
				Type:        mounttypes.TypeVolume,
				Destination: "/banana",
				RW:          true,
				Name:        "data",
				Source:      "/var/lib/docker/volumes/data",
				CopyData:    true,
				Spec: mounttypes.Mount{
					Type:   mounttypes.TypeVolume,
					Source: "data",
					Target: "/banana",
				},
			},
			comment: "named volume",
		},
		{
			mp: &volume.MountPoint{
				Type:        mounttypes.TypeVolume,
				Destination: "/cherry",
				Name:        "data",
				Source:      "/var/lib/docker/volumes/data",
				CopyData:    true,
				Spec: mounttypes.Mount{
					Type:     mounttypes.TypeVolume,
					Source:   "data",
					Target:   "/cherry",
					ReadOnly: true,
				},
			},
			comment: "read-only named volume",
		},
		{
			mp: &volume.MountPoint{
				Type:        mounttypes.TypeVolume,
				Destination: "/dates",
				Name:        "data",
				Source:      "/var/lib/docker/volumes/data",
				Spec: mounttypes.Mount{
					Type:          mounttypes.TypeVolume,
					Source:        "data",
					Target:        "/dates",
					ReadOnly:      true,
					VolumeOptions: &mounttypes.VolumeOptions{NoCopy: true},
				},
			},
			comment: "named volume with nocopy",
		},
		{
			mp: &volume.MountPoint{
				Type:        mounttypes.TypeVolume,
				Destination: "/elderberry",
				Name:        "data",
				Source:      "/var/lib/docker/volumes/data",
				Spec: mounttypes.Mount{
					Type:          mounttypes.TypeVolume,
					Source:        "data",
					Target:        "/elderberry",
					ReadOnly:      true,
					VolumeOptions: &mounttypes.VolumeOptions{NoCopy: true},
				},
			},
			comment: "masks an anonymous volume",
		},
		{
			mp: &volume.MountPoint{
				Type:        mounttypes.TypeBind,
				Destination: "/fig",
				Source:      "/data",
				RW:          true,
				Spec: mounttypes.Mount{
					Type:   mounttypes.TypeBind,
					Source: "/data",
					Target: "/fig",
				},
			},
			comment: "bind mount with read/write",
		},
		{
			mp: &volume.MountPoint{
				Type:        mounttypes.TypeBind,
				Destination: "/guava",
				Source:      "/data",
				RW:          false,
				Propagation: "shared",
				Spec: mounttypes.Mount{
					Type:        mounttypes.TypeBind,
					Source:      "/data",
					Target:      "/guava",
					ReadOnly:    true,
					BindOptions: &mounttypes.BindOptions{Propagation: "shared"},
				},
			},
			comment: "read-only bind mount with shared propagation",
		},
		{
			mp: &volume.MountPoint{
				Type:        mounttypes.TypeVolume,
				Destination: "/honeydew",
				Source:      "/var/lib/docker/volumes/data",
				RW:          true,
				Propagation: "shared",
				Spec: mounttypes.Mount{
					Type:          mounttypes.TypeVolume,
					Source:        "data",
					Target:        "/honeydew",
					VolumeOptions: &mounttypes.VolumeOptions{NoCopy: true},
				},
			},
			comment: "partially configured named volume caused by #32613",
		},
		{
			mp:      &(*c.MountPoints["/jambolan"]), // copy the mountpoint, expect no changes
			comment: "volume defined in mounts API",
		},
		{
			mp: &volume.MountPoint{
				Type:        mounttypes.TypeVolume,
				Destination: "/kumquat",
				Source:      "/var/lib/docker/volumes/data",
				RW:          false,
				Name:        "data",
				Spec: mounttypes.Mount{
					Type:     mounttypes.TypeVolume,
					Source:   "data",
					Target:   "/kumquat",
					ReadOnly: true,
				},
			},
			comment: "partially configured named volume caused by #32613",
		},
	} {

		mp := c.MountPoints[x.mp.Destination]
		d.backportMountSpec(c)

		if !reflect.DeepEqual(mp.Spec, x.mp.Spec) {
			t.Fatalf("%s\nexpected:\n\t%s\n\ngot:\n\t%s", x.comment, pretty(x.mp), pretty(mp))
		}
	}
}
diff --git a/components/engine/daemon/volumes_windows.go b/components/engine/daemon/volumes_windows.go
new file mode 100644
index 0000000000..b43de47b65
--- /dev/null
+++ b/components/engine/daemon/volumes_windows.go
@@ -0,0 +1,47 @@
+// +build windows
+
+package daemon
+
+import (
+	"sort"
+
+	"github.com/docker/docker/container"
+	"github.com/docker/docker/volume"
+)
+
+// setupMounts configures the mount points for a container by appending each
+// of the configured mounts on the container to the OCI mount structure
+// which will ultimately be passed into the oci runtime during container creation.
+// It also ensures the mounts are sorted by path depth, parents first.
+
+// BUGBUG TODO Windows containerd. This would be much better if it returned
+// an array of runtime spec mounts, not container mounts. Then no need to
+// do multiple transitions.
+
+func (daemon *Daemon) setupMounts(c *container.Container) ([]container.Mount, error) {
+	var mnts []container.Mount
+	for _, mount := range c.MountPoints { // type is volume.MountPoint
+		if err := daemon.lazyInitializeVolume(c.ID, mount); err != nil {
+			return nil, err
+		}
+		s, err := mount.Setup(c.MountLabel, 0, 0)
+		if err != nil {
+			return nil, err
+		}
+
+		mnts = append(mnts, container.Mount{
+			Source:      s,
+			Destination: mount.Destination,
+			Writable:    mount.RW,
+		})
+	}
+
+	sort.Sort(mounts(mnts))
+	return mnts, nil
+}
+
+// setBindModeIfNull is platform specific processing which is a no-op on
+// Windows.
+func setBindModeIfNull(bind *volume.MountPoint) {
+	return
+}
diff --git a/components/engine/daemon/wait.go b/components/engine/daemon/wait.go
new file mode 100644
index 0000000000..76c16b9efc
--- /dev/null
+++ b/components/engine/daemon/wait.go
@@ -0,0 +1,22 @@
+package daemon
+
+import (
+	"github.com/docker/docker/container"
+	"golang.org/x/net/context"
+)
+
+// ContainerWait waits until the given container is in a certain state
+// indicated by the given condition. If the container is not found, a nil
+// channel and non-nil error is returned immediately. If the container is
+// found, a status result will be sent on the returned channel once the wait
+// condition is met or if an error occurs waiting for the container (such as a
+// context timeout or cancellation). On a successful wait, the exit code of the
+// container is returned in the status with a nil Err() value.
+func (daemon *Daemon) ContainerWait(ctx context.Context, name string, condition container.WaitCondition) (<-chan container.StateStatus, error) {
+	cntr, err := daemon.GetContainer(name)
+	if err != nil {
+		return nil, err
+	}
+
+	return cntr.Wait(ctx, condition), nil
+}
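A sketch of how a caller might consume ContainerWait with a deadline, assuming a `time` import alongside the context package; the container name and timeout are illustrative:

	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()
	waitC, err := daemon.ContainerWait(ctx, "web", container.WaitConditionNotRunning)
	if err != nil {
		return err // container not found
	}
	status := <-waitC
	if status.Err() != nil {
		return status.Err() // e.g. the 30s deadline expired
	}
	exitCode := status.ExitCode()
	_ = exitCode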
diff --git a/components/engine/daemon/workdir.go b/components/engine/daemon/workdir.go
new file mode 100644
index 0000000000..99a2a8ea57
--- /dev/null
+++ b/components/engine/daemon/workdir.go
@@ -0,0 +1,21 @@
+package daemon
+
+// ContainerCreateWorkdir creates the working directory. This solves the
+// issue arising from https://github.com/docker/docker/issues/27545,
+// which was initially fixed by https://github.com/docker/docker/pull/27884. But that fix
+// was too expensive in terms of performance on Windows. Instead,
+// https://github.com/docker/docker/pull/28514 introduces this new functionality
+// where the builder calls into the backend here to create the working directory.
+func (daemon *Daemon) ContainerCreateWorkdir(cID string) error {
+	container, err := daemon.GetContainer(cID)
+	if err != nil {
+		return err
+	}
+	err = daemon.Mount(container)
+	if err != nil {
+		return err
+	}
+	defer daemon.Unmount(container)
+	rootUID, rootGID := daemon.GetRemappedUIDGID()
+	return container.SetupWorkingDirectory(rootUID, rootGID)
+}
diff --git a/components/engine/distribution/config.go b/components/engine/distribution/config.go
new file mode 100644
index 0000000000..f24678d0e2
--- /dev/null
+++ b/components/engine/distribution/config.go
@@ -0,0 +1,244 @@
+package distribution
+
+import (
+	"encoding/json"
+	"fmt"
+	"io"
+	"runtime"
+
+	"github.com/docker/distribution"
+	"github.com/docker/distribution/manifest/schema2"
+	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/distribution/metadata"
+	"github.com/docker/docker/distribution/xfer"
+	"github.com/docker/docker/image"
+	"github.com/docker/docker/layer"
+	"github.com/docker/docker/pkg/progress"
+	refstore "github.com/docker/docker/reference"
+	"github.com/docker/docker/registry"
+	"github.com/docker/libtrust"
+	"github.com/opencontainers/go-digest"
+	"golang.org/x/net/context"
+)
+
+// Config stores configuration for communicating
+// with a registry.
+type Config struct {
+	// MetaHeaders stores HTTP headers with metadata about the image
+	MetaHeaders map[string][]string
+	// AuthConfig holds authentication credentials for authenticating with
+	// the registry.
+	AuthConfig *types.AuthConfig
+	// ProgressOutput is the interface for showing the status of the pull
+	// operation.
+	ProgressOutput progress.Output
+	// RegistryService is the registry service to use for TLS configuration
+	// and endpoint lookup.
+	RegistryService registry.Service
+	// ImageEventLogger notifies events for a given image
+	ImageEventLogger func(id, name, action string)
+	// MetadataStore is the storage backend for distribution-specific
+	// metadata.
+	MetadataStore metadata.Store
+	// ImageStore manages images.
+	ImageStore ImageConfigStore
+	// ReferenceStore manages tags. This value is optional; when excluded,
+	// content will not be tagged.
+	ReferenceStore refstore.Store
+	// RequireSchema2 ensures that only schema2 manifests are used.
+	RequireSchema2 bool
+}
+
+// ImagePullConfig stores pull configuration.
+type ImagePullConfig struct {
+	Config
+
+	// DownloadManager manages concurrent pulls.
+	DownloadManager RootFSDownloadManager
+	// Schema2Types is the valid schema2 configuration types allowed
+	// by the pull operation.
+	Schema2Types []string
+}
+
+// ImagePushConfig stores push configuration.
+type ImagePushConfig struct {
+	Config
+
+	// ConfigMediaType is the configuration media type for
+	// schema2 manifests.
+	ConfigMediaType string
+	// LayerStore manages layers.
+	LayerStore PushLayerProvider
+	// TrustKey is the private key for legacy signatures. This is typically
+	// an ephemeral key, since these signatures are no longer verified.
+	TrustKey libtrust.PrivateKey
+	// UploadManager dispatches uploads.
+	UploadManager *xfer.LayerUploadManager
+}
+
+// ImageConfigStore handles storing and getting image configurations
+// by digest. It also allows getting an image configuration's rootfs
+// from the configuration.
+type ImageConfigStore interface {
+	Put([]byte) (digest.Digest, error)
+	Get(digest.Digest) ([]byte, error)
+	RootFSFromConfig([]byte) (*image.RootFS, error)
+}
+
+// PushLayerProvider provides layers to be pushed by ChainID.
+type PushLayerProvider interface {
+	Get(layer.ChainID) (PushLayer, error)
+}
+
+// PushLayer is a pushable layer with metadata about the layer
+// and access to the content of the layer.
+type PushLayer interface {
+	ChainID() layer.ChainID
+	DiffID() layer.DiffID
+	Parent() PushLayer
+	Open() (io.ReadCloser, error)
+	Size() (int64, error)
+	MediaType() string
+	Release()
+}
+
+// RootFSDownloadManager handles downloading of the rootfs.
+type RootFSDownloadManager interface {
+	// Download downloads the layers into the given initial rootfs and
+	// returns the final rootfs. Progress is reported through the given
+	// progress output. The returned release function must be called to
+	// free the download resources.
+	Download(ctx context.Context, initialRootFS image.RootFS, layers []xfer.DownloadDescriptor, progressOutput progress.Output) (image.RootFS, func(), error)
+}
+
+type imageConfigStore struct {
+	image.Store
+}
+
+// NewImageConfigStoreFromStore returns an ImageConfigStore backed
+// by an image.Store for container images.
+func NewImageConfigStoreFromStore(is image.Store) ImageConfigStore {
+	return &imageConfigStore{
+		Store: is,
+	}
+}
+
+func (s *imageConfigStore) Put(c []byte) (digest.Digest, error) {
+	id, err := s.Store.Create(c)
+	return digest.Digest(id), err
+}
+
+func (s *imageConfigStore) Get(d digest.Digest) ([]byte, error) {
+	img, err := s.Store.Get(image.IDFromDigest(d))
+	if err != nil {
+		return nil, err
+	}
+	return img.RawJSON(), nil
+}
+
+func (s *imageConfigStore) RootFSFromConfig(c []byte) (*image.RootFS, error) {
+	var unmarshalledConfig image.Image
+	if err := json.Unmarshal(c, &unmarshalledConfig); err != nil {
+		return nil, err
+	}
+
+	// fail immediately on Windows when downloading a non-Windows image
+	// and vice versa
+	if runtime.GOOS == "windows" && unmarshalledConfig.OS == "linux" {
+		return nil, fmt.Errorf("image operating system %q cannot be used on this platform", unmarshalledConfig.OS)
+	} else if runtime.GOOS != "windows" && unmarshalledConfig.OS == "windows" {
+		return nil, fmt.Errorf("image operating system %q cannot be used on this platform", unmarshalledConfig.OS)
+	}
+
+	return unmarshalledConfig.RootFS, nil
+}
+
+type storeLayerProvider struct {
+	ls layer.Store
+}
+
+// NewLayerProviderFromStore returns a layer provider backed by
+// an instance of LayerStore. Layers are only retrievable as
+// uncompressed tars.
+func NewLayerProviderFromStore(ls layer.Store) PushLayerProvider { + return &storeLayerProvider{ + ls: ls, + } +} + +func (p *storeLayerProvider) Get(lid layer.ChainID) (PushLayer, error) { + if lid == "" { + return &storeLayer{ + Layer: layer.EmptyLayer, + }, nil + } + l, err := p.ls.Get(lid) + if err != nil { + return nil, err + } + + sl := storeLayer{ + Layer: l, + ls: p.ls, + } + if d, ok := l.(distribution.Describable); ok { + return &describableStoreLayer{ + storeLayer: sl, + describable: d, + }, nil + } + + return &sl, nil +} + +type storeLayer struct { + layer.Layer + ls layer.Store +} + +func (l *storeLayer) Parent() PushLayer { + p := l.Layer.Parent() + if p == nil { + return nil + } + sl := storeLayer{ + Layer: p, + ls: l.ls, + } + if d, ok := p.(distribution.Describable); ok { + return &describableStoreLayer{ + storeLayer: sl, + describable: d, + } + } + + return &sl +} + +func (l *storeLayer) Open() (io.ReadCloser, error) { + return l.Layer.TarStream() +} + +func (l *storeLayer) Size() (int64, error) { + return l.Layer.DiffSize() +} + +func (l *storeLayer) MediaType() string { + // layer store always returns uncompressed tars + return schema2.MediaTypeUncompressedLayer +} + +func (l *storeLayer) Release() { + if l.ls != nil { + layer.ReleaseAndLog(l.ls, l.Layer) + } +} + +type describableStoreLayer struct { + storeLayer + describable distribution.Describable +} + +func (l *describableStoreLayer) Descriptor() distribution.Descriptor { + return l.describable.Descriptor() +} diff --git a/components/engine/distribution/errors.go b/components/engine/distribution/errors.go new file mode 100644 index 0000000000..adf272a5cf --- /dev/null +++ b/components/engine/distribution/errors.go @@ -0,0 +1,159 @@ +package distribution + +import ( + "net/url" + "strings" + "syscall" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/api/errcode" + "github.com/docker/distribution/registry/api/v2" + "github.com/docker/distribution/registry/client" + "github.com/docker/distribution/registry/client/auth" + "github.com/docker/docker/distribution/xfer" + "github.com/pkg/errors" +) + +// ErrNoSupport is an error type used for errors indicating that an operation +// is not supported. It encapsulates a more specific error. +type ErrNoSupport struct{ Err error } + +func (e ErrNoSupport) Error() string { + if e.Err == nil { + return "not supported" + } + return e.Err.Error() +} + +// fallbackError wraps an error that can possibly allow fallback to a different +// endpoint. +type fallbackError struct { + // err is the error being wrapped. + err error + // confirmedV2 is set to true if it was confirmed that the registry + // supports the v2 protocol. This is used to limit fallbacks to the v1 + // protocol. + confirmedV2 bool + // transportOK is set to true if we managed to speak HTTP with the + // registry. This confirms that we're using appropriate TLS settings + // (or lack of TLS). + transportOK bool +} + +// Error renders the FallbackError as a string. +func (f fallbackError) Error() string { + return f.Cause().Error() +} + +func (f fallbackError) Cause() error { + return f.err +} + +// shouldV2Fallback returns true if this error is a reason to fall back to v1. 
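+//
+// For example (illustrative only), a registry that answers "manifest
+// unknown" may simply be a v1-only home for the image:
+//
+//	shouldV2Fallback(errcode.Error{Code: v2.ErrorCodeManifestUnknown}) // true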
+func shouldV2Fallback(err errcode.Error) bool { + switch err.Code { + case errcode.ErrorCodeUnauthorized, v2.ErrorCodeManifestUnknown, v2.ErrorCodeNameUnknown: + return true + } + return false +} + +// TranslatePullError is used to convert an error from a registry pull +// operation to an error representing the entire pull operation. Any error +// information which is not used by the returned error gets output to +// log at info level. +func TranslatePullError(err error, ref reference.Named) error { + switch v := err.(type) { + case errcode.Errors: + if len(v) != 0 { + for _, extra := range v[1:] { + logrus.Infof("Ignoring extra error returned from registry: %v", extra) + } + return TranslatePullError(v[0], ref) + } + case errcode.Error: + var newErr error + switch v.Code { + case errcode.ErrorCodeDenied: + // ErrorCodeDenied is used when access to the repository was denied + newErr = errors.Errorf("repository %s not found: does not exist or no pull access", reference.FamiliarName(ref)) + case v2.ErrorCodeManifestUnknown: + newErr = errors.Errorf("manifest for %s not found", reference.FamiliarString(ref)) + case v2.ErrorCodeNameUnknown: + newErr = errors.Errorf("repository %s not found", reference.FamiliarName(ref)) + } + if newErr != nil { + logrus.Infof("Translating %q to %q", err, newErr) + return newErr + } + case xfer.DoNotRetry: + return TranslatePullError(v.Err, ref) + } + + return err +} + +// continueOnError returns true if we should fallback to the next endpoint +// as a result of this error. +func continueOnError(err error) bool { + switch v := err.(type) { + case errcode.Errors: + if len(v) == 0 { + return true + } + return continueOnError(v[0]) + case ErrNoSupport: + return continueOnError(v.Err) + case errcode.Error: + return shouldV2Fallback(v) + case *client.UnexpectedHTTPResponseError: + return true + case ImageConfigPullError: + return false + case error: + return !strings.Contains(err.Error(), strings.ToLower(syscall.ENOSPC.Error())) + } + // let's be nice and fallback if the error is a completely + // unexpected one. + // If new errors have to be handled in some way, please + // add them to the switch above. + return true +} + +// retryOnError wraps the error in xfer.DoNotRetry if we should not retry the +// operation after this error. +func retryOnError(err error) error { + switch v := err.(type) { + case errcode.Errors: + if len(v) != 0 { + return retryOnError(v[0]) + } + case errcode.Error: + switch v.Code { + case errcode.ErrorCodeUnauthorized, errcode.ErrorCodeUnsupported, errcode.ErrorCodeDenied, errcode.ErrorCodeTooManyRequests, v2.ErrorCodeNameUnknown: + return xfer.DoNotRetry{Err: err} + } + case *url.Error: + switch v.Err { + case auth.ErrNoBasicAuthCredentials, auth.ErrNoToken: + return xfer.DoNotRetry{Err: v.Err} + } + return retryOnError(v.Err) + case *client.UnexpectedHTTPResponseError: + return xfer.DoNotRetry{Err: err} + case error: + if err == distribution.ErrBlobUnknown { + return xfer.DoNotRetry{Err: err} + } + if strings.Contains(err.Error(), strings.ToLower(syscall.ENOSPC.Error())) { + return xfer.DoNotRetry{Err: err} + } + } + // let's be nice and fallback if the error is a completely + // unexpected one. + // If new errors have to be handled in some way, please + // add them to the switch above. 
+ return err +} diff --git a/components/engine/distribution/fixtures/validate_manifest/bad_manifest b/components/engine/distribution/fixtures/validate_manifest/bad_manifest new file mode 100644 index 0000000000..a1f02a62a3 --- /dev/null +++ b/components/engine/distribution/fixtures/validate_manifest/bad_manifest @@ -0,0 +1,38 @@ +{ + "schemaVersion": 2, + "name": "library/hello-world", + "tag": "latest", + "architecture": "amd64", + "fsLayers": [ + { + "blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4" + }, + { + "blobSum": "sha256:03f4658f8b782e12230c1783426bd3bacce651ce582a4ffb6fbbfa2079428ecb" + } + ], + "history": [ + { + "v1Compatibility": "{\"id\":\"af340544ed62de0680f441c71fa1a80cb084678fed42bae393e543faea3a572c\",\"parent\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"created\":\"2015-08-06T23:53:22.608577814Z\",\"container\":\"c2b715156f640c7ac7d98472ea24335aba5432a1323a3bb722697e6d37ef794f\",\"container_config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) CMD [\\\"/hello\\\"]\"],\"Image\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{}},\"docker_version\":\"1.7.1\",\"config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/hello\"],\"Image\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n" + }, + { + "v1Compatibility": "{\"id\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"created\":\"2015-08-06T23:53:22.241352727Z\",\"container\":\"9aeb0006ffa72a8287564caaea87625896853701459261d3b569e320c0c9d5dc\",\"container_config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) COPY file:4abd3bff60458ca3b079d7b131ce26b2719055a030dfa96ff827da2b7c7038a7 in /\"],\"Image\":\"\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":null},\"docker_version\":\"1.7.1\",\"config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":null,\"Image\":\"\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":null},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":960}\n" + } + ], + "signatures": [ + { + "header": { + "jwk": { + "crv": "P-256", + "kid": 
"OIH7:HQFS:44FK:45VB:3B53:OIAG:TPL4:ATF5:6PNE:MGHN:NHQX:2GE4", + "kty": "EC", + "x": "Cu_UyxwLgHzE9rvlYSmvVdqYCXY42E9eNhBb0xNv0SQ", + "y": "zUsjWJkeKQ5tv7S-hl1Tg71cd-CqnrtiiLxSi6N_yc8" + }, + "alg": "ES256" + }, + "signature": "Y6xaFz9Sy-OtcnKQS1Ilq3Dh8cu4h3nBTJCpOTF1XF7vKtcxxA_xMP8-SgDo869SJ3VsvgPL9-Xn-OoYG2rb1A", + "protected": "eyJmb3JtYXRMZW5ndGgiOjMxOTcsImZvcm1hdFRhaWwiOiJDbjAiLCJ0aW1lIjoiMjAxNS0wOS0xMVQwNDoxMzo0OFoifQ" + } + ] +} diff --git a/components/engine/distribution/fixtures/validate_manifest/extra_data_manifest b/components/engine/distribution/fixtures/validate_manifest/extra_data_manifest new file mode 100644 index 0000000000..beec19a801 --- /dev/null +++ b/components/engine/distribution/fixtures/validate_manifest/extra_data_manifest @@ -0,0 +1,46 @@ +{ + "schemaVersion": 1, + "name": "library/hello-world", + "tag": "latest", + "architecture": "amd64", + "fsLayers": [ + { + "blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4" + }, + { + "blobSum": "sha256:03f4658f8b782e12230c1783426bd3bacce651ce582a4ffb6fbbfa2079428ecb" + } + ], + "history": [ + { + "v1Compatibility": "{\"id\":\"af340544ed62de0680f441c71fa1a80cb084678fed42bae393e543faea3a572c\",\"parent\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"created\":\"2015-08-06T23:53:22.608577814Z\",\"container\":\"c2b715156f640c7ac7d98472ea24335aba5432a1323a3bb722697e6d37ef794f\",\"container_config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) CMD [\\\"/hello\\\"]\"],\"Image\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{}},\"docker_version\":\"1.7.1\",\"config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/hello\"],\"Image\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n" + }, + { + "v1Compatibility": "{\"id\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"created\":\"2015-08-06T23:53:22.241352727Z\",\"container\":\"9aeb0006ffa72a8287564caaea87625896853701459261d3b569e320c0c9d5dc\",\"container_config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) COPY file:4abd3bff60458ca3b079d7b131ce26b2719055a030dfa96ff827da2b7c7038a7 in 
/\"],\"Image\":\"\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":null},\"docker_version\":\"1.7.1\",\"config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":null,\"Image\":\"\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":null},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":960}\n" + } + ], + "fsLayers": [ + { + "blobSum": "sha256:ffff95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4" + }, + { + "blobSum": "sha256:ffff658f8b782e12230c1783426bd3bacce651ce582a4ffb6fbbfa2079428ecb" + } + ], + "signatures": [ + { + "header": { + "jwk": { + "crv": "P-256", + "kid": "OIH7:HQFS:44FK:45VB:3B53:OIAG:TPL4:ATF5:6PNE:MGHN:NHQX:2GE4", + "kty": "EC", + "x": "Cu_UyxwLgHzE9rvlYSmvVdqYCXY42E9eNhBb0xNv0SQ", + "y": "zUsjWJkeKQ5tv7S-hl1Tg71cd-CqnrtiiLxSi6N_yc8" + }, + "alg": "ES256" + }, + "signature": "Y6xaFz9Sy-OtcnKQS1Ilq3Dh8cu4h3nBTJCpOTF1XF7vKtcxxA_xMP8-SgDo869SJ3VsvgPL9-Xn-OoYG2rb1A", + "protected": "eyJmb3JtYXRMZW5ndGgiOjMxOTcsImZvcm1hdFRhaWwiOiJDbjAiLCJ0aW1lIjoiMjAxNS0wOS0xMVQwNDoxMzo0OFoifQ" + } + ] +} diff --git a/components/engine/distribution/fixtures/validate_manifest/good_manifest b/components/engine/distribution/fixtures/validate_manifest/good_manifest new file mode 100644 index 0000000000..b107de3226 --- /dev/null +++ b/components/engine/distribution/fixtures/validate_manifest/good_manifest @@ -0,0 +1,38 @@ +{ + "schemaVersion": 1, + "name": "library/hello-world", + "tag": "latest", + "architecture": "amd64", + "fsLayers": [ + { + "blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4" + }, + { + "blobSum": "sha256:03f4658f8b782e12230c1783426bd3bacce651ce582a4ffb6fbbfa2079428ecb" + } + ], + "history": [ + { + "v1Compatibility": "{\"id\":\"af340544ed62de0680f441c71fa1a80cb084678fed42bae393e543faea3a572c\",\"parent\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"created\":\"2015-08-06T23:53:22.608577814Z\",\"container\":\"c2b715156f640c7ac7d98472ea24335aba5432a1323a3bb722697e6d37ef794f\",\"container_config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) CMD 
[\\\"/hello\\\"]\"],\"Image\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{}},\"docker_version\":\"1.7.1\",\"config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/hello\"],\"Image\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n" + }, + { + "v1Compatibility": "{\"id\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"created\":\"2015-08-06T23:53:22.241352727Z\",\"container\":\"9aeb0006ffa72a8287564caaea87625896853701459261d3b569e320c0c9d5dc\",\"container_config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) COPY file:4abd3bff60458ca3b079d7b131ce26b2719055a030dfa96ff827da2b7c7038a7 in /\"],\"Image\":\"\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":null},\"docker_version\":\"1.7.1\",\"config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":null,\"Image\":\"\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":null},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":960}\n" + } + ], + "signatures": [ + { + "header": { + "jwk": { + "crv": "P-256", + "kid": "OIH7:HQFS:44FK:45VB:3B53:OIAG:TPL4:ATF5:6PNE:MGHN:NHQX:2GE4", + "kty": "EC", + "x": "Cu_UyxwLgHzE9rvlYSmvVdqYCXY42E9eNhBb0xNv0SQ", + "y": "zUsjWJkeKQ5tv7S-hl1Tg71cd-CqnrtiiLxSi6N_yc8" + }, + "alg": "ES256" + }, + "signature": "Y6xaFz9Sy-OtcnKQS1Ilq3Dh8cu4h3nBTJCpOTF1XF7vKtcxxA_xMP8-SgDo869SJ3VsvgPL9-Xn-OoYG2rb1A", + "protected": "eyJmb3JtYXRMZW5ndGgiOjMxOTcsImZvcm1hdFRhaWwiOiJDbjAiLCJ0aW1lIjoiMjAxNS0wOS0xMVQwNDoxMzo0OFoifQ" + } + ] +} \ No newline at end of file diff --git a/components/engine/distribution/metadata/metadata.go b/components/engine/distribution/metadata/metadata.go new file mode 100644 index 0000000000..05ba4f817d --- /dev/null +++ b/components/engine/distribution/metadata/metadata.go @@ -0,0 +1,75 @@ +package metadata + +import ( + "io/ioutil" + "os" + "path/filepath" + "sync" + + "github.com/docker/docker/pkg/ioutils" +) + +// Store implements a K/V store for mapping distribution-related IDs +// to on-disk layer IDs and image IDs. The namespace identifies the type of +// mapping (i.e. "v1ids" or "artifacts"). MetadataStore is goroutine-safe. +type Store interface { + // Get retrieves data by namespace and key. + Get(namespace string, key string) ([]byte, error) + // Set writes data indexed by namespace and key. 
+ Set(namespace, key string, value []byte) error + // Delete removes data indexed by namespace and key. + Delete(namespace, key string) error +} + +// FSMetadataStore uses the filesystem to associate metadata with layer and +// image IDs. +type FSMetadataStore struct { + sync.RWMutex + basePath string +} + +// NewFSMetadataStore creates a new filesystem-based metadata store. +func NewFSMetadataStore(basePath string) (*FSMetadataStore, error) { + if err := os.MkdirAll(basePath, 0700); err != nil { + return nil, err + } + return &FSMetadataStore{ + basePath: basePath, + }, nil +} + +func (store *FSMetadataStore) path(namespace, key string) string { + return filepath.Join(store.basePath, namespace, key) +} + +// Get retrieves data by namespace and key. The data is read from a file named +// after the key, stored in the namespace's directory. +func (store *FSMetadataStore) Get(namespace string, key string) ([]byte, error) { + store.RLock() + defer store.RUnlock() + + return ioutil.ReadFile(store.path(namespace, key)) +} + +// Set writes data indexed by namespace and key. The data is written to a file +// named after the key, stored in the namespace's directory. +func (store *FSMetadataStore) Set(namespace, key string, value []byte) error { + store.Lock() + defer store.Unlock() + + path := store.path(namespace, key) + if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil { + return err + } + return ioutils.AtomicWriteFile(path, value, 0644) +} + +// Delete removes data indexed by namespace and key. The data file named after +// the key, stored in the namespace's directory is deleted. +func (store *FSMetadataStore) Delete(namespace, key string) error { + store.Lock() + defer store.Unlock() + + path := store.path(namespace, key) + return os.Remove(path) +} diff --git a/components/engine/distribution/metadata/v1_id_service.go b/components/engine/distribution/metadata/v1_id_service.go new file mode 100644 index 0000000000..f262d4dc34 --- /dev/null +++ b/components/engine/distribution/metadata/v1_id_service.go @@ -0,0 +1,51 @@ +package metadata + +import ( + "github.com/docker/docker/image/v1" + "github.com/docker/docker/layer" + "github.com/pkg/errors" +) + +// V1IDService maps v1 IDs to layers on disk. +type V1IDService struct { + store Store +} + +// NewV1IDService creates a new V1 ID mapping service. +func NewV1IDService(store Store) *V1IDService { + return &V1IDService{ + store: store, + } +} + +// namespace returns the namespace used by this service. +func (idserv *V1IDService) namespace() string { + return "v1id" +} + +// Get finds a layer by its V1 ID. +func (idserv *V1IDService) Get(v1ID, registry string) (layer.DiffID, error) { + if idserv.store == nil { + return "", errors.New("no v1IDService storage") + } + if err := v1.ValidateID(v1ID); err != nil { + return layer.DiffID(""), err + } + + idBytes, err := idserv.store.Get(idserv.namespace(), registry+","+v1ID) + if err != nil { + return layer.DiffID(""), err + } + return layer.DiffID(idBytes), nil +} + +// Set associates an image with a V1 ID. 
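+// Entries live in the "v1id" namespace under the key "<registry>,<v1ID>",
+// so the same v1 ID can map to different layers on different registries.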
+func (idserv *V1IDService) Set(v1ID, registry string, id layer.DiffID) error { + if idserv.store == nil { + return nil + } + if err := v1.ValidateID(v1ID); err != nil { + return err + } + return idserv.store.Set(idserv.namespace(), registry+","+v1ID, []byte(id)) +} diff --git a/components/engine/distribution/metadata/v1_id_service_test.go b/components/engine/distribution/metadata/v1_id_service_test.go new file mode 100644 index 0000000000..556886581e --- /dev/null +++ b/components/engine/distribution/metadata/v1_id_service_test.go @@ -0,0 +1,83 @@ +package metadata + +import ( + "io/ioutil" + "os" + "testing" + + "github.com/docker/docker/layer" +) + +func TestV1IDService(t *testing.T) { + tmpDir, err := ioutil.TempDir("", "v1-id-service-test") + if err != nil { + t.Fatalf("could not create temp dir: %v", err) + } + defer os.RemoveAll(tmpDir) + + metadataStore, err := NewFSMetadataStore(tmpDir) + if err != nil { + t.Fatalf("could not create metadata store: %v", err) + } + v1IDService := NewV1IDService(metadataStore) + + testVectors := []struct { + registry string + v1ID string + layerID layer.DiffID + }{ + { + registry: "registry1", + v1ID: "f0cd5ca10b07f35512fc2f1cbf9a6cefbdb5cba70ac6b0c9e5988f4497f71937", + layerID: layer.DiffID("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"), + }, + { + registry: "registry2", + v1ID: "9e3447ca24cb96d86ebd5960cb34d1299b07e0a0e03801d90b9969a2c187dd6e", + layerID: layer.DiffID("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa"), + }, + { + registry: "registry1", + v1ID: "9e3447ca24cb96d86ebd5960cb34d1299b07e0a0e03801d90b9969a2c187dd6e", + layerID: layer.DiffID("sha256:03f4658f8b782e12230c1783426bd3bacce651ce582a4ffb6fbbfa2079428ecb"), + }, + } + + // Set some associations + for _, vec := range testVectors { + err := v1IDService.Set(vec.v1ID, vec.registry, vec.layerID) + if err != nil { + t.Fatalf("error calling Set: %v", err) + } + } + + // Check the correct values are read back + for _, vec := range testVectors { + layerID, err := v1IDService.Get(vec.v1ID, vec.registry) + if err != nil { + t.Fatalf("error calling Get: %v", err) + } + if layerID != vec.layerID { + t.Fatal("Get returned incorrect layer ID") + } + } + + // Test Get on a nonexistent entry + _, err = v1IDService.Get("82379823067823853223359023576437723560923756b03560378f4497753917", "registry1") + if err == nil { + t.Fatal("expected error looking up nonexistent entry") + } + + // Overwrite one of the entries and read it back + err = v1IDService.Set(testVectors[0].v1ID, testVectors[0].registry, testVectors[1].layerID) + if err != nil { + t.Fatalf("error calling Set: %v", err) + } + layerID, err := v1IDService.Get(testVectors[0].v1ID, testVectors[0].registry) + if err != nil { + t.Fatalf("error calling Get: %v", err) + } + if layerID != testVectors[1].layerID { + t.Fatal("Get returned incorrect layer ID") + } +} diff --git a/components/engine/distribution/metadata/v2_metadata_service.go b/components/engine/distribution/metadata/v2_metadata_service.go new file mode 100644 index 0000000000..c31c7214e7 --- /dev/null +++ b/components/engine/distribution/metadata/v2_metadata_service.go @@ -0,0 +1,241 @@ +package metadata + +import ( + "crypto/hmac" + "crypto/sha256" + "encoding/hex" + "encoding/json" + "errors" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/layer" + "github.com/opencontainers/go-digest" +) + +// V2MetadataService maps layer IDs to a set of known metadata for +// the layer. 
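+//
+// Entries can also be tagged with an HMAC derived from the credentials that
+// produced them, so later operations can recognize matching entries without
+// the credentials ever being stored (illustrative sketch only; svc, diffID,
+// meta and authConfig are assumed variables):
+//
+//	key, _ := ComputeV2MetadataHMACKey(authConfig)
+//	_ = svc.TagAndAdd(diffID, key, meta)
+//	entries, _ := svc.GetMetadata(diffID)
+//	ok := CheckV2MetadataHMAC(&entries[len(entries)-1], key) // true: same key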
+type V2MetadataService interface { + GetMetadata(diffID layer.DiffID) ([]V2Metadata, error) + GetDiffID(dgst digest.Digest) (layer.DiffID, error) + Add(diffID layer.DiffID, metadata V2Metadata) error + TagAndAdd(diffID layer.DiffID, hmacKey []byte, metadata V2Metadata) error + Remove(metadata V2Metadata) error +} + +// v2MetadataService implements V2MetadataService +type v2MetadataService struct { + store Store +} + +var _ V2MetadataService = &v2MetadataService{} + +// V2Metadata contains the digest and source repository information for a layer. +type V2Metadata struct { + Digest digest.Digest + SourceRepository string + // HMAC hashes above attributes with recent authconfig digest used as a key in order to determine matching + // metadata entries accompanied by the same credentials without actually exposing them. + HMAC string +} + +// CheckV2MetadataHMAC returns true if the given "meta" is tagged with a hmac hashed by the given "key". +func CheckV2MetadataHMAC(meta *V2Metadata, key []byte) bool { + if len(meta.HMAC) == 0 || len(key) == 0 { + return len(meta.HMAC) == 0 && len(key) == 0 + } + mac := hmac.New(sha256.New, key) + mac.Write([]byte(meta.Digest)) + mac.Write([]byte(meta.SourceRepository)) + expectedMac := mac.Sum(nil) + + storedMac, err := hex.DecodeString(meta.HMAC) + if err != nil { + return false + } + + return hmac.Equal(storedMac, expectedMac) +} + +// ComputeV2MetadataHMAC returns a hmac for the given "meta" hash by the given key. +func ComputeV2MetadataHMAC(key []byte, meta *V2Metadata) string { + if len(key) == 0 || meta == nil { + return "" + } + mac := hmac.New(sha256.New, key) + mac.Write([]byte(meta.Digest)) + mac.Write([]byte(meta.SourceRepository)) + return hex.EncodeToString(mac.Sum(nil)) +} + +// ComputeV2MetadataHMACKey returns a key for the given "authConfig" that can be used to hash v2 metadata +// entries. +func ComputeV2MetadataHMACKey(authConfig *types.AuthConfig) ([]byte, error) { + if authConfig == nil { + return nil, nil + } + key := authConfigKeyInput{ + Username: authConfig.Username, + Password: authConfig.Password, + Auth: authConfig.Auth, + IdentityToken: authConfig.IdentityToken, + RegistryToken: authConfig.RegistryToken, + } + buf, err := json.Marshal(&key) + if err != nil { + return nil, err + } + return []byte(digest.FromBytes([]byte(buf))), nil +} + +// authConfigKeyInput is a reduced AuthConfig structure holding just relevant credential data eligible for +// hmac key creation. +type authConfigKeyInput struct { + Username string `json:"username,omitempty"` + Password string `json:"password,omitempty"` + Auth string `json:"auth,omitempty"` + + IdentityToken string `json:"identitytoken,omitempty"` + RegistryToken string `json:"registrytoken,omitempty"` +} + +// maxMetadata is the number of metadata entries to keep per layer DiffID. +const maxMetadata = 50 + +// NewV2MetadataService creates a new diff ID to v2 metadata mapping service. 
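+//
+// A typical round-trip through the service (illustrative only; store, diffID
+// and dgst are assumed variables):
+//
+//	svc := NewV2MetadataService(store)
+//	_ = svc.Add(diffID, V2Metadata{Digest: dgst, SourceRepository: "docker.io/library/busybox"})
+//	d, _ := svc.GetDiffID(dgst) // d == diffID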
+func NewV2MetadataService(store Store) V2MetadataService { + return &v2MetadataService{ + store: store, + } +} + +func (serv *v2MetadataService) diffIDNamespace() string { + return "v2metadata-by-diffid" +} + +func (serv *v2MetadataService) digestNamespace() string { + return "diffid-by-digest" +} + +func (serv *v2MetadataService) diffIDKey(diffID layer.DiffID) string { + return string(digest.Digest(diffID).Algorithm()) + "/" + digest.Digest(diffID).Hex() +} + +func (serv *v2MetadataService) digestKey(dgst digest.Digest) string { + return string(dgst.Algorithm()) + "/" + dgst.Hex() +} + +// GetMetadata finds the metadata associated with a layer DiffID. +func (serv *v2MetadataService) GetMetadata(diffID layer.DiffID) ([]V2Metadata, error) { + if serv.store == nil { + return nil, errors.New("no metadata storage") + } + jsonBytes, err := serv.store.Get(serv.diffIDNamespace(), serv.diffIDKey(diffID)) + if err != nil { + return nil, err + } + + var metadata []V2Metadata + if err := json.Unmarshal(jsonBytes, &metadata); err != nil { + return nil, err + } + + return metadata, nil +} + +// GetDiffID finds a layer DiffID from a digest. +func (serv *v2MetadataService) GetDiffID(dgst digest.Digest) (layer.DiffID, error) { + if serv.store == nil { + return layer.DiffID(""), errors.New("no metadata storage") + } + diffIDBytes, err := serv.store.Get(serv.digestNamespace(), serv.digestKey(dgst)) + if err != nil { + return layer.DiffID(""), err + } + + return layer.DiffID(diffIDBytes), nil +} + +// Add associates metadata with a layer DiffID. If too many metadata entries are +// present, the oldest one is dropped. +func (serv *v2MetadataService) Add(diffID layer.DiffID, metadata V2Metadata) error { + if serv.store == nil { + // Support a service which has no backend storage, in this case + // an add becomes a no-op. + // TODO: implement in memory storage + return nil + } + oldMetadata, err := serv.GetMetadata(diffID) + if err != nil { + oldMetadata = nil + } + newMetadata := make([]V2Metadata, 0, len(oldMetadata)+1) + + // Copy all other metadata to new slice + for _, oldMeta := range oldMetadata { + if oldMeta != metadata { + newMetadata = append(newMetadata, oldMeta) + } + } + + newMetadata = append(newMetadata, metadata) + + if len(newMetadata) > maxMetadata { + newMetadata = newMetadata[len(newMetadata)-maxMetadata:] + } + + jsonBytes, err := json.Marshal(newMetadata) + if err != nil { + return err + } + + err = serv.store.Set(serv.diffIDNamespace(), serv.diffIDKey(diffID), jsonBytes) + if err != nil { + return err + } + + return serv.store.Set(serv.digestNamespace(), serv.digestKey(metadata.Digest), []byte(diffID)) +} + +// TagAndAdd amends the given "meta" for hmac hashed by the given "hmacKey" and associates it with a layer +// DiffID. If too many metadata entries are present, the oldest one is dropped. +func (serv *v2MetadataService) TagAndAdd(diffID layer.DiffID, hmacKey []byte, meta V2Metadata) error { + meta.HMAC = ComputeV2MetadataHMAC(hmacKey, &meta) + return serv.Add(diffID, meta) +} + +// Remove unassociates a metadata entry from a layer DiffID. +func (serv *v2MetadataService) Remove(metadata V2Metadata) error { + if serv.store == nil { + // Support a service which has no backend storage, in this case + // an remove becomes a no-op. 
+ // TODO: implement in memory storage + return nil + } + diffID, err := serv.GetDiffID(metadata.Digest) + if err != nil { + return err + } + oldMetadata, err := serv.GetMetadata(diffID) + if err != nil { + oldMetadata = nil + } + newMetadata := make([]V2Metadata, 0, len(oldMetadata)) + + // Copy all other metadata to new slice + for _, oldMeta := range oldMetadata { + if oldMeta != metadata { + newMetadata = append(newMetadata, oldMeta) + } + } + + if len(newMetadata) == 0 { + return serv.store.Delete(serv.diffIDNamespace(), serv.diffIDKey(diffID)) + } + + jsonBytes, err := json.Marshal(newMetadata) + if err != nil { + return err + } + + return serv.store.Set(serv.diffIDNamespace(), serv.diffIDKey(diffID), jsonBytes) +} diff --git a/components/engine/distribution/metadata/v2_metadata_service_test.go b/components/engine/distribution/metadata/v2_metadata_service_test.go new file mode 100644 index 0000000000..8e3e4614c0 --- /dev/null +++ b/components/engine/distribution/metadata/v2_metadata_service_test.go @@ -0,0 +1,115 @@ +package metadata + +import ( + "encoding/hex" + "io/ioutil" + "math/rand" + "os" + "reflect" + "testing" + + "github.com/docker/docker/layer" + "github.com/opencontainers/go-digest" +) + +func TestV2MetadataService(t *testing.T) { + tmpDir, err := ioutil.TempDir("", "blobsum-storage-service-test") + if err != nil { + t.Fatalf("could not create temp dir: %v", err) + } + defer os.RemoveAll(tmpDir) + + metadataStore, err := NewFSMetadataStore(tmpDir) + if err != nil { + t.Fatalf("could not create metadata store: %v", err) + } + V2MetadataService := NewV2MetadataService(metadataStore) + + tooManyBlobSums := make([]V2Metadata, 100) + for i := range tooManyBlobSums { + randDigest := randomDigest() + tooManyBlobSums[i] = V2Metadata{Digest: randDigest} + } + + testVectors := []struct { + diffID layer.DiffID + metadata []V2Metadata + }{ + { + diffID: layer.DiffID("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"), + metadata: []V2Metadata{ + {Digest: digest.Digest("sha256:f0cd5ca10b07f35512fc2f1cbf9a6cefbdb5cba70ac6b0c9e5988f4497f71937")}, + }, + }, + { + diffID: layer.DiffID("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa"), + metadata: []V2Metadata{ + {Digest: digest.Digest("sha256:f0cd5ca10b07f35512fc2f1cbf9a6cefbdb5cba70ac6b0c9e5988f4497f71937")}, + {Digest: digest.Digest("sha256:9e3447ca24cb96d86ebd5960cb34d1299b07e0a0e03801d90b9969a2c187dd6e")}, + }, + }, + { + diffID: layer.DiffID("sha256:03f4658f8b782e12230c1783426bd3bacce651ce582a4ffb6fbbfa2079428ecb"), + metadata: tooManyBlobSums, + }, + } + + // Set some associations + for _, vec := range testVectors { + for _, blobsum := range vec.metadata { + err := V2MetadataService.Add(vec.diffID, blobsum) + if err != nil { + t.Fatalf("error calling Set: %v", err) + } + } + } + + // Check the correct values are read back + for _, vec := range testVectors { + metadata, err := V2MetadataService.GetMetadata(vec.diffID) + if err != nil { + t.Fatalf("error calling Get: %v", err) + } + expectedMetadataEntries := len(vec.metadata) + if expectedMetadataEntries > 50 { + expectedMetadataEntries = 50 + } + if !reflect.DeepEqual(metadata, vec.metadata[len(vec.metadata)-expectedMetadataEntries:len(vec.metadata)]) { + t.Fatal("Get returned incorrect layer ID") + } + } + + // Test GetMetadata on a nonexistent entry + _, err = V2MetadataService.GetMetadata(layer.DiffID("sha256:82379823067823853223359023576437723560923756b03560378f4497753917")) + if err == nil { + t.Fatal("expected error looking up 
nonexistent entry") + } + + // Test GetDiffID on a nonexistent entry + _, err = V2MetadataService.GetDiffID(digest.Digest("sha256:82379823067823853223359023576437723560923756b03560378f4497753917")) + if err == nil { + t.Fatal("expected error looking up nonexistent entry") + } + + // Overwrite one of the entries and read it back + err = V2MetadataService.Add(testVectors[1].diffID, testVectors[0].metadata[0]) + if err != nil { + t.Fatalf("error calling Add: %v", err) + } + diffID, err := V2MetadataService.GetDiffID(testVectors[0].metadata[0].Digest) + if err != nil { + t.Fatalf("error calling GetDiffID: %v", err) + } + if diffID != testVectors[1].diffID { + t.Fatal("GetDiffID returned incorrect diffID") + } +} + +func randomDigest() digest.Digest { + b := [32]byte{} + for i := 0; i < len(b); i++ { + b[i] = byte(rand.Intn(256)) + } + d := hex.EncodeToString(b[:]) + return digest.Digest("sha256:" + d) +} diff --git a/components/engine/distribution/pull.go b/components/engine/distribution/pull.go new file mode 100644 index 0000000000..c5bdbd6d3b --- /dev/null +++ b/components/engine/distribution/pull.go @@ -0,0 +1,198 @@ +package distribution + +import ( + "fmt" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/reference" + "github.com/docker/docker/api" + "github.com/docker/docker/distribution/metadata" + "github.com/docker/docker/pkg/progress" + refstore "github.com/docker/docker/reference" + "github.com/docker/docker/registry" + "github.com/opencontainers/go-digest" + "golang.org/x/net/context" +) + +// Puller is an interface that abstracts pulling for different API versions. +type Puller interface { + // Pull tries to pull the image referenced by `tag` + // Pull returns an error if any, as well as a boolean that determines whether to retry Pull on the next configured endpoint. + // + Pull(ctx context.Context, ref reference.Named) error +} + +// newPuller returns a Puller interface that will pull from either a v1 or v2 +// registry. The endpoint argument contains a Version field that determines +// whether a v1 or v2 puller will be created. The other parameters are passed +// through to the underlying puller implementation for use during the actual +// pull operation. +func newPuller(endpoint registry.APIEndpoint, repoInfo *registry.RepositoryInfo, imagePullConfig *ImagePullConfig) (Puller, error) { + switch endpoint.Version { + case registry.APIVersion2: + return &v2Puller{ + V2MetadataService: metadata.NewV2MetadataService(imagePullConfig.MetadataStore), + endpoint: endpoint, + config: imagePullConfig, + repoInfo: repoInfo, + }, nil + case registry.APIVersion1: + return &v1Puller{ + v1IDService: metadata.NewV1IDService(imagePullConfig.MetadataStore), + endpoint: endpoint, + config: imagePullConfig, + repoInfo: repoInfo, + }, nil + } + return nil, fmt.Errorf("unknown version %d for registry %s", endpoint.Version, endpoint.URL) +} + +// Pull initiates a pull operation. image is the repository name to pull, and +// tag may be either empty, or indicate a specific tag to pull. 
+func Pull(ctx context.Context, ref reference.Named, imagePullConfig *ImagePullConfig) error { + // Resolve the Repository name from fqn to RepositoryInfo + repoInfo, err := imagePullConfig.RegistryService.ResolveRepository(ref) + if err != nil { + return err + } + + // makes sure name is not `scratch` + if err := ValidateRepoName(repoInfo.Name); err != nil { + return err + } + + endpoints, err := imagePullConfig.RegistryService.LookupPullEndpoints(reference.Domain(repoInfo.Name)) + if err != nil { + return err + } + + var ( + lastErr error + + // discardNoSupportErrors is used to track whether an endpoint encountered an error of type registry.ErrNoSupport + // By default it is false, which means that if an ErrNoSupport error is encountered, it will be saved in lastErr. + // As soon as another kind of error is encountered, discardNoSupportErrors is set to true, avoiding the saving of + // any subsequent ErrNoSupport errors in lastErr. + // It's needed for pull-by-digest on v1 endpoints: if there are only v1 endpoints configured, the error should be + // returned and displayed, but if there was a v2 endpoint which supports pull-by-digest, then the last relevant + // error is the ones from v2 endpoints not v1. + discardNoSupportErrors bool + + // confirmedV2 is set to true if a pull attempt managed to + // confirm that it was talking to a v2 registry. This will + // prevent fallback to the v1 protocol. + confirmedV2 bool + + // confirmedTLSRegistries is a map indicating which registries + // are known to be using TLS. There should never be a plaintext + // retry for any of these. + confirmedTLSRegistries = make(map[string]struct{}) + ) + for _, endpoint := range endpoints { + if imagePullConfig.RequireSchema2 && endpoint.Version == registry.APIVersion1 { + continue + } + + if confirmedV2 && endpoint.Version == registry.APIVersion1 { + logrus.Debugf("Skipping v1 endpoint %s because v2 registry was detected", endpoint.URL) + continue + } + + if endpoint.URL.Scheme != "https" { + if _, confirmedTLS := confirmedTLSRegistries[endpoint.URL.Host]; confirmedTLS { + logrus.Debugf("Skipping non-TLS endpoint %s for host/port that appears to use TLS", endpoint.URL) + continue + } + } + + logrus.Debugf("Trying to pull %s from %s %s", reference.FamiliarName(repoInfo.Name), endpoint.URL, endpoint.Version) + + puller, err := newPuller(endpoint, repoInfo, imagePullConfig) + if err != nil { + lastErr = err + continue + } + if err := puller.Pull(ctx, ref); err != nil { + // Was this pull cancelled? If so, don't try to fall + // back. + fallback := false + select { + case <-ctx.Done(): + default: + if fallbackErr, ok := err.(fallbackError); ok { + fallback = true + confirmedV2 = confirmedV2 || fallbackErr.confirmedV2 + if fallbackErr.transportOK && endpoint.URL.Scheme == "https" { + confirmedTLSRegistries[endpoint.URL.Host] = struct{}{} + } + err = fallbackErr.err + } + } + if fallback { + if _, ok := err.(ErrNoSupport); !ok { + // Because we found an error that's not ErrNoSupport, discard all subsequent ErrNoSupport errors. + discardNoSupportErrors = true + // append subsequent errors + lastErr = err + } else if !discardNoSupportErrors { + // Save the ErrNoSupport error, because it's either the first error or all encountered errors + // were also ErrNoSupport errors. 
+ // append subsequent errors + lastErr = err + } + logrus.Infof("Attempting next endpoint for pull after error: %v", err) + continue + } + logrus.Errorf("Not continuing with pull after error: %v", err) + return TranslatePullError(err, ref) + } + + imagePullConfig.ImageEventLogger(reference.FamiliarString(ref), reference.FamiliarName(repoInfo.Name), "pull") + return nil + } + + if lastErr == nil { + lastErr = fmt.Errorf("no endpoints found for %s", reference.FamiliarString(ref)) + } + + return TranslatePullError(lastErr, ref) +} + +// writeStatus writes a status message to out. If layersDownloaded is true, the +// status message indicates that a newer image was downloaded. Otherwise, it +// indicates that the image is up to date. requestedTag is the tag the message +// will refer to. +func writeStatus(requestedTag string, out progress.Output, layersDownloaded bool) { + if layersDownloaded { + progress.Message(out, "", "Status: Downloaded newer image for "+requestedTag) + } else { + progress.Message(out, "", "Status: Image is up to date for "+requestedTag) + } +} + +// ValidateRepoName validates the name of a repository. +func ValidateRepoName(name reference.Named) error { + if reference.FamiliarName(name) == api.NoBaseImageSpecifier { + return fmt.Errorf("'%s' is a reserved name", api.NoBaseImageSpecifier) + } + return nil +} + +func addDigestReference(store refstore.Store, ref reference.Named, dgst digest.Digest, id digest.Digest) error { + dgstRef, err := reference.WithDigest(reference.TrimNamed(ref), dgst) + if err != nil { + return err + } + + if oldTagID, err := store.Get(dgstRef); err == nil { + if oldTagID != id { + // Updating digests not supported by reference store + logrus.Errorf("Image ID for digest %s changed from %s to %s, cannot update", dgst.String(), oldTagID, id) + } + return nil + } else if err != refstore.ErrDoesNotExist { + return err + } + + return store.AddDigest(dgstRef, id, true) +} diff --git a/components/engine/distribution/pull_v1.go b/components/engine/distribution/pull_v1.go new file mode 100644 index 0000000000..d873d338cd --- /dev/null +++ b/components/engine/distribution/pull_v1.go @@ -0,0 +1,368 @@ +package distribution + +import ( + "errors" + "fmt" + "io" + "io/ioutil" + "net" + "net/url" + "os" + "strings" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/client/transport" + "github.com/docker/docker/distribution/metadata" + "github.com/docker/docker/distribution/xfer" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/image" + "github.com/docker/docker/image/v1" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/registry" + "golang.org/x/net/context" +) + +type v1Puller struct { + v1IDService *metadata.V1IDService + endpoint registry.APIEndpoint + config *ImagePullConfig + repoInfo *registry.RepositoryInfo + session *registry.Session +} + +func (p *v1Puller) Pull(ctx context.Context, ref reference.Named) error { + if _, isCanonical := ref.(reference.Canonical); isCanonical { + // Allowing fallback, because HTTPS v1 is before HTTP v2 + return fallbackError{err: ErrNoSupport{Err: errors.New("Cannot pull by digest with v1 registry")}} + } + + tlsConfig, err := p.config.RegistryService.TLSConfig(p.repoInfo.Index.Name) + if err != nil { + return err + } + // Adds Docker-specific headers as well as user-specified headers 
(metaHeaders) + tr := transport.NewTransport( + // TODO(tiborvass): was ReceiveTimeout + registry.NewTransport(tlsConfig), + registry.DockerHeaders(dockerversion.DockerUserAgent(ctx), p.config.MetaHeaders)..., + ) + client := registry.HTTPClient(tr) + v1Endpoint, err := p.endpoint.ToV1Endpoint(dockerversion.DockerUserAgent(ctx), p.config.MetaHeaders) + if err != nil { + logrus.Debugf("Could not get v1 endpoint: %v", err) + return fallbackError{err: err} + } + p.session, err = registry.NewSession(client, p.config.AuthConfig, v1Endpoint) + if err != nil { + // TODO(dmcgowan): Check if should fallback + logrus.Debugf("Fallback from error: %s", err) + return fallbackError{err: err} + } + if err := p.pullRepository(ctx, ref); err != nil { + // TODO(dmcgowan): Check if should fallback + return err + } + progress.Message(p.config.ProgressOutput, "", p.repoInfo.Name.Name()+": this image was pulled from a legacy registry. Important: This registry version will not be supported in future versions of docker.") + + return nil +} + +func (p *v1Puller) pullRepository(ctx context.Context, ref reference.Named) error { + progress.Message(p.config.ProgressOutput, "", "Pulling repository "+p.repoInfo.Name.Name()) + + tagged, isTagged := ref.(reference.NamedTagged) + + repoData, err := p.session.GetRepositoryData(p.repoInfo.Name) + if err != nil { + if strings.Contains(err.Error(), "HTTP code: 404") { + if isTagged { + return fmt.Errorf("Error: image %s:%s not found", reference.Path(p.repoInfo.Name), tagged.Tag()) + } + return fmt.Errorf("Error: image %s not found", reference.Path(p.repoInfo.Name)) + } + // Unexpected HTTP error + return err + } + + logrus.Debug("Retrieving the tag list") + var tagsList map[string]string + if !isTagged { + tagsList, err = p.session.GetRemoteTags(repoData.Endpoints, p.repoInfo.Name) + } else { + var tagID string + tagsList = make(map[string]string) + tagID, err = p.session.GetRemoteTag(repoData.Endpoints, p.repoInfo.Name, tagged.Tag()) + if err == registry.ErrRepoNotFound { + return fmt.Errorf("Tag %s not found in repository %s", tagged.Tag(), p.repoInfo.Name.Name()) + } + tagsList[tagged.Tag()] = tagID + } + if err != nil { + logrus.Errorf("unable to get remote tags: %s", err) + return err + } + + for tag, id := range tagsList { + repoData.ImgList[id] = ®istry.ImgData{ + ID: id, + Tag: tag, + Checksum: "", + } + } + + layersDownloaded := false + for _, imgData := range repoData.ImgList { + if isTagged && imgData.Tag != tagged.Tag() { + continue + } + + err := p.downloadImage(ctx, repoData, imgData, &layersDownloaded) + if err != nil { + return err + } + } + + writeStatus(reference.FamiliarString(ref), p.config.ProgressOutput, layersDownloaded) + return nil +} + +func (p *v1Puller) downloadImage(ctx context.Context, repoData *registry.RepositoryData, img *registry.ImgData, layersDownloaded *bool) error { + if img.Tag == "" { + logrus.Debugf("Image (id: %s) present in this repository but untagged, skipping", img.ID) + return nil + } + + localNameRef, err := reference.WithTag(p.repoInfo.Name, img.Tag) + if err != nil { + retErr := fmt.Errorf("Image (id: %s) has invalid tag: %s", img.ID, img.Tag) + logrus.Debug(retErr.Error()) + return retErr + } + + if err := v1.ValidateID(img.ID); err != nil { + return err + } + + progress.Updatef(p.config.ProgressOutput, stringid.TruncateID(img.ID), "Pulling image (%s) from %s", img.Tag, p.repoInfo.Name.Name()) + success := false + var lastErr error + for _, ep := range p.repoInfo.Index.Mirrors { + ep += "v1/" + 
progress.Updatef(p.config.ProgressOutput, stringid.TruncateID(img.ID), fmt.Sprintf("Pulling image (%s) from %s, mirror: %s", img.Tag, p.repoInfo.Name.Name(), ep)) + if err = p.pullImage(ctx, img.ID, ep, localNameRef, layersDownloaded); err != nil { + // Don't report errors when pulling from mirrors. + logrus.Debugf("Error pulling image (%s) from %s, mirror: %s, %s", img.Tag, p.repoInfo.Name.Name(), ep, err) + continue + } + success = true + break + } + if !success { + for _, ep := range repoData.Endpoints { + progress.Updatef(p.config.ProgressOutput, stringid.TruncateID(img.ID), "Pulling image (%s) from %s, endpoint: %s", img.Tag, p.repoInfo.Name.Name(), ep) + if err = p.pullImage(ctx, img.ID, ep, localNameRef, layersDownloaded); err != nil { + // It's not ideal that only the last error is returned, it would be better to concatenate the errors. + // As the error is also given to the output stream the user will see the error. + lastErr = err + progress.Updatef(p.config.ProgressOutput, stringid.TruncateID(img.ID), "Error pulling image (%s) from %s, endpoint: %s, %s", img.Tag, p.repoInfo.Name.Name(), ep, err) + continue + } + success = true + break + } + } + if !success { + err := fmt.Errorf("Error pulling image (%s) from %s, %v", img.Tag, p.repoInfo.Name.Name(), lastErr) + progress.Update(p.config.ProgressOutput, stringid.TruncateID(img.ID), err.Error()) + return err + } + return nil +} + +func (p *v1Puller) pullImage(ctx context.Context, v1ID, endpoint string, localNameRef reference.Named, layersDownloaded *bool) (err error) { + var history []string + history, err = p.session.GetRemoteHistory(v1ID, endpoint) + if err != nil { + return err + } + if len(history) < 1 { + return fmt.Errorf("empty history for image %s", v1ID) + } + progress.Update(p.config.ProgressOutput, stringid.TruncateID(v1ID), "Pulling dependent layers") + + var ( + descriptors []xfer.DownloadDescriptor + newHistory []image.History + imgJSON []byte + imgSize int64 + ) + + // Iterate over layers, in order from bottom-most to top-most. Download + // config for all layers and create descriptors. 
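+	// history[0] is the top-most (most recent) layer, which is why the
+	// loop below walks the slice backwards.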
+ for i := len(history) - 1; i >= 0; i-- { + v1LayerID := history[i] + imgJSON, imgSize, err = p.downloadLayerConfig(v1LayerID, endpoint) + if err != nil { + return err + } + + // Create a new-style config from the legacy configs + h, err := v1.HistoryFromConfig(imgJSON, false) + if err != nil { + return err + } + newHistory = append(newHistory, h) + + layerDescriptor := &v1LayerDescriptor{ + v1LayerID: v1LayerID, + indexName: p.repoInfo.Index.Name, + endpoint: endpoint, + v1IDService: p.v1IDService, + layersDownloaded: layersDownloaded, + layerSize: imgSize, + session: p.session, + } + + descriptors = append(descriptors, layerDescriptor) + } + + rootFS := image.NewRootFS() + resultRootFS, release, err := p.config.DownloadManager.Download(ctx, *rootFS, descriptors, p.config.ProgressOutput) + if err != nil { + return err + } + defer release() + + config, err := v1.MakeConfigFromV1Config(imgJSON, &resultRootFS, newHistory) + if err != nil { + return err + } + + imageID, err := p.config.ImageStore.Put(config) + if err != nil { + return err + } + + if p.config.ReferenceStore != nil { + if err := p.config.ReferenceStore.AddTag(localNameRef, imageID, true); err != nil { + return err + } + } + + return nil +} + +func (p *v1Puller) downloadLayerConfig(v1LayerID, endpoint string) (imgJSON []byte, imgSize int64, err error) { + progress.Update(p.config.ProgressOutput, stringid.TruncateID(v1LayerID), "Pulling metadata") + + retries := 5 + for j := 1; j <= retries; j++ { + imgJSON, imgSize, err := p.session.GetRemoteImageJSON(v1LayerID, endpoint) + if err != nil && j == retries { + progress.Update(p.config.ProgressOutput, stringid.TruncateID(v1LayerID), "Error pulling layer metadata") + return nil, 0, err + } else if err != nil { + time.Sleep(time.Duration(j) * 500 * time.Millisecond) + continue + } + + return imgJSON, imgSize, nil + } + + // not reached + return nil, 0, nil +} + +type v1LayerDescriptor struct { + v1LayerID string + indexName string + endpoint string + v1IDService *metadata.V1IDService + layersDownloaded *bool + layerSize int64 + session *registry.Session + tmpFile *os.File +} + +func (ld *v1LayerDescriptor) Key() string { + return "v1:" + ld.v1LayerID +} + +func (ld *v1LayerDescriptor) ID() string { + return stringid.TruncateID(ld.v1LayerID) +} + +func (ld *v1LayerDescriptor) DiffID() (layer.DiffID, error) { + return ld.v1IDService.Get(ld.v1LayerID, ld.indexName) +} + +func (ld *v1LayerDescriptor) Download(ctx context.Context, progressOutput progress.Output) (io.ReadCloser, int64, error) { + progress.Update(progressOutput, ld.ID(), "Pulling fs layer") + layerReader, err := ld.session.GetRemoteImageLayer(ld.v1LayerID, ld.endpoint, ld.layerSize) + if err != nil { + progress.Update(progressOutput, ld.ID(), "Error pulling dependent layers") + if uerr, ok := err.(*url.Error); ok { + err = uerr.Err + } + if terr, ok := err.(net.Error); ok && terr.Timeout() { + return nil, 0, err + } + return nil, 0, xfer.DoNotRetry{Err: err} + } + *ld.layersDownloaded = true + + ld.tmpFile, err = ioutil.TempFile("", "GetImageBlob") + if err != nil { + layerReader.Close() + return nil, 0, err + } + + reader := progress.NewProgressReader(ioutils.NewCancelReadCloser(ctx, layerReader), progressOutput, ld.layerSize, ld.ID(), "Downloading") + defer reader.Close() + + _, err = io.Copy(ld.tmpFile, reader) + if err != nil { + ld.Close() + return nil, 0, err + } + + progress.Update(progressOutput, ld.ID(), "Download complete") + + logrus.Debugf("Downloaded %s to tempfile %s", ld.ID(), ld.tmpFile.Name()) + + 
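+	// Rewind the temp file so the download manager reads the layer
+	// content from the beginning.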
ld.tmpFile.Seek(0, 0) + + // hand off the temporary file to the download manager, so it will only + // be closed once + tmpFile := ld.tmpFile + ld.tmpFile = nil + + return ioutils.NewReadCloserWrapper(tmpFile, func() error { + tmpFile.Close() + err := os.RemoveAll(tmpFile.Name()) + if err != nil { + logrus.Errorf("Failed to remove temp file: %s", tmpFile.Name()) + } + return err + }), ld.layerSize, nil +} + +func (ld *v1LayerDescriptor) Close() { + if ld.tmpFile != nil { + ld.tmpFile.Close() + if err := os.RemoveAll(ld.tmpFile.Name()); err != nil { + logrus.Errorf("Failed to remove temp file: %s", ld.tmpFile.Name()) + } + ld.tmpFile = nil + } +} + +func (ld *v1LayerDescriptor) Registered(diffID layer.DiffID) { + // Cache mapping from this layer's DiffID to the blobsum + ld.v1IDService.Set(ld.v1LayerID, ld.indexName, diffID) +} diff --git a/components/engine/distribution/pull_v2.go b/components/engine/distribution/pull_v2.go new file mode 100644 index 0000000000..08df6b768f --- /dev/null +++ b/components/engine/distribution/pull_v2.go @@ -0,0 +1,899 @@ +package distribution + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "net/url" + "os" + "runtime" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution" + "github.com/docker/distribution/manifest/manifestlist" + "github.com/docker/distribution/manifest/schema1" + "github.com/docker/distribution/manifest/schema2" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/api/errcode" + "github.com/docker/distribution/registry/client/auth" + "github.com/docker/distribution/registry/client/transport" + "github.com/docker/docker/distribution/metadata" + "github.com/docker/docker/distribution/xfer" + "github.com/docker/docker/image" + "github.com/docker/docker/image/v1" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/pkg/stringid" + refstore "github.com/docker/docker/reference" + "github.com/docker/docker/registry" + "github.com/opencontainers/go-digest" + "golang.org/x/net/context" +) + +var ( + errRootFSMismatch = errors.New("layers from manifest don't match image configuration") + errRootFSInvalid = errors.New("invalid rootfs in image configuration") +) + +// ImageConfigPullError is an error pulling the image config blob +// (only applies to schema2). +type ImageConfigPullError struct { + Err error +} + +// Error returns the error string for ImageConfigPullError. +func (e ImageConfigPullError) Error() string { + return "error pulling image configuration: " + e.Err.Error() +} + +type v2Puller struct { + V2MetadataService metadata.V2MetadataService + endpoint registry.APIEndpoint + config *ImagePullConfig + repoInfo *registry.RepositoryInfo + repo distribution.Repository + // confirmedV2 is set to true if we confirm we're talking to a v2 + // registry. This is used to limit fallbacks to the v1 protocol. 
+ confirmedV2 bool +} + +func (p *v2Puller) Pull(ctx context.Context, ref reference.Named) (err error) { + // TODO(tiborvass): was ReceiveTimeout + p.repo, p.confirmedV2, err = NewV2Repository(ctx, p.repoInfo, p.endpoint, p.config.MetaHeaders, p.config.AuthConfig, "pull") + if err != nil { + logrus.Warnf("Error getting v2 registry: %v", err) + return err + } + + if err = p.pullV2Repository(ctx, ref); err != nil { + if _, ok := err.(fallbackError); ok { + return err + } + if continueOnError(err) { + return fallbackError{ + err: err, + confirmedV2: p.confirmedV2, + transportOK: true, + } + } + } + return err +} + +func (p *v2Puller) pullV2Repository(ctx context.Context, ref reference.Named) (err error) { + var layersDownloaded bool + if !reference.IsNameOnly(ref) { + layersDownloaded, err = p.pullV2Tag(ctx, ref) + if err != nil { + return err + } + } else { + tags, err := p.repo.Tags(ctx).All(ctx) + if err != nil { + // If this repository doesn't exist on V2, we should + // permit a fallback to V1. + return allowV1Fallback(err) + } + + // The v2 registry knows about this repository, so we will not + // allow fallback to the v1 protocol even if we encounter an + // error later on. + p.confirmedV2 = true + + for _, tag := range tags { + tagRef, err := reference.WithTag(ref, tag) + if err != nil { + return err + } + pulledNew, err := p.pullV2Tag(ctx, tagRef) + if err != nil { + // Since this is the pull-all-tags case, don't + // allow an error pulling a particular tag to + // make the whole pull fall back to v1. + if fallbackErr, ok := err.(fallbackError); ok { + return fallbackErr.err + } + return err + } + // pulledNew is true if either new layers were downloaded OR if existing images were newly tagged + // TODO(tiborvass): should we change the name of `layersDownload`? What about message in WriteStatus? 
+ layersDownloaded = layersDownloaded || pulledNew + } + } + + writeStatus(reference.FamiliarString(ref), p.config.ProgressOutput, layersDownloaded) + + return nil +} + +type v2LayerDescriptor struct { + digest digest.Digest + diffID layer.DiffID + repoInfo *registry.RepositoryInfo + repo distribution.Repository + V2MetadataService metadata.V2MetadataService + tmpFile *os.File + verifier digest.Verifier + src distribution.Descriptor +} + +func (ld *v2LayerDescriptor) Key() string { + return "v2:" + ld.digest.String() +} + +func (ld *v2LayerDescriptor) ID() string { + return stringid.TruncateID(ld.digest.String()) +} + +func (ld *v2LayerDescriptor) DiffID() (layer.DiffID, error) { + if ld.diffID != "" { + return ld.diffID, nil + } + return ld.V2MetadataService.GetDiffID(ld.digest) +} + +func (ld *v2LayerDescriptor) Download(ctx context.Context, progressOutput progress.Output) (io.ReadCloser, int64, error) { + logrus.Debugf("pulling blob %q", ld.digest) + + var ( + err error + offset int64 + ) + + if ld.tmpFile == nil { + ld.tmpFile, err = createDownloadFile() + if err != nil { + return nil, 0, xfer.DoNotRetry{Err: err} + } + } else { + offset, err = ld.tmpFile.Seek(0, os.SEEK_END) + if err != nil { + logrus.Debugf("error seeking to end of download file: %v", err) + offset = 0 + + ld.tmpFile.Close() + if err := os.Remove(ld.tmpFile.Name()); err != nil { + logrus.Errorf("Failed to remove temp file: %s", ld.tmpFile.Name()) + } + ld.tmpFile, err = createDownloadFile() + if err != nil { + return nil, 0, xfer.DoNotRetry{Err: err} + } + } else if offset != 0 { + logrus.Debugf("attempting to resume download of %q from %d bytes", ld.digest, offset) + } + } + + tmpFile := ld.tmpFile + + layerDownload, err := ld.open(ctx) + if err != nil { + logrus.Errorf("Error initiating layer download: %v", err) + return nil, 0, retryOnError(err) + } + + if offset != 0 { + _, err := layerDownload.Seek(offset, os.SEEK_SET) + if err != nil { + if err := ld.truncateDownloadFile(); err != nil { + return nil, 0, xfer.DoNotRetry{Err: err} + } + return nil, 0, err + } + } + size, err := layerDownload.Seek(0, os.SEEK_END) + if err != nil { + // Seek failed, perhaps because there was no Content-Length + // header. This shouldn't fail the download, because we can + // still continue without a progress bar. + size = 0 + } else { + if size != 0 && offset > size { + logrus.Debug("Partial download is larger than full blob. Starting over") + offset = 0 + if err := ld.truncateDownloadFile(); err != nil { + return nil, 0, xfer.DoNotRetry{Err: err} + } + } + + // Restore the seek offset either at the beginning of the + // stream, or just after the last byte we have from previous + // attempts. 
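// Editor's note (annotation, not part of the patch): a compressed sketch of
// the resume protocol implemented by the surrounding code. offset is how much
// is already in the temp file; size comes from seeking the remote blob to its
// end:
//
//	offset, _ := tmpFile.Seek(0, os.SEEK_END) // bytes already on disk
//	size, _ := remote.Seek(0, os.SEEK_END)    // total blob size, if known
//	if size != 0 && offset > size {
//		offset = 0 // stale partial download; start over
//	}
//	remote.Seek(offset, os.SEEK_SET) // resume just past what we have
//	io.Copy(tmpFile, remote)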
+ _, err = layerDownload.Seek(offset, os.SEEK_SET) + if err != nil { + return nil, 0, err + } + } + + reader := progress.NewProgressReader(ioutils.NewCancelReadCloser(ctx, layerDownload), progressOutput, size-offset, ld.ID(), "Downloading") + defer reader.Close() + + if ld.verifier == nil { + ld.verifier = ld.digest.Verifier() + } + + _, err = io.Copy(tmpFile, io.TeeReader(reader, ld.verifier)) + if err != nil { + if err == transport.ErrWrongCodeForByteRange { + if err := ld.truncateDownloadFile(); err != nil { + return nil, 0, xfer.DoNotRetry{Err: err} + } + return nil, 0, err + } + return nil, 0, retryOnError(err) + } + + progress.Update(progressOutput, ld.ID(), "Verifying Checksum") + + if !ld.verifier.Verified() { + err = fmt.Errorf("filesystem layer verification failed for digest %s", ld.digest) + logrus.Error(err) + + // Allow a retry if this digest verification error happened + // after a resumed download. + if offset != 0 { + if err := ld.truncateDownloadFile(); err != nil { + return nil, 0, xfer.DoNotRetry{Err: err} + } + + return nil, 0, err + } + return nil, 0, xfer.DoNotRetry{Err: err} + } + + progress.Update(progressOutput, ld.ID(), "Download complete") + + logrus.Debugf("Downloaded %s to tempfile %s", ld.ID(), tmpFile.Name()) + + _, err = tmpFile.Seek(0, os.SEEK_SET) + if err != nil { + tmpFile.Close() + if err := os.Remove(tmpFile.Name()); err != nil { + logrus.Errorf("Failed to remove temp file: %s", tmpFile.Name()) + } + ld.tmpFile = nil + ld.verifier = nil + return nil, 0, xfer.DoNotRetry{Err: err} + } + + // hand off the temporary file to the download manager, so it will only + // be closed once + ld.tmpFile = nil + + return ioutils.NewReadCloserWrapper(tmpFile, func() error { + tmpFile.Close() + err := os.RemoveAll(tmpFile.Name()) + if err != nil { + logrus.Errorf("Failed to remove temp file: %s", tmpFile.Name()) + } + return err + }), size, nil +} + +func (ld *v2LayerDescriptor) Close() { + if ld.tmpFile != nil { + ld.tmpFile.Close() + if err := os.RemoveAll(ld.tmpFile.Name()); err != nil { + logrus.Errorf("Failed to remove temp file: %s", ld.tmpFile.Name()) + } + } +} + +func (ld *v2LayerDescriptor) truncateDownloadFile() error { + // Need a new hash context since we will be redoing the download + ld.verifier = nil + + if _, err := ld.tmpFile.Seek(0, os.SEEK_SET); err != nil { + logrus.Errorf("error seeking to beginning of download file: %v", err) + return err + } + + if err := ld.tmpFile.Truncate(0); err != nil { + logrus.Errorf("error truncating download file: %v", err) + return err + } + + return nil +} + +func (ld *v2LayerDescriptor) Registered(diffID layer.DiffID) { + // Cache mapping from this layer's DiffID to the blobsum + ld.V2MetadataService.Add(diffID, metadata.V2Metadata{Digest: ld.digest, SourceRepository: ld.repoInfo.Name.Name()}) +} + +func (p *v2Puller) pullV2Tag(ctx context.Context, ref reference.Named) (tagUpdated bool, err error) { + manSvc, err := p.repo.Manifests(ctx) + if err != nil { + return false, err + } + + var ( + manifest distribution.Manifest + tagOrDigest string // Used for logging/progress only + ) + if digested, isDigested := ref.(reference.Canonical); isDigested { + manifest, err = manSvc.Get(ctx, digested.Digest()) + if err != nil { + return false, err + } + tagOrDigest = digested.Digest().String() + } else if tagged, isTagged := ref.(reference.NamedTagged); isTagged { + manifest, err = manSvc.Get(ctx, "", distribution.WithTag(tagged.Tag())) + if err != nil { + return false, allowV1Fallback(err) + } + tagOrDigest = tagged.Tag() + } 
else { + return false, fmt.Errorf("internal error: reference has neither a tag nor a digest: %s", reference.FamiliarString(ref)) + } + + if manifest == nil { + return false, fmt.Errorf("image manifest does not exist for tag or digest %q", tagOrDigest) + } + + if m, ok := manifest.(*schema2.DeserializedManifest); ok { + var allowedMediatype bool + for _, t := range p.config.Schema2Types { + if m.Manifest.Config.MediaType == t { + allowedMediatype = true + break + } + } + if !allowedMediatype { + configClass := mediaTypeClasses[m.Manifest.Config.MediaType] + if configClass == "" { + configClass = "unknown" + } + return false, fmt.Errorf("Encountered remote %q(%s) when fetching", m.Manifest.Config.MediaType, configClass) + } + } + + // If manSvc.Get succeeded, we can be confident that the registry on + // the other side speaks the v2 protocol. + p.confirmedV2 = true + + logrus.Debugf("Pulling ref from V2 registry: %s", reference.FamiliarString(ref)) + progress.Message(p.config.ProgressOutput, tagOrDigest, "Pulling from "+reference.FamiliarName(p.repo.Named())) + + var ( + id digest.Digest + manifestDigest digest.Digest + ) + + switch v := manifest.(type) { + case *schema1.SignedManifest: + if p.config.RequireSchema2 { + return false, fmt.Errorf("invalid manifest: not schema2") + } + id, manifestDigest, err = p.pullSchema1(ctx, ref, v) + if err != nil { + return false, err + } + case *schema2.DeserializedManifest: + id, manifestDigest, err = p.pullSchema2(ctx, ref, v) + if err != nil { + return false, err + } + case *manifestlist.DeserializedManifestList: + id, manifestDigest, err = p.pullManifestList(ctx, ref, v) + if err != nil { + return false, err + } + default: + return false, errors.New("unsupported manifest format") + } + + progress.Message(p.config.ProgressOutput, "", "Digest: "+manifestDigest.String()) + + if p.config.ReferenceStore != nil { + oldTagID, err := p.config.ReferenceStore.Get(ref) + if err == nil { + if oldTagID == id { + return false, addDigestReference(p.config.ReferenceStore, ref, manifestDigest, id) + } + } else if err != refstore.ErrDoesNotExist { + return false, err + } + + if canonical, ok := ref.(reference.Canonical); ok { + if err = p.config.ReferenceStore.AddDigest(canonical, id, true); err != nil { + return false, err + } + } else { + if err = addDigestReference(p.config.ReferenceStore, ref, manifestDigest, id); err != nil { + return false, err + } + if err = p.config.ReferenceStore.AddTag(ref, id, true); err != nil { + return false, err + } + } + } + return true, nil +} + +func (p *v2Puller) pullSchema1(ctx context.Context, ref reference.Named, unverifiedManifest *schema1.SignedManifest) (id digest.Digest, manifestDigest digest.Digest, err error) { + var verifiedManifest *schema1.Manifest + verifiedManifest, err = verifySchema1Manifest(unverifiedManifest, ref) + if err != nil { + return "", "", err + } + + rootFS := image.NewRootFS() + + // remove duplicate layers and check parent chain validity + err = fixManifestLayers(verifiedManifest) + if err != nil { + return "", "", err + } + + var descriptors []xfer.DownloadDescriptor + + // Image history converted to the new format + var history []image.History + + // Note that the order of this loop is in the direction of bottom-most + // to top-most, so that the downloads slice gets ordered correctly. 
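// Editor's note (annotation, not part of the patch): schema1 carries one raw
// JSON V1Compatibility string per layer. Entries whose "throwaway" flag is
// set contributed no filesystem diff (metadata-only build steps, for
// example), so the loop below keeps them in the history but creates no
// download descriptor for them. A minimal sketch of the partial decode:
//
//	var throwAway struct {
//		ThrowAway bool `json:"throwaway,omitempty"`
//	}
//	if err := json.Unmarshal([]byte(compat), &throwAway); err == nil && throwAway.ThrowAway {
//		// record history only; nothing to download for this entry
//	}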
+ for i := len(verifiedManifest.FSLayers) - 1; i >= 0; i-- { + blobSum := verifiedManifest.FSLayers[i].BlobSum + + var throwAway struct { + ThrowAway bool `json:"throwaway,omitempty"` + } + if err := json.Unmarshal([]byte(verifiedManifest.History[i].V1Compatibility), &throwAway); err != nil { + return "", "", err + } + + h, err := v1.HistoryFromConfig([]byte(verifiedManifest.History[i].V1Compatibility), throwAway.ThrowAway) + if err != nil { + return "", "", err + } + history = append(history, h) + + if throwAway.ThrowAway { + continue + } + + layerDescriptor := &v2LayerDescriptor{ + digest: blobSum, + repoInfo: p.repoInfo, + repo: p.repo, + V2MetadataService: p.V2MetadataService, + } + + descriptors = append(descriptors, layerDescriptor) + } + + resultRootFS, release, err := p.config.DownloadManager.Download(ctx, *rootFS, descriptors, p.config.ProgressOutput) + if err != nil { + return "", "", err + } + defer release() + + config, err := v1.MakeConfigFromV1Config([]byte(verifiedManifest.History[0].V1Compatibility), &resultRootFS, history) + if err != nil { + return "", "", err + } + + imageID, err := p.config.ImageStore.Put(config) + if err != nil { + return "", "", err + } + + manifestDigest = digest.FromBytes(unverifiedManifest.Canonical) + + return imageID, manifestDigest, nil +} + +func (p *v2Puller) pullSchema2(ctx context.Context, ref reference.Named, mfst *schema2.DeserializedManifest) (id digest.Digest, manifestDigest digest.Digest, err error) { + manifestDigest, err = schema2ManifestDigest(ref, mfst) + if err != nil { + return "", "", err + } + + target := mfst.Target() + if _, err := p.config.ImageStore.Get(target.Digest); err == nil { + // If the image already exists locally, no need to pull + // anything. + return target.Digest, manifestDigest, nil + } + + var descriptors []xfer.DownloadDescriptor + + // Note that the order of this loop is in the direction of bottom-most + // to top-most, so that the downloads slice gets ordered correctly. + for _, d := range mfst.Layers { + layerDescriptor := &v2LayerDescriptor{ + digest: d.Digest, + repo: p.repo, + repoInfo: p.repoInfo, + V2MetadataService: p.V2MetadataService, + src: d, + } + + descriptors = append(descriptors, layerDescriptor) + } + + configChan := make(chan []byte, 1) + configErrChan := make(chan error, 1) + layerErrChan := make(chan error, 1) + downloadsDone := make(chan struct{}) + var cancel func() + ctx, cancel = context.WithCancel(ctx) + defer cancel() + + // Pull the image config + go func() { + configJSON, err := p.pullSchema2Config(ctx, target.Digest) + if err != nil { + configErrChan <- ImageConfigPullError{Err: err} + cancel() + return + } + configChan <- configJSON + }() + + var ( + configJSON []byte // raw serialized image config + downloadedRootFS *image.RootFS // rootFS from registered layers + configRootFS *image.RootFS // rootFS from configuration + release func() // release resources from rootFS download + ) + + // https://github.com/docker/docker/issues/24766 - Err on the side of caution, + // explicitly blocking images intended for Linux from the Windows daemon. On + // Windows, we do this before the attempt to download, effectively serialising + // the download and slightly slowing it down. We have to do it this way, as + // chances are the download of layers itself would fail due to file names + // which aren't suitable for NTFS.
At some point in the future, if a similar + // check to block Windows images being pulled on Linux is implemented, it + // may be necessary to perform the same type of serialisation. + if runtime.GOOS == "windows" { + configJSON, configRootFS, err = receiveConfig(p.config.ImageStore, configChan, configErrChan) + if err != nil { + return "", "", err + } + + if configRootFS == nil { + return "", "", errRootFSInvalid + } + + if len(descriptors) != len(configRootFS.DiffIDs) { + return "", "", errRootFSMismatch + } + + // Populate diff ids in descriptors to avoid downloading foreign layers + // which have been side-loaded. + for i := range descriptors { + descriptors[i].(*v2LayerDescriptor).diffID = configRootFS.DiffIDs[i] + } + } + + if p.config.DownloadManager != nil { + go func() { + var ( + err error + rootFS image.RootFS + ) + downloadRootFS := *image.NewRootFS() + rootFS, release, err = p.config.DownloadManager.Download(ctx, downloadRootFS, descriptors, p.config.ProgressOutput) + if err != nil { + // Intentionally do not cancel the config download here, + // as the error from the config download (if there is one) + // is more interesting than the layer download error. + layerErrChan <- err + return + } + + downloadedRootFS = &rootFS + close(downloadsDone) + }() + } else { + // We have nothing to download + close(downloadsDone) + } + + if configJSON == nil { + configJSON, configRootFS, err = receiveConfig(p.config.ImageStore, configChan, configErrChan) + if err == nil && configRootFS == nil { + err = errRootFSInvalid + } + if err != nil { + cancel() + select { + case <-downloadsDone: + case <-layerErrChan: + } + return "", "", err + } + } + + select { + case <-downloadsDone: + case err = <-layerErrChan: + return "", "", err + } + + if release != nil { + defer release() + } + + if downloadedRootFS != nil { + // The DiffIDs returned in rootFS MUST match those in the config. + // Otherwise the image config could be referencing layers that aren't + // included in the manifest. + if len(downloadedRootFS.DiffIDs) != len(configRootFS.DiffIDs) { + return "", "", errRootFSMismatch + } + + for i := range downloadedRootFS.DiffIDs { + if downloadedRootFS.DiffIDs[i] != configRootFS.DiffIDs[i] { + return "", "", errRootFSMismatch + } + } + } + + imageID, err := p.config.ImageStore.Put(configJSON) + if err != nil { + return "", "", err + } + + return imageID, manifestDigest, nil +} + +func receiveConfig(s ImageConfigStore, configChan <-chan []byte, errChan <-chan error) ([]byte, *image.RootFS, error) { + select { + case configJSON := <-configChan: + rootfs, err := s.RootFSFromConfig(configJSON) + if err != nil { + return nil, nil, err + } + return configJSON, rootfs, nil + case err := <-errChan: + return nil, nil, err + // No case is needed for ctx.Done in this select because cancellation + // will trigger an error in p.pullSchema2Config. + } +} + +// pullManifestList handles "manifest lists" which point to various +// platform-specific manifests.
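// Editor's note (annotation, not part of the patch): the selection step at
// the heart of pullManifestList, sketched; it picks the first entry whose
// platform matches the daemon's:
//
//	for _, d := range mfstList.Manifests {
//		if d.Platform.OS == runtime.GOOS && d.Platform.Architecture == runtime.GOARCH {
//			return d.Digest // fetch and pull this manifest as usual
//		}
//	}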
+func (p *v2Puller) pullManifestList(ctx context.Context, ref reference.Named, mfstList *manifestlist.DeserializedManifestList) (id digest.Digest, manifestListDigest digest.Digest, err error) { + manifestListDigest, err = schema2ManifestDigest(ref, mfstList) + if err != nil { + return "", "", err + } + + logrus.Debugf("%s resolved to a manifestList object with %d entries; looking for a os/arch match", ref, len(mfstList.Manifests)) + var manifestDigest digest.Digest + for _, manifestDescriptor := range mfstList.Manifests { + // TODO(aaronl): The manifest list spec supports optional + // "features" and "variant" fields. These are not yet used. + // Once they are, their values should be interpreted here. + if manifestDescriptor.Platform.Architecture == runtime.GOARCH && manifestDescriptor.Platform.OS == runtime.GOOS { + manifestDigest = manifestDescriptor.Digest + logrus.Debugf("found match for %s/%s with media type %s, digest %s", runtime.GOOS, runtime.GOARCH, manifestDescriptor.MediaType, manifestDigest.String()) + break + } + } + + if manifestDigest == "" { + errMsg := fmt.Sprintf("no matching manifest for %s/%s in the manifest list entries", runtime.GOOS, runtime.GOARCH) + logrus.Debugf(errMsg) + return "", "", errors.New(errMsg) + } + + manSvc, err := p.repo.Manifests(ctx) + if err != nil { + return "", "", err + } + + manifest, err := manSvc.Get(ctx, manifestDigest) + if err != nil { + return "", "", err + } + + manifestRef, err := reference.WithDigest(reference.TrimNamed(ref), manifestDigest) + if err != nil { + return "", "", err + } + + switch v := manifest.(type) { + case *schema1.SignedManifest: + id, _, err = p.pullSchema1(ctx, manifestRef, v) + if err != nil { + return "", "", err + } + case *schema2.DeserializedManifest: + id, _, err = p.pullSchema2(ctx, manifestRef, v) + if err != nil { + return "", "", err + } + default: + return "", "", errors.New("unsupported manifest format") + } + + return id, manifestListDigest, err +} + +func (p *v2Puller) pullSchema2Config(ctx context.Context, dgst digest.Digest) (configJSON []byte, err error) { + blobs := p.repo.Blobs(ctx) + configJSON, err = blobs.Get(ctx, dgst) + if err != nil { + return nil, err + } + + // Verify image config digest + verifier := dgst.Verifier() + if _, err := verifier.Write(configJSON); err != nil { + return nil, err + } + if !verifier.Verified() { + err := fmt.Errorf("image config verification failed for digest %s", dgst) + logrus.Error(err) + return nil, err + } + + return configJSON, nil +} + +// schema2ManifestDigest computes the manifest digest, and, if pulling by +// digest, ensures that it matches the requested digest. +func schema2ManifestDigest(ref reference.Named, mfst distribution.Manifest) (digest.Digest, error) { + _, canonical, err := mfst.Payload() + if err != nil { + return "", err + } + + // If pull by digest, then verify the manifest digest. + if digested, isDigested := ref.(reference.Canonical); isDigested { + verifier := digested.Digest().Verifier() + if _, err := verifier.Write(canonical); err != nil { + return "", err + } + if !verifier.Verified() { + err := fmt.Errorf("manifest verification failed for digest %s", digested.Digest()) + logrus.Error(err) + return "", err + } + return digested.Digest(), nil + } + + return digest.FromBytes(canonical), nil +} + +// allowV1Fallback checks if the error is a possible reason to fallback to v1 +// (even if confirmedV2 has been set already), and if so, wraps the error in +// a fallbackError with confirmedV2 set to false. 
Otherwise, it returns the +// error unmodified. +func allowV1Fallback(err error) error { + switch v := err.(type) { + case errcode.Errors: + if len(v) != 0 { + if v0, ok := v[0].(errcode.Error); ok && shouldV2Fallback(v0) { + return fallbackError{ + err: err, + confirmedV2: false, + transportOK: true, + } + } + } + case errcode.Error: + if shouldV2Fallback(v) { + return fallbackError{ + err: err, + confirmedV2: false, + transportOK: true, + } + } + case *url.Error: + if v.Err == auth.ErrNoBasicAuthCredentials { + return fallbackError{err: err, confirmedV2: false} + } + } + + return err +} + +func verifySchema1Manifest(signedManifest *schema1.SignedManifest, ref reference.Named) (m *schema1.Manifest, err error) { + // If pull by digest, then verify the manifest digest. NOTE: It is + // important to do this first, before any other content validation. If the + // digest cannot be verified, don't even bother with those other things. + if digested, isCanonical := ref.(reference.Canonical); isCanonical { + verifier := digested.Digest().Verifier() + if _, err := verifier.Write(signedManifest.Canonical); err != nil { + return nil, err + } + if !verifier.Verified() { + err := fmt.Errorf("image verification failed for digest %s", digested.Digest()) + logrus.Error(err) + return nil, err + } + } + m = &signedManifest.Manifest + + if m.SchemaVersion != 1 { + return nil, fmt.Errorf("unsupported schema version %d for %q", m.SchemaVersion, reference.FamiliarString(ref)) + } + if len(m.FSLayers) != len(m.History) { + return nil, fmt.Errorf("length of history not equal to number of layers for %q", reference.FamiliarString(ref)) + } + if len(m.FSLayers) == 0 { + return nil, fmt.Errorf("no FSLayers in manifest for %q", reference.FamiliarString(ref)) + } + return m, nil +} + +// fixManifestLayers removes repeated layers from the manifest and checks the +// correctness of the parent chain. +func fixManifestLayers(m *schema1.Manifest) error { + imgs := make([]*image.V1Image, len(m.FSLayers)) + for i := range m.FSLayers { + img := &image.V1Image{} + + if err := json.Unmarshal([]byte(m.History[i].V1Compatibility), img); err != nil { + return err + } + + imgs[i] = img + if err := v1.ValidateID(img.ID); err != nil { + return err + } + } + + if imgs[len(imgs)-1].Parent != "" && runtime.GOOS != "windows" { + // A Windows base layer can point to a base layer parent that is not in the manifest. + return errors.New("invalid parent ID in the base layer of the image") + } + + // Check for non-adjacent duplicate IDs up front, so we return an error instead of misbehaving in the removal loop below. + idmap := make(map[string]struct{}) + + var lastID string + for _, img := range imgs { + // Skip IDs that appear after each other; adjacent repeats are handled later. + if _, exists := idmap[img.ID]; img.ID != lastID && exists { + return fmt.Errorf("ID %+v appears multiple times in manifest", img.ID) + } + lastID = img.ID + idmap[lastID] = struct{}{} + } + + // Iterate backwards so that the remaining indexes stay valid after items are removed. + for i := len(imgs) - 2; i >= 0; i-- { + if imgs[i].ID == imgs[i+1].ID { // repeated ID; remove and continue + m.FSLayers = append(m.FSLayers[:i], m.FSLayers[i+1:]...) + m.History = append(m.History[:i], m.History[i+1:]...) + } else if imgs[i].Parent != imgs[i+1].ID { + return fmt.Errorf("Invalid parent ID.
Expected %v, got %v.", imgs[i+1].ID, imgs[i].Parent) + } + } + + return nil +} + +func createDownloadFile() (*os.File, error) { + return ioutil.TempFile("", "GetImageBlob") +} diff --git a/components/engine/distribution/pull_v2_test.go b/components/engine/distribution/pull_v2_test.go new file mode 100644 index 0000000000..df93c1ef83 --- /dev/null +++ b/components/engine/distribution/pull_v2_test.go @@ -0,0 +1,183 @@ +package distribution + +import ( + "encoding/json" + "io/ioutil" + "reflect" + "runtime" + "strings" + "testing" + + "github.com/docker/distribution/manifest/schema1" + "github.com/docker/distribution/reference" + "github.com/opencontainers/go-digest" +) + +// TestFixManifestLayers checks that fixManifestLayers removes a duplicate +// layer, and that it makes no changes to the manifest when called a second +// time, after the duplicate is removed. +func TestFixManifestLayers(t *testing.T) { + duplicateLayerManifest := schema1.Manifest{ + FSLayers: []schema1.FSLayer{ + {BlobSum: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")}, + {BlobSum: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")}, + {BlobSum: digest.Digest("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa")}, + }, + History: []schema1.History{ + {V1Compatibility: "{\"id\":\"3b38edc92eb7c074812e217b41a6ade66888531009d6286a6f5f36a06f9841b9\",\"parent\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:11.368300679Z\",\"container\":\"d91be3479d5b1e84b0c00d18eea9dc777ca0ad166d51174b24283e2e6f104253\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) ENTRYPOINT [\\\"/go/bin/dnsdock\\\"]\"],\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":null,\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n"}, + {V1Compatibility: 
"{\"id\":\"3b38edc92eb7c074812e217b41a6ade66888531009d6286a6f5f36a06f9841b9\",\"parent\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:11.368300679Z\",\"container\":\"d91be3479d5b1e84b0c00d18eea9dc777ca0ad166d51174b24283e2e6f104253\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) ENTRYPOINT [\\\"/go/bin/dnsdock\\\"]\"],\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":null,\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n"}, + {V1Compatibility: "{\"id\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:07.568027497Z\",\"container\":\"fe9e5a5264a843c9292d17b736c92dd19bdb49986a8782d7389964ddaff887cc\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"cd /go/src/github.com/tonistiigi/dnsdock \\u0026\\u0026 go get -v github.com/tools/godep \\u0026\\u0026 godep restore \\u0026\\u0026 go install -ldflags \\\"-X main.version `git describe --tags HEAD``if [[ -n $(command git status --porcelain --untracked-files=no 2\\u003e/dev/null) ]]; then echo \\\"-dirty\\\"; fi`\\\" 
./...\"],\"Image\":\"e3b0ff09e647595dafee15c54cd632c900df9e82b1d4d313b1e20639a1461779\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/bash\"],\"Image\":\"e3b0ff09e647595dafee15c54cd632c900df9e82b1d4d313b1e20639a1461779\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":118430532}\n"}, + }, + } + + duplicateLayerManifestExpectedOutput := schema1.Manifest{ + FSLayers: []schema1.FSLayer{ + {BlobSum: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")}, + {BlobSum: digest.Digest("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa")}, + }, + History: []schema1.History{ + {V1Compatibility: "{\"id\":\"3b38edc92eb7c074812e217b41a6ade66888531009d6286a6f5f36a06f9841b9\",\"parent\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:11.368300679Z\",\"container\":\"d91be3479d5b1e84b0c00d18eea9dc777ca0ad166d51174b24283e2e6f104253\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) ENTRYPOINT [\\\"/go/bin/dnsdock\\\"]\"],\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":null,\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n"}, + {V1Compatibility: 
"{\"id\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:07.568027497Z\",\"container\":\"fe9e5a5264a843c9292d17b736c92dd19bdb49986a8782d7389964ddaff887cc\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"cd /go/src/github.com/tonistiigi/dnsdock \\u0026\\u0026 go get -v github.com/tools/godep \\u0026\\u0026 godep restore \\u0026\\u0026 go install -ldflags \\\"-X main.version `git describe --tags HEAD``if [[ -n $(command git status --porcelain --untracked-files=no 2\\u003e/dev/null) ]]; then echo \\\"-dirty\\\"; fi`\\\" ./...\"],\"Image\":\"e3b0ff09e647595dafee15c54cd632c900df9e82b1d4d313b1e20639a1461779\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/bash\"],\"Image\":\"e3b0ff09e647595dafee15c54cd632c900df9e82b1d4d313b1e20639a1461779\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":118430532}\n"}, + }, + } + + if err := fixManifestLayers(&duplicateLayerManifest); err != nil { + t.Fatalf("unexpected error from fixManifestLayers: %v", err) + } + + if !reflect.DeepEqual(duplicateLayerManifest, duplicateLayerManifestExpectedOutput) { + t.Fatal("incorrect output from fixManifestLayers on duplicate layer manifest") + } + + // Run fixManifestLayers again and confirm that it doesn't change the + // manifest (which no longer has duplicate layers). + if err := fixManifestLayers(&duplicateLayerManifest); err != nil { + t.Fatalf("unexpected error from fixManifestLayers: %v", err) + } + + if !reflect.DeepEqual(duplicateLayerManifest, duplicateLayerManifestExpectedOutput) { + t.Fatal("incorrect output from fixManifestLayers on duplicate layer manifest (second pass)") + } +} + +// TestFixManifestLayersBaseLayerParent makes sure that fixManifestLayers fails +// if the base layer configuration specifies a parent. 
+func TestFixManifestLayersBaseLayerParent(t *testing.T) { + // TODO Windows: Fix this unit text + if runtime.GOOS == "windows" { + t.Skip("Needs fixing on Windows") + } + duplicateLayerManifest := schema1.Manifest{ + FSLayers: []schema1.FSLayer{ + {BlobSum: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")}, + {BlobSum: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")}, + {BlobSum: digest.Digest("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa")}, + }, + History: []schema1.History{ + {V1Compatibility: "{\"id\":\"3b38edc92eb7c074812e217b41a6ade66888531009d6286a6f5f36a06f9841b9\",\"parent\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:11.368300679Z\",\"container\":\"d91be3479d5b1e84b0c00d18eea9dc777ca0ad166d51174b24283e2e6f104253\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) ENTRYPOINT [\\\"/go/bin/dnsdock\\\"]\"],\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":null,\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n"}, + {V1Compatibility: "{\"id\":\"3b38edc92eb7c074812e217b41a6ade66888531009d6286a6f5f36a06f9841b9\",\"parent\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:11.368300679Z\",\"container\":\"d91be3479d5b1e84b0c00d18eea9dc777ca0ad166d51174b24283e2e6f104253\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) ENTRYPOINT 
[\\\"/go/bin/dnsdock\\\"]\"],\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":null,\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n"}, + {V1Compatibility: "{\"id\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"parent\":\"e3b0ff09e647595dafee15c54cd632c900df9e82b1d4d313b1e20639a1461779\",\"created\":\"2015-08-19T16:49:07.568027497Z\",\"container\":\"fe9e5a5264a843c9292d17b736c92dd19bdb49986a8782d7389964ddaff887cc\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"cd /go/src/github.com/tonistiigi/dnsdock \\u0026\\u0026 go get -v github.com/tools/godep \\u0026\\u0026 godep restore \\u0026\\u0026 go install -ldflags \\\"-X main.version `git describe --tags HEAD``if [[ -n $(command git status --porcelain --untracked-files=no 2\\u003e/dev/null) ]]; then echo \\\"-dirty\\\"; fi`\\\" ./...\"],\"Image\":\"e3b0ff09e647595dafee15c54cd632c900df9e82b1d4d313b1e20639a1461779\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/bash\"],\"Image\":\"e3b0ff09e647595dafee15c54cd632c900df9e82b1d4d313b1e20639a1461779\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":118430532}\n"}, + }, + } + + if err := fixManifestLayers(&duplicateLayerManifest); err == nil || !strings.Contains(err.Error(), "invalid parent ID in the base layer of the image") { + t.Fatalf("expected an invalid parent ID error from fixManifestLayers") + } +} + +// TestFixManifestLayersBadParent makes sure that fixManifestLayers fails +// if an image configuration specifies a parent that doesn't directly 
follow +// that (deduplicated) image in the image history. +func TestFixManifestLayersBadParent(t *testing.T) { + duplicateLayerManifest := schema1.Manifest{ + FSLayers: []schema1.FSLayer{ + {BlobSum: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")}, + {BlobSum: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")}, + {BlobSum: digest.Digest("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa")}, + }, + History: []schema1.History{ + {V1Compatibility: "{\"id\":\"3b38edc92eb7c074812e217b41a6ade66888531009d6286a6f5f36a06f9841b9\",\"parent\":\"ac3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:11.368300679Z\",\"container\":\"d91be3479d5b1e84b0c00d18eea9dc777ca0ad166d51174b24283e2e6f104253\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) ENTRYPOINT [\\\"/go/bin/dnsdock\\\"]\"],\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":null,\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n"}, + {V1Compatibility: "{\"id\":\"3b38edc92eb7c074812e217b41a6ade66888531009d6286a6f5f36a06f9841b9\",\"parent\":\"ac3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:11.368300679Z\",\"container\":\"d91be3479d5b1e84b0c00d18eea9dc777ca0ad166d51174b24283e2e6f104253\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) ENTRYPOINT 
[\\\"/go/bin/dnsdock\\\"]\"],\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":null,\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n"}, + {V1Compatibility: "{\"id\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:07.568027497Z\",\"container\":\"fe9e5a5264a843c9292d17b736c92dd19bdb49986a8782d7389964ddaff887cc\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"cd /go/src/github.com/tonistiigi/dnsdock \\u0026\\u0026 go get -v github.com/tools/godep \\u0026\\u0026 godep restore \\u0026\\u0026 go install -ldflags \\\"-X main.version `git describe --tags HEAD``if [[ -n $(command git status --porcelain --untracked-files=no 2\\u003e/dev/null) ]]; then echo \\\"-dirty\\\"; fi`\\\" ./...\"],\"Image\":\"e3b0ff09e647595dafee15c54cd632c900df9e82b1d4d313b1e20639a1461779\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/bash\"],\"Image\":\"e3b0ff09e647595dafee15c54cd632c900df9e82b1d4d313b1e20639a1461779\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":118430532}\n"}, + }, + } + + if err := fixManifestLayers(&duplicateLayerManifest); err == nil || !strings.Contains(err.Error(), "Invalid parent ID.") { + t.Fatalf("expected an invalid parent ID error from fixManifestLayers") + } +} + +// TestValidateManifest verifies the validateManifest function +func TestValidateManifest(t *testing.T) { + // TODO Windows: Fix this unit text + if runtime.GOOS == "windows" { + t.Skip("Needs fixing on Windows") + } + expectedDigest, err := 
reference.ParseNormalizedNamed("repo@sha256:02fee8c3220ba806531f606525eceb83f4feb654f62b207191b1c9209188dedd") + if err != nil { + t.Fatal("could not parse reference") + } + expectedFSLayer0 := digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4") + + // Good manifest + + goodManifestBytes, err := ioutil.ReadFile("fixtures/validate_manifest/good_manifest") + if err != nil { + t.Fatal("error reading fixture:", err) + } + + var goodSignedManifest schema1.SignedManifest + err = json.Unmarshal(goodManifestBytes, &goodSignedManifest) + if err != nil { + t.Fatal("error unmarshaling manifest:", err) + } + + verifiedManifest, err := verifySchema1Manifest(&goodSignedManifest, expectedDigest) + if err != nil { + t.Fatal("validateManifest failed:", err) + } + + if verifiedManifest.FSLayers[0].BlobSum != expectedFSLayer0 { + t.Fatal("unexpected FSLayer in good manifest") + } + + // "Extra data" manifest + + extraDataManifestBytes, err := ioutil.ReadFile("fixtures/validate_manifest/extra_data_manifest") + if err != nil { + t.Fatal("error reading fixture:", err) + } + + var extraDataSignedManifest schema1.SignedManifest + err = json.Unmarshal(extraDataManifestBytes, &extraDataSignedManifest) + if err != nil { + t.Fatal("error unmarshaling manifest:", err) + } + + verifiedManifest, err = verifySchema1Manifest(&extraDataSignedManifest, expectedDigest) + if err != nil { + t.Fatal("validateManifest failed:", err) + } + + if verifiedManifest.FSLayers[0].BlobSum != expectedFSLayer0 { + t.Fatal("unexpected FSLayer in extra data manifest") + } + + // Bad manifest + + badManifestBytes, err := ioutil.ReadFile("fixtures/validate_manifest/bad_manifest") + if err != nil { + t.Fatal("error reading fixture:", err) + } + + var badSignedManifest schema1.SignedManifest + err = json.Unmarshal(badManifestBytes, &badSignedManifest) + if err != nil { + t.Fatal("error unmarshaling manifest:", err) + } + + verifiedManifest, err = verifySchema1Manifest(&badSignedManifest, expectedDigest) + if err == nil || !strings.HasPrefix(err.Error(), "image verification failed for digest") { + t.Fatal("expected validateManifest to fail with digest error") + } +} diff --git a/components/engine/distribution/pull_v2_unix.go b/components/engine/distribution/pull_v2_unix.go new file mode 100644 index 0000000000..45a7a0c150 --- /dev/null +++ b/components/engine/distribution/pull_v2_unix.go @@ -0,0 +1,13 @@ +// +build !windows + +package distribution + +import ( + "github.com/docker/distribution" + "github.com/docker/distribution/context" +) + +func (ld *v2LayerDescriptor) open(ctx context.Context) (distribution.ReadSeekCloser, error) { + blobs := ld.repo.Blobs(ctx) + return blobs.Open(ctx, ld.digest) +} diff --git a/components/engine/distribution/pull_v2_windows.go b/components/engine/distribution/pull_v2_windows.go new file mode 100644 index 0000000000..543ecc10eb --- /dev/null +++ b/components/engine/distribution/pull_v2_windows.go @@ -0,0 +1,57 @@ +// +build windows + +package distribution + +import ( + "net/http" + "os" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/manifest/schema2" + "github.com/docker/distribution/registry/client/transport" +) + +var _ distribution.Describable = &v2LayerDescriptor{} + +func (ld *v2LayerDescriptor) Descriptor() distribution.Descriptor { + if ld.src.MediaType == schema2.MediaTypeForeignLayer && len(ld.src.URLs) > 0 { + return ld.src + } + return distribution.Descriptor{} +} + 
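// Editor's note (annotation, not part of the patch): a hypothetical,
// self-contained sketch of the foreign-URL probe that open() below performs.
// Seek doubles as a reachability check because the HTTP read-seeker only
// issues its GET lazily; all names here are illustrative.
func probeForeignURLs(urls []string) (distribution.ReadSeekCloser, error) {
	var lastErr error
	for _, u := range urls {
		rsc := transport.NewHTTPReadSeeker(http.DefaultClient, u, nil)
		_, err := rsc.Seek(0, os.SEEK_SET)
		if err == nil {
			return rsc, nil // this URL answered the GET; read from it
		}
		lastErr = err
		rsc.Close()
	}
	return nil, lastErr
}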
+func (ld *v2LayerDescriptor) open(ctx context.Context) (distribution.ReadSeekCloser, error) { + blobs := ld.repo.Blobs(ctx) + rsc, err := blobs.Open(ctx, ld.digest) + + if len(ld.src.URLs) == 0 { + return rsc, err + } + + // We're done if the registry has this blob. + if err == nil { + // Seek does an HTTP GET. If it succeeds, the blob really is accessible. + if _, err = rsc.Seek(0, os.SEEK_SET); err == nil { + return rsc, nil + } + rsc.Close() + } + + // Find the first URL that results in a 200 status code. + for _, url := range ld.src.URLs { + logrus.Debugf("Pulling %v from foreign URL %v", ld.digest, url) + rsc = transport.NewHTTPReadSeeker(http.DefaultClient, url, nil) + + // Seek does an HTTP GET. If it succeeds, the blob really is accessible. + _, err = rsc.Seek(0, os.SEEK_SET) + if err == nil { + break + } + logrus.Debugf("Download for %v failed: %v", ld.digest, err) + rsc.Close() + rsc = nil + } + return rsc, err +} diff --git a/components/engine/distribution/push.go b/components/engine/distribution/push.go new file mode 100644 index 0000000000..395e4d1512 --- /dev/null +++ b/components/engine/distribution/push.go @@ -0,0 +1,186 @@ +package distribution + +import ( + "bufio" + "compress/gzip" + "fmt" + "io" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/reference" + "github.com/docker/docker/distribution/metadata" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/registry" + "golang.org/x/net/context" +) + +// Pusher is an interface that abstracts pushing for different API versions. +type Pusher interface { + // Push tries to push the image configured at the creation of Pusher. + // Push returns an error if any; a returned fallbackError signals that the push may be retried on the next configured endpoint. + // + // TODO(tiborvass): have Push() take a reference to repository + tag, so that the pusher itself is repository-agnostic. + Push(ctx context.Context) error +} + +const compressionBufSize = 32768 + +// NewPusher creates a new Pusher implementation that will push to either a v1 or v2 +// registry. The endpoint argument contains a Version field that determines +// whether a v1 or v2 pusher will be created. The other parameters are passed +// through to the underlying pusher implementation for use during the actual +// push operation. +func NewPusher(ref reference.Named, endpoint registry.APIEndpoint, repoInfo *registry.RepositoryInfo, imagePushConfig *ImagePushConfig) (Pusher, error) { + switch endpoint.Version { + case registry.APIVersion2: + return &v2Pusher{ + v2MetadataService: metadata.NewV2MetadataService(imagePushConfig.MetadataStore), + ref: ref, + endpoint: endpoint, + repoInfo: repoInfo, + config: imagePushConfig, + }, nil + case registry.APIVersion1: + return &v1Pusher{ + v1IDService: metadata.NewV1IDService(imagePushConfig.MetadataStore), + ref: ref, + endpoint: endpoint, + repoInfo: repoInfo, + config: imagePushConfig, + }, nil + } + return nil, fmt.Errorf("unknown version %d for registry %s", endpoint.Version, endpoint.URL) +} + +// Push initiates a push operation on ref. +// ref is the specific variant of the image to be pushed. +// If no tag is provided, all tags will be pushed. +func Push(ctx context.Context, ref reference.Named, imagePushConfig *ImagePushConfig) error { + // FIXME: allow interrupting an in-progress push when a new push of the same image starts.
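// Editor's note (annotation, not part of the patch): the endpoint loop below
// carries two pieces of cross-attempt state, sketched here:
//
//	confirmedV2 := false                  // once true, v1 endpoints are skipped
//	confirmedTLS := map[string]struct{}{} // hosts seen over TLS; never retried in plaintext
//	for _, ep := range endpoints {
//		// on a fallbackError, update both and try the next endpoint
//	}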
+ + // Resolve the Repository name from fqn to RepositoryInfo + repoInfo, err := imagePushConfig.RegistryService.ResolveRepository(ref) + if err != nil { + return err + } + + endpoints, err := imagePushConfig.RegistryService.LookupPushEndpoints(reference.Domain(repoInfo.Name)) + if err != nil { + return err + } + + progress.Messagef(imagePushConfig.ProgressOutput, "", "The push refers to a repository [%s]", repoInfo.Name.Name()) + + associations := imagePushConfig.ReferenceStore.ReferencesByName(repoInfo.Name) + if len(associations) == 0 { + return fmt.Errorf("An image does not exist locally with the tag: %s", reference.FamiliarName(repoInfo.Name)) + } + + var ( + lastErr error + + // confirmedV2 is set to true if a push attempt managed to + // confirm that it was talking to a v2 registry. This will + // prevent fallback to the v1 protocol. + confirmedV2 bool + + // confirmedTLSRegistries is a map indicating which registries + // are known to be using TLS. There should never be a plaintext + // retry for any of these. + confirmedTLSRegistries = make(map[string]struct{}) + ) + + for _, endpoint := range endpoints { + if imagePushConfig.RequireSchema2 && endpoint.Version == registry.APIVersion1 { + continue + } + if confirmedV2 && endpoint.Version == registry.APIVersion1 { + logrus.Debugf("Skipping v1 endpoint %s because v2 registry was detected", endpoint.URL) + continue + } + + if endpoint.URL.Scheme != "https" { + if _, confirmedTLS := confirmedTLSRegistries[endpoint.URL.Host]; confirmedTLS { + logrus.Debugf("Skipping non-TLS endpoint %s for host/port that appears to use TLS", endpoint.URL) + continue + } + } + + logrus.Debugf("Trying to push %s to %s %s", repoInfo.Name.Name(), endpoint.URL, endpoint.Version) + + pusher, err := NewPusher(ref, endpoint, repoInfo, imagePushConfig) + if err != nil { + lastErr = err + continue + } + if err := pusher.Push(ctx); err != nil { + // Was this push cancelled? If so, don't try to fall + // back. + select { + case <-ctx.Done(): + default: + if fallbackErr, ok := err.(fallbackError); ok { + confirmedV2 = confirmedV2 || fallbackErr.confirmedV2 + if fallbackErr.transportOK && endpoint.URL.Scheme == "https" { + confirmedTLSRegistries[endpoint.URL.Host] = struct{}{} + } + err = fallbackErr.err + lastErr = err + logrus.Infof("Attempting next endpoint for push after error: %v", err) + continue + } + } + + logrus.Errorf("Not continuing with push after error: %v", err) + return err + } + + imagePushConfig.ImageEventLogger(reference.FamiliarString(ref), reference.FamiliarName(repoInfo.Name), "push") + return nil + } + + if lastErr == nil { + lastErr = fmt.Errorf("no endpoints found for %s", repoInfo.Name.Name()) + } + return lastErr +} + +// compress returns an io.ReadCloser which will supply a compressed version of +// the provided Reader. The caller must close the ReadCloser after reading the +// compressed data. +// +// Note that this function returns a reader instead of taking a writer as an +// argument so that it can be used with httpBlobWriter's ReadFrom method. +// Using httpBlobWriter's Write method would send a PATCH request for every +// Write call. +// +// The second return value is a channel that gets closed when the goroutine +// is finished. This allows the caller to make sure the goroutine finishes +// before it releases any resources connected with the reader that was +// passed in. 
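+//
+// A minimal caller sketch (illustrative only, names are hypothetical):
+//
+//	compressed, done := compress(src)
+//	defer func() {
+//		compressed.Close()
+//		<-done // wait for the compression goroutine before releasing src
+//	}()
+//	// stream the gzipped bytes, e.g. via a BlobWriter's ReadFrom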
+func compress(in io.Reader) (io.ReadCloser, chan struct{}) { + compressionDone := make(chan struct{}) + + pipeReader, pipeWriter := io.Pipe() + // Use a bufio.Writer to avoid excessive chunking in HTTP request. + bufWriter := bufio.NewWriterSize(pipeWriter, compressionBufSize) + compressor := gzip.NewWriter(bufWriter) + + go func() { + _, err := io.Copy(compressor, in) + if err == nil { + err = compressor.Close() + } + if err == nil { + err = bufWriter.Flush() + } + if err != nil { + pipeWriter.CloseWithError(err) + } else { + pipeWriter.Close() + } + close(compressionDone) + }() + + return pipeReader, compressionDone +} diff --git a/components/engine/distribution/push_v1.go b/components/engine/distribution/push_v1.go new file mode 100644 index 0000000000..431faaf28c --- /dev/null +++ b/components/engine/distribution/push_v1.go @@ -0,0 +1,457 @@ +package distribution + +import ( + "fmt" + "sync" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/client/transport" + "github.com/docker/docker/distribution/metadata" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/image" + "github.com/docker/docker/image/v1" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/registry" + "github.com/opencontainers/go-digest" + "golang.org/x/net/context" +) + +type v1Pusher struct { + v1IDService *metadata.V1IDService + endpoint registry.APIEndpoint + ref reference.Named + repoInfo *registry.RepositoryInfo + config *ImagePushConfig + session *registry.Session +} + +func (p *v1Pusher) Push(ctx context.Context) error { + tlsConfig, err := p.config.RegistryService.TLSConfig(p.repoInfo.Index.Name) + if err != nil { + return err + } + // Adds Docker-specific headers as well as user-specified headers (metaHeaders) + tr := transport.NewTransport( + // TODO(tiborvass): was NoTimeout + registry.NewTransport(tlsConfig), + registry.DockerHeaders(dockerversion.DockerUserAgent(ctx), p.config.MetaHeaders)..., + ) + client := registry.HTTPClient(tr) + v1Endpoint, err := p.endpoint.ToV1Endpoint(dockerversion.DockerUserAgent(ctx), p.config.MetaHeaders) + if err != nil { + logrus.Debugf("Could not get v1 endpoint: %v", err) + return fallbackError{err: err} + } + p.session, err = registry.NewSession(client, p.config.AuthConfig, v1Endpoint) + if err != nil { + // TODO(dmcgowan): Check if should fallback + return fallbackError{err: err} + } + if err := p.pushRepository(ctx); err != nil { + // TODO(dmcgowan): Check if should fallback + return err + } + return nil +} + +// v1Image exposes the configuration, filesystem layer ID, and a v1 ID for an +// image being pushed to a v1 registry. +type v1Image interface { + Config() []byte + Layer() layer.Layer + V1ID() string +} + +type v1ImageCommon struct { + layer layer.Layer + config []byte + v1ID string +} + +func (common *v1ImageCommon) Config() []byte { + return common.config +} + +func (common *v1ImageCommon) V1ID() string { + return common.v1ID +} + +func (common *v1ImageCommon) Layer() layer.Layer { + return common.layer +} + +// v1TopImage defines a runnable (top layer) image being pushed to a v1 +// registry. 
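+// Its v1 ID is derived from the image digest, and its parent, if any, is the
+// topmost dependency image beneath it.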
+type v1TopImage struct {
+	v1ImageCommon
+	imageID image.ID
+}
+
+func newV1TopImage(imageID image.ID, img *image.Image, l layer.Layer, parent *v1DependencyImage) (*v1TopImage, error) {
+	v1ID := imageID.Digest().Hex()
+	parentV1ID := ""
+	if parent != nil {
+		parentV1ID = parent.V1ID()
+	}
+
+	config, err := v1.MakeV1ConfigFromConfig(img, v1ID, parentV1ID, false)
+	if err != nil {
+		return nil, err
+	}
+
+	return &v1TopImage{
+		v1ImageCommon: v1ImageCommon{
+			v1ID:   v1ID,
+			config: config,
+			layer:  l,
+		},
+		imageID: imageID,
+	}, nil
+}
+
+// v1DependencyImage defines a dependency layer being pushed to a v1 registry.
+type v1DependencyImage struct {
+	v1ImageCommon
+}
+
+func newV1DependencyImage(l layer.Layer, parent *v1DependencyImage) *v1DependencyImage {
+	v1ID := digest.Digest(l.ChainID()).Hex()
+
+	var config string
+	if parent != nil {
+		config = fmt.Sprintf(`{"id":"%s","parent":"%s"}`, v1ID, parent.V1ID())
+	} else {
+		config = fmt.Sprintf(`{"id":"%s"}`, v1ID)
+	}
+	return &v1DependencyImage{
+		v1ImageCommon: v1ImageCommon{
+			v1ID:   v1ID,
+			config: []byte(config),
+			layer:  l,
+		},
+	}
+}
+
+// Retrieve all the images to be uploaded in the correct order
+func (p *v1Pusher) getImageList() (imageList []v1Image, tagsByImage map[image.ID][]string, referencedLayers []PushLayer, err error) {
+	tagsByImage = make(map[image.ID][]string)
+
+	// Ignore digest references
+	if _, isCanonical := p.ref.(reference.Canonical); isCanonical {
+		return
+	}
+
+	tagged, isTagged := p.ref.(reference.NamedTagged)
+	if isTagged {
+		// Push a specific tag
+		var imgID image.ID
+		var dgst digest.Digest
+		dgst, err = p.config.ReferenceStore.Get(p.ref)
+		if err != nil {
+			return
+		}
+		imgID = image.IDFromDigest(dgst)
+
+		imageList, err = p.imageListForTag(imgID, nil, &referencedLayers)
+		if err != nil {
+			return
+		}
+
+		tagsByImage[imgID] = []string{tagged.Tag()}
+
+		return
+	}
+
+	imagesSeen := make(map[digest.Digest]struct{})
+	dependenciesSeen := make(map[layer.ChainID]*v1DependencyImage)
+
+	associations := p.config.ReferenceStore.ReferencesByName(p.ref)
+	for _, association := range associations {
+		if tagged, isTagged = association.Ref.(reference.NamedTagged); !isTagged {
+			// Ignore digest references.
+			continue
+		}
+
+		imgID := image.IDFromDigest(association.ID)
+		tagsByImage[imgID] = append(tagsByImage[imgID], tagged.Tag())
+
+		if _, present := imagesSeen[association.ID]; present {
+			// Skip generating image list for already-seen image
+			continue
+		}
+		imagesSeen[association.ID] = struct{}{}
+
+		imageListForThisTag, err := p.imageListForTag(imgID, dependenciesSeen, &referencedLayers)
+		if err != nil {
+			return nil, nil, nil, err
+		}
+
+		// append to main image list
+		imageList = append(imageList, imageListForThisTag...)
+	}
+	if len(imageList) == 0 {
+		return nil, nil, nil, fmt.Errorf("No images found for the requested repository / tag")
+	}
+	logrus.Debugf("Image list: %v", imageList)
+	logrus.Debugf("Tags by image: %v", tagsByImage)
+
+	return
+}
+
+func (p *v1Pusher) imageListForTag(imgID image.ID, dependenciesSeen map[layer.ChainID]*v1DependencyImage, referencedLayers *[]PushLayer) (imageListForThisTag []v1Image, err error) {
+	ics, ok := p.config.ImageStore.(*imageConfigStore)
+	if !ok {
+		return nil, fmt.Errorf("only image store images supported for v1 push")
+	}
+	img, err := ics.Store.Get(imgID)
+	if err != nil {
+		return nil, err
+	}
+
+	topLayerID := img.RootFS.ChainID()
+
+	pl, err := p.config.LayerStore.Get(topLayerID)
+	if err != nil {
+		return nil, fmt.Errorf("failed to get top layer from image: %v", err)
+	}
+	*referencedLayers = append(*referencedLayers, pl)
+
+	// V1 push is deprecated, only support existing layerstore layers
+	lsl, ok := pl.(*storeLayer)
+	if !ok {
+		return nil, fmt.Errorf("only layer store layers supported for v1 push")
+	}
+	l := lsl.Layer
+
+	dependencyImages, parent := generateDependencyImages(l.Parent(), dependenciesSeen)
+
+	topImage, err := newV1TopImage(imgID, img, l, parent)
+	if err != nil {
+		return nil, err
+	}
+
+	imageListForThisTag = append(dependencyImages, topImage)
+
+	return
+}
+
+func generateDependencyImages(l layer.Layer, dependenciesSeen map[layer.ChainID]*v1DependencyImage) (imageListForThisTag []v1Image, parent *v1DependencyImage) {
+	if l == nil {
+		return nil, nil
+	}
+
+	imageListForThisTag, parent = generateDependencyImages(l.Parent(), dependenciesSeen)
+
+	if dependenciesSeen != nil {
+		if dependencyImage, present := dependenciesSeen[l.ChainID()]; present {
+			// This layer is already on the list, we can ignore it
+			// and all its parents.
+			return imageListForThisTag, dependencyImage
+		}
+	}
+
+	dependencyImage := newV1DependencyImage(l, parent)
+	imageListForThisTag = append(imageListForThisTag, dependencyImage)
+
+	if dependenciesSeen != nil {
+		dependenciesSeen[l.ChainID()] = dependencyImage
+	}
+
+	return imageListForThisTag, dependencyImage
+}
+
+// createImageIndex returns an index of an image's layer IDs and tags.
+func createImageIndex(images []v1Image, tags map[image.ID][]string) []*registry.ImgData {
+	var imageIndex []*registry.ImgData
+	for _, img := range images {
+		v1ID := img.V1ID()
+
+		if topImage, isTopImage := img.(*v1TopImage); isTopImage {
+			if tags, hasTags := tags[topImage.imageID]; hasTags {
+				// If an image has tags you must add an entry in the image index
+				// for each tag
+				for _, tag := range tags {
+					imageIndex = append(imageIndex, &registry.ImgData{
+						ID:  v1ID,
+						Tag: tag,
+					})
+				}
+				continue
+			}
+		}
+
+		// If the image does not have a tag it still needs to be sent to the
+		// registry with an empty tag so that it is associated with the repository
+		imageIndex = append(imageIndex, &registry.ImgData{
+			ID:  v1ID,
+			Tag: "",
+		})
+	}
+	return imageIndex
+}
+
+// lookupImageOnEndpoint checks the specified endpoint to see if an image exists,
+// and, if it is absent, sends the image ID on the channel of images to be pushed.
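+// It is run as one of several workers draining the images channel; see
+// pushImageToEndpoint for the fan-out/fan-in wiring.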
+func (p *v1Pusher) lookupImageOnEndpoint(wg *sync.WaitGroup, endpoint string, images chan v1Image, imagesToPush chan string) {
+	defer wg.Done()
+	for image := range images {
+		v1ID := image.V1ID()
+		truncID := stringid.TruncateID(image.Layer().DiffID().String())
+		if err := p.session.LookupRemoteImage(v1ID, endpoint); err != nil {
+			logrus.Errorf("Error in LookupRemoteImage: %s", err)
+			imagesToPush <- v1ID
+			progress.Update(p.config.ProgressOutput, truncID, "Waiting")
+		} else {
+			progress.Update(p.config.ProgressOutput, truncID, "Already exists")
+		}
+	}
+}
+
+func (p *v1Pusher) pushImageToEndpoint(ctx context.Context, endpoint string, imageList []v1Image, tags map[image.ID][]string, repo *registry.RepositoryData) error {
+	workerCount := len(imageList)
+	// start a maximum of 5 workers to check if images exist on the specified endpoint.
+	if workerCount > 5 {
+		workerCount = 5
+	}
+	var (
+		wg           = &sync.WaitGroup{}
+		imageData    = make(chan v1Image, workerCount*2)
+		imagesToPush = make(chan string, workerCount*2)
+		pushes       = make(chan map[string]struct{}, 1)
+	)
+	for i := 0; i < workerCount; i++ {
+		wg.Add(1)
+		go p.lookupImageOnEndpoint(wg, endpoint, imageData, imagesToPush)
+	}
+	// start a goroutine that consumes the images to push
+	go func() {
+		shouldPush := make(map[string]struct{})
+		for id := range imagesToPush {
+			shouldPush[id] = struct{}{}
+		}
+		pushes <- shouldPush
+	}()
+	for _, v1Image := range imageList {
+		imageData <- v1Image
+	}
+	// close the channel to notify the workers that there will be no more images to check.
+	close(imageData)
+	wg.Wait()
+	close(imagesToPush)
+	// wait for all the images that require pushes to be collected into a consumable map.
+	shouldPush := <-pushes
+	// finish by pushing any images and tags to the endpoint. The order in which the images
+	// are pushed is very important, which is why we are still iterating over the ordered
+	// list of imageIDs.
+	for _, img := range imageList {
+		v1ID := img.V1ID()
+		if _, push := shouldPush[v1ID]; push {
+			if _, err := p.pushImage(ctx, img, endpoint); err != nil {
+				// FIXME: Continue on error?
+				return err
+			}
+		}
+		if topImage, isTopImage := img.(*v1TopImage); isTopImage {
+			for _, tag := range tags[topImage.imageID] {
+				progress.Messagef(p.config.ProgressOutput, "", "Pushing tag for rev [%s] on {%s}", stringid.TruncateID(v1ID), endpoint+"repositories/"+reference.Path(p.repoInfo.Name)+"/tags/"+tag)
+				if err := p.session.PushRegistryTag(p.repoInfo.Name, v1ID, tag, endpoint); err != nil {
+					return err
+				}
+			}
+		}
+	}
+	return nil
+}
+
+// pushRepository pushes layers that do not already exist on the registry.
+func (p *v1Pusher) pushRepository(ctx context.Context) error {
+	imgList, tags, referencedLayers, err := p.getImageList()
+	defer func() {
+		for _, l := range referencedLayers {
+			l.Release()
+		}
+	}()
+	if err != nil {
+		return err
+	}
+
+	imageIndex := createImageIndex(imgList, tags)
+	for _, data := range imageIndex {
+		logrus.Debugf("Pushing ID: %s with Tag: %s", data.ID, data.Tag)
+	}
+
+	// Register all the images in a repository with the registry
+	// If an image is not in this list it will not be associated with the repository
+	repoData, err := p.session.PushImageJSONIndex(p.repoInfo.Name, imageIndex, false, nil)
+	if err != nil {
+		return err
+	}
+	// push the repository to each of the endpoints only if it does not exist.
+	for _, endpoint := range repoData.Endpoints {
+		if err := p.pushImageToEndpoint(ctx, endpoint, imgList, tags, repoData); err != nil {
+			return err
+		}
+	}
+	_, err = p.session.PushImageJSONIndex(p.repoInfo.Name, imageIndex, true, repoData.Endpoints)
+	return err
+}
+
+func (p *v1Pusher) pushImage(ctx context.Context, v1Image v1Image, ep string) (checksum string, err error) {
+	l := v1Image.Layer()
+	v1ID := v1Image.V1ID()
+	truncID := stringid.TruncateID(l.DiffID().String())
+
+	jsonRaw := v1Image.Config()
+	progress.Update(p.config.ProgressOutput, truncID, "Pushing")
+
+	// General rule is to use ID for graph accesses and compatibilityID for
+	// calls to session.registry()
+	imgData := &registry.ImgData{
+		ID: v1ID,
+	}
+
+	// Send the json
+	if err := p.session.PushImageJSONRegistry(imgData, jsonRaw, ep); err != nil {
+		if err == registry.ErrAlreadyExists {
+			progress.Update(p.config.ProgressOutput, truncID, "Image already pushed, skipping")
+			return "", nil
+		}
+		return "", err
+	}
+
+	arch, err := l.TarStream()
+	if err != nil {
+		return "", err
+	}
+	defer arch.Close()
+
+	// don't care if this fails; best effort
+	size, _ := l.DiffSize()
+
+	// Send the layer
+	logrus.Debugf("rendered layer for %s of [%d] size", v1ID, size)
+
+	reader := progress.NewProgressReader(ioutils.NewCancelReadCloser(ctx, arch), p.config.ProgressOutput, size, truncID, "Pushing")
+	defer reader.Close()
+
+	checksum, checksumPayload, err := p.session.PushImageLayerRegistry(v1ID, reader, ep, jsonRaw)
+	if err != nil {
+		return "", err
+	}
+	imgData.Checksum = checksum
+	imgData.ChecksumPayload = checksumPayload
+	// Send the checksum
+	if err := p.session.PushImageChecksumRegistry(imgData, ep); err != nil {
+		return "", err
+	}
+
+	if err := p.v1IDService.Set(v1ID, p.repoInfo.Index.Name, l.DiffID()); err != nil {
+		logrus.Warnf("Could not set v1 ID mapping: %v", err)
+	}
+
+	progress.Update(p.config.ProgressOutput, truncID, "Image successfully pushed")
+	return imgData.Checksum, nil
+}
diff --git a/components/engine/distribution/push_v2.go b/components/engine/distribution/push_v2.go
new file mode 100644
index 0000000000..5a6673780c
--- /dev/null
+++ b/components/engine/distribution/push_v2.go
@@ -0,0 +1,691 @@
+package distribution
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"runtime"
+	"sort"
+	"strings"
+	"sync"
+
+	"golang.org/x/net/context"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/distribution"
+	"github.com/docker/distribution/manifest/schema1"
+	"github.com/docker/distribution/manifest/schema2"
+	"github.com/docker/distribution/reference"
+	"github.com/docker/distribution/registry/client"
+	apitypes "github.com/docker/docker/api/types"
+	"github.com/docker/docker/distribution/metadata"
+	"github.com/docker/docker/distribution/xfer"
+	"github.com/docker/docker/layer"
+	"github.com/docker/docker/pkg/ioutils"
+	"github.com/docker/docker/pkg/progress"
+	"github.com/docker/docker/pkg/stringid"
+	"github.com/docker/docker/registry"
+	"github.com/opencontainers/go-digest"
+)
+
+const (
+	smallLayerMaximumSize  = 100 * (1 << 10) // 100KB
+	middleLayerMaximumSize = 10 * (1 << 20)  // 10MB
+)
+
+type v2Pusher struct {
+	v2MetadataService metadata.V2MetadataService
+	ref               reference.Named
+	endpoint          registry.APIEndpoint
+	repoInfo          *registry.RepositoryInfo
+	config            *ImagePushConfig
+	repo              distribution.Repository
+
+	// pushState is state built by the Upload functions.
+	pushState pushState
+}
+
+type pushState struct {
+	sync.Mutex
+	// remoteLayers is the set of layers known to exist on the remote side.
+	// This avoids redundant queries when pushing multiple tags that
+	// involve the same layers. It is also used to fill in digest and size
+	// information when building the manifest.
+	remoteLayers map[layer.DiffID]distribution.Descriptor
+	// confirmedV2 is set to true if we confirm we're talking to a v2
+	// registry. This is used to limit fallbacks to the v1 protocol.
+	confirmedV2 bool
+}
+
+func (p *v2Pusher) Push(ctx context.Context) (err error) {
+	p.pushState.remoteLayers = make(map[layer.DiffID]distribution.Descriptor)
+
+	p.repo, p.pushState.confirmedV2, err = NewV2Repository(ctx, p.repoInfo, p.endpoint, p.config.MetaHeaders, p.config.AuthConfig, "push", "pull")
+	if err != nil {
+		logrus.Debugf("Error getting v2 registry: %v", err)
+		return err
+	}
+
+	if err = p.pushV2Repository(ctx); err != nil {
+		if continueOnError(err) {
+			return fallbackError{
+				err:         err,
+				confirmedV2: p.pushState.confirmedV2,
+				transportOK: true,
+			}
+		}
+	}
+	return err
+}
+
+func (p *v2Pusher) pushV2Repository(ctx context.Context) (err error) {
+	if namedTagged, isNamedTagged := p.ref.(reference.NamedTagged); isNamedTagged {
+		imageID, err := p.config.ReferenceStore.Get(p.ref)
+		if err != nil {
+			return fmt.Errorf("tag does not exist: %s", reference.FamiliarString(p.ref))
+		}
+
+		return p.pushV2Tag(ctx, namedTagged, imageID)
+	}
+
+	if !reference.IsNameOnly(p.ref) {
+		return errors.New("cannot push a digest reference")
+	}
+
+	// Push all tags
+	pushed := 0
+	for _, association := range p.config.ReferenceStore.ReferencesByName(p.ref) {
+		if namedTagged, isNamedTagged := association.Ref.(reference.NamedTagged); isNamedTagged {
+			pushed++
+			if err := p.pushV2Tag(ctx, namedTagged, association.ID); err != nil {
+				return err
+			}
+		}
+	}
+
+	if pushed == 0 {
+		return fmt.Errorf("no tags to push for %s", reference.FamiliarName(p.repoInfo.Name))
+	}
+
+	return nil
+}
+
+func (p *v2Pusher) pushV2Tag(ctx context.Context, ref reference.NamedTagged, id digest.Digest) error {
+	logrus.Debugf("Pushing repository: %s", reference.FamiliarString(ref))
+
+	imgConfig, err := p.config.ImageStore.Get(id)
+	if err != nil {
+		return fmt.Errorf("could not find image from tag %s: %v", reference.FamiliarString(ref), err)
+	}
+
+	rootfs, err := p.config.ImageStore.RootFSFromConfig(imgConfig)
+	if err != nil {
+		return fmt.Errorf("unable to get rootfs for image %s: %s", reference.FamiliarString(ref), err)
+	}
+
+	l, err := p.config.LayerStore.Get(rootfs.ChainID())
+	if err != nil {
+		return fmt.Errorf("failed to get top layer from image: %v", err)
+	}
+	defer l.Release()
+
+	hmacKey, err := metadata.ComputeV2MetadataHMACKey(p.config.AuthConfig)
+	if err != nil {
+		return fmt.Errorf("failed to compute hmac key of auth config: %v", err)
+	}
+
+	var descriptors []xfer.UploadDescriptor
+
+	descriptorTemplate := v2PushDescriptor{
+		v2MetadataService: p.v2MetadataService,
+		hmacKey:           hmacKey,
+		repoInfo:          p.repoInfo.Name,
+		ref:               p.ref,
+		endpoint:          p.endpoint,
+		repo:              p.repo,
+		pushState:         &p.pushState,
+	}
+
+	// Loop bounds condition is to avoid pushing the base layer on Windows.
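+	// Build one upload descriptor per layer, walking from the top layer down
+	// to the base via Parent(); each iteration captures the current layer in
+	// a copy of the shared descriptor template.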
+ for i := 0; i < len(rootfs.DiffIDs); i++ { + descriptor := descriptorTemplate + descriptor.layer = l + descriptor.checkedDigests = make(map[digest.Digest]struct{}) + descriptors = append(descriptors, &descriptor) + + l = l.Parent() + } + + if err := p.config.UploadManager.Upload(ctx, descriptors, p.config.ProgressOutput); err != nil { + return err + } + + // Try schema2 first + builder := schema2.NewManifestBuilder(p.repo.Blobs(ctx), p.config.ConfigMediaType, imgConfig) + manifest, err := manifestFromBuilder(ctx, builder, descriptors) + if err != nil { + return err + } + + manSvc, err := p.repo.Manifests(ctx) + if err != nil { + return err + } + + putOptions := []distribution.ManifestServiceOption{distribution.WithTag(ref.Tag())} + if _, err = manSvc.Put(ctx, manifest, putOptions...); err != nil { + if runtime.GOOS == "windows" || p.config.TrustKey == nil || p.config.RequireSchema2 { + logrus.Warnf("failed to upload schema2 manifest: %v", err) + return err + } + + logrus.Warnf("failed to upload schema2 manifest: %v - falling back to schema1", err) + + manifestRef, err := reference.WithTag(p.repo.Named(), ref.Tag()) + if err != nil { + return err + } + builder = schema1.NewConfigManifestBuilder(p.repo.Blobs(ctx), p.config.TrustKey, manifestRef, imgConfig) + manifest, err = manifestFromBuilder(ctx, builder, descriptors) + if err != nil { + return err + } + + if _, err = manSvc.Put(ctx, manifest, putOptions...); err != nil { + return err + } + } + + var canonicalManifest []byte + + switch v := manifest.(type) { + case *schema1.SignedManifest: + canonicalManifest = v.Canonical + case *schema2.DeserializedManifest: + _, canonicalManifest, err = v.Payload() + if err != nil { + return err + } + } + + manifestDigest := digest.FromBytes(canonicalManifest) + progress.Messagef(p.config.ProgressOutput, "", "%s: digest: %s size: %d", ref.Tag(), manifestDigest, len(canonicalManifest)) + + if err := addDigestReference(p.config.ReferenceStore, ref, manifestDigest, id); err != nil { + return err + } + + // Signal digest to the trust client so it can sign the + // push, if appropriate. + progress.Aux(p.config.ProgressOutput, apitypes.PushResult{Tag: ref.Tag(), Digest: manifestDigest.String(), Size: len(canonicalManifest)}) + + return nil +} + +func manifestFromBuilder(ctx context.Context, builder distribution.ManifestBuilder, descriptors []xfer.UploadDescriptor) (distribution.Manifest, error) { + // descriptors is in reverse order; iterate backwards to get references + // appended in the right order. 
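+	// (pushV2Tag built the descriptors from the top layer down, while a
+	// manifest lists layers base-first.)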
+ for i := len(descriptors) - 1; i >= 0; i-- { + if err := builder.AppendReference(descriptors[i].(*v2PushDescriptor)); err != nil { + return nil, err + } + } + + return builder.Build(ctx) +} + +type v2PushDescriptor struct { + layer PushLayer + v2MetadataService metadata.V2MetadataService + hmacKey []byte + repoInfo reference.Named + ref reference.Named + endpoint registry.APIEndpoint + repo distribution.Repository + pushState *pushState + remoteDescriptor distribution.Descriptor + // a set of digests whose presence has been checked in a target repository + checkedDigests map[digest.Digest]struct{} +} + +func (pd *v2PushDescriptor) Key() string { + return "v2push:" + pd.ref.Name() + " " + pd.layer.DiffID().String() +} + +func (pd *v2PushDescriptor) ID() string { + return stringid.TruncateID(pd.layer.DiffID().String()) +} + +func (pd *v2PushDescriptor) DiffID() layer.DiffID { + return pd.layer.DiffID() +} + +func (pd *v2PushDescriptor) Upload(ctx context.Context, progressOutput progress.Output) (distribution.Descriptor, error) { + // Skip foreign layers unless this registry allows nondistributable artifacts. + if !pd.endpoint.AllowNondistributableArtifacts { + if fs, ok := pd.layer.(distribution.Describable); ok { + if d := fs.Descriptor(); len(d.URLs) > 0 { + progress.Update(progressOutput, pd.ID(), "Skipped foreign layer") + return d, nil + } + } + } + + diffID := pd.DiffID() + + pd.pushState.Lock() + if descriptor, ok := pd.pushState.remoteLayers[diffID]; ok { + // it is already known that the push is not needed and + // therefore doing a stat is unnecessary + pd.pushState.Unlock() + progress.Update(progressOutput, pd.ID(), "Layer already exists") + return descriptor, nil + } + pd.pushState.Unlock() + + maxMountAttempts, maxExistenceChecks, checkOtherRepositories := getMaxMountAndExistenceCheckAttempts(pd.layer) + + // Do we have any metadata associated with this layer's DiffID? + v2Metadata, err := pd.v2MetadataService.GetMetadata(diffID) + if err == nil { + // check for blob existence in the target repository + descriptor, exists, err := pd.layerAlreadyExists(ctx, progressOutput, diffID, true, 1, v2Metadata) + if exists || err != nil { + return descriptor, err + } + } + + // if digest was empty or not saved, or if blob does not exist on the remote repository, + // then push the blob. 
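+	// Try to avoid the transfer entirely: first attempt cross-repository
+	// mounts from candidate source repositories, then spend any remaining
+	// existence checks, and only fall back to a full upload if both fail.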
+	bs := pd.repo.Blobs(ctx)
+
+	var layerUpload distribution.BlobWriter
+
+	// Attempt to find another repository in the same registry to mount the layer from, to avoid an unnecessary upload
+	candidates := getRepositoryMountCandidates(pd.repoInfo, pd.hmacKey, maxMountAttempts, v2Metadata)
+	for _, mountCandidate := range candidates {
+		logrus.Debugf("attempting to mount layer %s (%s) from %s", diffID, mountCandidate.Digest, mountCandidate.SourceRepository)
+		createOpts := []distribution.BlobCreateOption{}
+
+		if len(mountCandidate.SourceRepository) > 0 {
+			namedRef, err := reference.ParseNormalizedNamed(mountCandidate.SourceRepository)
+			if err != nil {
+				logrus.Errorf("failed to parse source repository reference %v: %v", mountCandidate.SourceRepository, err)
+				pd.v2MetadataService.Remove(mountCandidate)
+				continue
+			}
+
+			// Candidates are always under the same domain; create a remote
+			// reference containing only the path to mount from.
+			remoteRef, err := reference.WithName(reference.Path(namedRef))
+			if err != nil {
+				logrus.Errorf("failed to make remote reference out of %q: %v", reference.Path(namedRef), err)
+				continue
+			}
+
+			canonicalRef, err := reference.WithDigest(reference.TrimNamed(remoteRef), mountCandidate.Digest)
+			if err != nil {
+				logrus.Errorf("failed to make canonical reference: %v", err)
+				continue
+			}
+
+			createOpts = append(createOpts, client.WithMountFrom(canonicalRef))
+		}
+
+		// send the layer
+		lu, err := bs.Create(ctx, createOpts...)
+		switch err := err.(type) {
+		case nil:
+			// noop
+		case distribution.ErrBlobMounted:
+			progress.Updatef(progressOutput, pd.ID(), "Mounted from %s", err.From.Name())
+
+			err.Descriptor.MediaType = schema2.MediaTypeLayer
+
+			pd.pushState.Lock()
+			pd.pushState.confirmedV2 = true
+			pd.pushState.remoteLayers[diffID] = err.Descriptor
+			pd.pushState.Unlock()
+
+			// Cache mapping from this layer's DiffID to the blobsum
+			if err := pd.v2MetadataService.TagAndAdd(diffID, pd.hmacKey, metadata.V2Metadata{
+				Digest:           err.Descriptor.Digest,
+				SourceRepository: pd.repoInfo.Name(),
+			}); err != nil {
+				return distribution.Descriptor{}, xfer.DoNotRetry{Err: err}
+			}
+			return err.Descriptor, nil
+		default:
+			logrus.Infof("failed to mount layer %s (%s) from %s: %v", diffID, mountCandidate.Digest, mountCandidate.SourceRepository, err)
+		}
+
+		if len(mountCandidate.SourceRepository) > 0 &&
+			(metadata.CheckV2MetadataHMAC(&mountCandidate, pd.hmacKey) ||
+				len(mountCandidate.HMAC) == 0) {
+			cause := "blob mount failure"
+			if err != nil {
+				cause = fmt.Sprintf("an error: %v", err.Error())
+			}
+			logrus.Debugf("removing association between layer %s and %s due to %s", mountCandidate.Digest, mountCandidate.SourceRepository, cause)
+			pd.v2MetadataService.Remove(mountCandidate)
+		}
+
+		if lu != nil {
+			// cancel previous upload
+			cancelLayerUpload(ctx, mountCandidate.Digest, layerUpload)
+			layerUpload = lu
+		}
+	}
+
+	if maxExistenceChecks-len(pd.checkedDigests) > 0 {
+		// do additional layer existence checks with other known digests if any
+		descriptor, exists, err := pd.layerAlreadyExists(ctx, progressOutput, diffID, checkOtherRepositories, maxExistenceChecks-len(pd.checkedDigests), v2Metadata)
+		if exists || err != nil {
+			return descriptor, err
+		}
+	}
+
+	logrus.Debugf("Pushing layer: %s", diffID)
+	if layerUpload == nil {
+		layerUpload, err = bs.Create(ctx)
+		if err != nil {
+			return distribution.Descriptor{}, retryOnError(err)
+		}
+	}
+	defer layerUpload.Close()
+
+	// upload the blob
+	desc, err := pd.uploadUsingSession(ctx, progressOutput, diffID,
layerUpload) + if err != nil { + return desc, err + } + + return desc, nil +} + +func (pd *v2PushDescriptor) SetRemoteDescriptor(descriptor distribution.Descriptor) { + pd.remoteDescriptor = descriptor +} + +func (pd *v2PushDescriptor) Descriptor() distribution.Descriptor { + return pd.remoteDescriptor +} + +func (pd *v2PushDescriptor) uploadUsingSession( + ctx context.Context, + progressOutput progress.Output, + diffID layer.DiffID, + layerUpload distribution.BlobWriter, +) (distribution.Descriptor, error) { + var reader io.ReadCloser + + contentReader, err := pd.layer.Open() + if err != nil { + return distribution.Descriptor{}, retryOnError(err) + } + + size, _ := pd.layer.Size() + + reader = progress.NewProgressReader(ioutils.NewCancelReadCloser(ctx, contentReader), progressOutput, size, pd.ID(), "Pushing") + + switch m := pd.layer.MediaType(); m { + case schema2.MediaTypeUncompressedLayer: + compressedReader, compressionDone := compress(reader) + defer func(closer io.Closer) { + closer.Close() + <-compressionDone + }(reader) + reader = compressedReader + case schema2.MediaTypeLayer: + default: + reader.Close() + return distribution.Descriptor{}, fmt.Errorf("unsupported layer media type %s", m) + } + + digester := digest.Canonical.Digester() + tee := io.TeeReader(reader, digester.Hash()) + + nn, err := layerUpload.ReadFrom(tee) + reader.Close() + if err != nil { + return distribution.Descriptor{}, retryOnError(err) + } + + pushDigest := digester.Digest() + if _, err := layerUpload.Commit(ctx, distribution.Descriptor{Digest: pushDigest}); err != nil { + return distribution.Descriptor{}, retryOnError(err) + } + + logrus.Debugf("uploaded layer %s (%s), %d bytes", diffID, pushDigest, nn) + progress.Update(progressOutput, pd.ID(), "Pushed") + + // Cache mapping from this layer's DiffID to the blobsum + if err := pd.v2MetadataService.TagAndAdd(diffID, pd.hmacKey, metadata.V2Metadata{ + Digest: pushDigest, + SourceRepository: pd.repoInfo.Name(), + }); err != nil { + return distribution.Descriptor{}, xfer.DoNotRetry{Err: err} + } + + desc := distribution.Descriptor{ + Digest: pushDigest, + MediaType: schema2.MediaTypeLayer, + Size: nn, + } + + pd.pushState.Lock() + // If Commit succeeded, that's an indication that the remote registry speaks the v2 protocol. + pd.pushState.confirmedV2 = true + pd.pushState.remoteLayers[diffID] = desc + pd.pushState.Unlock() + + return desc, nil +} + +// layerAlreadyExists checks if the registry already knows about any of the metadata passed in the "metadata" +// slice. If it finds one that the registry knows about, it returns the known digest and "true". If +// "checkOtherRepositories" is true, stat will be performed also with digests mapped to any other repository +// (not just the target one). 
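+// Stat errors other than distribution.ErrBlobUnknown are logged and skipped,
+// so a nil error with exists == false only means that none of the attempted
+// digests could be confirmed on the remote.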
+func (pd *v2PushDescriptor) layerAlreadyExists(
+	ctx context.Context,
+	progressOutput progress.Output,
+	diffID layer.DiffID,
+	checkOtherRepositories bool,
+	maxExistenceCheckAttempts int,
+	v2Metadata []metadata.V2Metadata,
+) (desc distribution.Descriptor, exists bool, err error) {
+	// filter the metadata
+	candidates := []metadata.V2Metadata{}
+	for _, meta := range v2Metadata {
+		if len(meta.SourceRepository) > 0 && !checkOtherRepositories && meta.SourceRepository != pd.repoInfo.Name() {
+			continue
+		}
+		candidates = append(candidates, meta)
+	}
+	// sort the candidates by similarity
+	sortV2MetadataByLikenessAndAge(pd.repoInfo, pd.hmacKey, candidates)
+
+	digestToMetadata := make(map[digest.Digest]*metadata.V2Metadata)
+	// an array of unique blob digests ordered from the best mount candidates to worst
+	layerDigests := []digest.Digest{}
+	for i := 0; i < len(candidates); i++ {
+		if len(layerDigests) >= maxExistenceCheckAttempts {
+			break
+		}
+		meta := &candidates[i]
+		if _, exists := digestToMetadata[meta.Digest]; exists {
+			// keep reference just to the first mapping (the best mount candidate)
+			continue
+		}
+		if _, exists := pd.checkedDigests[meta.Digest]; exists {
+			// existence of this digest has already been tested
+			continue
+		}
+		digestToMetadata[meta.Digest] = meta
+		layerDigests = append(layerDigests, meta.Digest)
+	}
+
+attempts:
+	for _, dgst := range layerDigests {
+		meta := digestToMetadata[dgst]
+		logrus.Debugf("Checking for presence of layer %s (%s) in %s", diffID, dgst, pd.repoInfo.Name())
+		desc, err = pd.repo.Blobs(ctx).Stat(ctx, dgst)
+		pd.checkedDigests[meta.Digest] = struct{}{}
+		switch err {
+		case nil:
+			if m, ok := digestToMetadata[desc.Digest]; !ok || m.SourceRepository != pd.repoInfo.Name() || !metadata.CheckV2MetadataHMAC(m, pd.hmacKey) {
+				// cache mapping from this layer's DiffID to the blobsum
+				if err := pd.v2MetadataService.TagAndAdd(diffID, pd.hmacKey, metadata.V2Metadata{
+					Digest:           desc.Digest,
+					SourceRepository: pd.repoInfo.Name(),
+				}); err != nil {
+					return distribution.Descriptor{}, false, xfer.DoNotRetry{Err: err}
+				}
+			}
+			desc.MediaType = schema2.MediaTypeLayer
+			exists = true
+			break attempts
+		case distribution.ErrBlobUnknown:
+			if meta.SourceRepository == pd.repoInfo.Name() {
+				// remove the mapping to the target repository
+				pd.v2MetadataService.Remove(*meta)
+			}
+		default:
+			logrus.WithError(err).Debugf("Failed to check for presence of layer %s (%s) in %s", diffID, dgst, pd.repoInfo.Name())
+		}
+	}
+
+	if exists {
+		progress.Update(progressOutput, pd.ID(), "Layer already exists")
+		pd.pushState.Lock()
+		pd.pushState.remoteLayers[diffID] = desc
+		pd.pushState.Unlock()
+	}
+
+	return desc, exists, nil
+}
+
+// getMaxMountAndExistenceCheckAttempts returns the maximum number of cross repository mount attempts from
+// source repositories of the target registry, the maximum number of layer existence checks performed on the
+// target repository, and whether the check shall also be done with digests mapped to other repositories. The
+// decision is based on layer size: the smaller the layer, the fewer attempts are made, because for small
+// layers the cost of an upload does not outweigh the latency of the extra checks.
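+//
+// A summary of the policy below (size thresholds per the constants at the top
+// of this file):
+//	size > 10MB           -> 4 mount attempts, 3 existence checks, other repositories included
+//	100KB < size <= 10MB  -> 3 mount attempts, 1 existence check (also used when size is unknown)
+//	size <= 100KB         -> 1 mount attempt, 1 existence check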
+func getMaxMountAndExistenceCheckAttempts(layer PushLayer) (maxMountAttempts, maxExistenceCheckAttempts int, checkOtherRepositories bool) {
+	size, err := layer.Size()
+	switch {
+	// big blob
+	case size > middleLayerMaximumSize:
+		// 1st attempt to mount the blob a few times
+		// 2nd a few existence checks with digests associated to any repository
+		// then fallback to upload
+		return 4, 3, true
+
+	// middle-sized blobs; if we could not get the size, assume we deal with a middle-sized blob
+	case size > smallLayerMaximumSize, err != nil:
+		// 1st attempt to mount blobs of average size a few times
+		// 2nd try at most 1 existence check if there's an existing mapping to the target repository
+		// then fallback to upload
+		return 3, 1, false
+
+	// small blobs, do a minimum number of checks
+	default:
+		return 1, 1, false
+	}
+}
+
+// getRepositoryMountCandidates returns an array of v2 metadata items belonging to the given registry. The
+// array is sorted from youngest to oldest. Only metadata entries whose registry part of SourceRepository
+// matches that of repoInfo are returned; the target repository itself is excluded.
+func getRepositoryMountCandidates(
+	repoInfo reference.Named,
+	hmacKey []byte,
+	max int,
+	v2Metadata []metadata.V2Metadata,
+) []metadata.V2Metadata {
+	candidates := []metadata.V2Metadata{}
+	for _, meta := range v2Metadata {
+		sourceRepo, err := reference.ParseNamed(meta.SourceRepository)
+		if err != nil || reference.Domain(repoInfo) != reference.Domain(sourceRepo) {
+			continue
+		}
+		// target repository is not a viable candidate
+		if meta.SourceRepository == repoInfo.Name() {
+			continue
+		}
+		candidates = append(candidates, meta)
+	}
+
+	sortV2MetadataByLikenessAndAge(repoInfo, hmacKey, candidates)
+	if max >= 0 && len(candidates) > max {
+		// select the youngest metadata
+		candidates = candidates[:max]
+	}
+
+	return candidates
+}
+
+// byLikeness is a sorting container for v2 metadata candidates for cross repository mount. The
+// candidate "a" is preferred over "b":
+//
+// 1. if it was hashed using the same AuthConfig as the one used to authenticate to the target repository
+//    and "b" was not
+// 2. if the number of its repository path components that exactly match the path components of the target
+//    repository is higher
+type byLikeness struct {
+	arr            []metadata.V2Metadata
+	hmacKey        []byte
+	pathComponents []string
+}
+
+func (bla byLikeness) Less(i, j int) bool {
+	aMacMatch := metadata.CheckV2MetadataHMAC(&bla.arr[i], bla.hmacKey)
+	bMacMatch := metadata.CheckV2MetadataHMAC(&bla.arr[j], bla.hmacKey)
+	if aMacMatch != bMacMatch {
+		return aMacMatch
+	}
+	aMatch := numOfMatchingPathComponents(bla.arr[i].SourceRepository, bla.pathComponents)
+	bMatch := numOfMatchingPathComponents(bla.arr[j].SourceRepository, bla.pathComponents)
+	return aMatch > bMatch
+}
+func (bla byLikeness) Swap(i, j int) {
+	bla.arr[i], bla.arr[j] = bla.arr[j], bla.arr[i]
+}
+func (bla byLikeness) Len() int { return len(bla.arr) }
+
+func sortV2MetadataByLikenessAndAge(repoInfo reference.Named, hmacKey []byte, marr []metadata.V2Metadata) {
+	// reverse the metadata array to shift the newest entries to the beginning
+	for i := 0; i < len(marr)/2; i++ {
+		marr[i], marr[len(marr)-i-1] = marr[len(marr)-i-1], marr[i]
+	}
+	// keep equal entries ordered from the youngest to the oldest
+	sort.Stable(byLikeness{
+		arr:            marr,
+		hmacKey:        hmacKey,
+		pathComponents: getPathComponents(repoInfo.Name()),
+	})
+}
+
+// numOfMatchingPathComponents returns the number of leading path components of "pth" that exactly match
+// "matchComponents" (e.g. numOfMatchingPathComponents("docker.io/user/app", []string{"docker.io", "user", "foo"}) == 2).
+func numOfMatchingPathComponents(pth string, matchComponents []string) int {
+	pthComponents := getPathComponents(pth)
+	i := 0
+	for ; i < len(pthComponents) && i < len(matchComponents); i++ {
+		if matchComponents[i] != pthComponents[i] {
+			return i
+		}
+	}
+	return i
+}
+
+func getPathComponents(path string) []string {
+	return strings.Split(path, "/")
+}
+
+func cancelLayerUpload(ctx context.Context, dgst digest.Digest, layerUpload distribution.BlobWriter) {
+	if layerUpload != nil {
+		logrus.Debugf("cancelling upload of blob %s", dgst)
+		err := layerUpload.Cancel(ctx)
+		if err != nil {
+			logrus.Warnf("failed to cancel upload: %v", err)
+		}
+	}
+}
diff --git a/components/engine/distribution/push_v2_test.go b/components/engine/distribution/push_v2_test.go
new file mode 100644
index 0000000000..e8e2b1af25
--- /dev/null
+++ b/components/engine/distribution/push_v2_test.go
@@ -0,0 +1,583 @@
+package distribution
+
+import (
+	"net/http"
+	"reflect"
+	"testing"
+
+	"github.com/docker/distribution"
+	"github.com/docker/distribution/context"
+	"github.com/docker/distribution/manifest/schema2"
+	"github.com/docker/distribution/reference"
+	"github.com/docker/docker/distribution/metadata"
+	"github.com/docker/docker/layer"
+	"github.com/docker/docker/pkg/progress"
+	"github.com/opencontainers/go-digest"
+)
+
+func TestGetRepositoryMountCandidates(t *testing.T) {
+	for _, tc := range []struct {
+		name          string
+		hmacKey       string
+		targetRepo    string
+		maxCandidates int
+		metadata      []metadata.V2Metadata
+		candidates    []metadata.V2Metadata
+	}{
+		{
+			name:          "empty metadata",
+			targetRepo:    "busybox",
+			maxCandidates: -1,
+			metadata:      []metadata.V2Metadata{},
+			candidates:    []metadata.V2Metadata{},
+		},
+		{
+			name:          "one item not matching",
+			targetRepo:    "busybox",
+			maxCandidates: -1,
+			metadata:      []metadata.V2Metadata{taggedMetadata("key", "dgst", "127.0.0.1/repo")},
+			candidates:    []metadata.V2Metadata{},
+		},
+		{
+			name:          "one item matching",
+			targetRepo:    "busybox",
+			maxCandidates: -1,
+			metadata:      []metadata.V2Metadata{taggedMetadata("hash", "1", "docker.io/library/hello-world")},
+			candidates:    []metadata.V2Metadata{taggedMetadata("hash", "1", "docker.io/library/hello-world")},
+		},
[]metadata.V2Metadata{taggedMetadata("hash", "1", "docker.io/library/hello-world")}, + }, + { + name: "allow missing SourceRepository", + targetRepo: "busybox", + maxCandidates: -1, + metadata: []metadata.V2Metadata{ + {Digest: digest.Digest("1")}, + {Digest: digest.Digest("3")}, + {Digest: digest.Digest("2")}, + }, + candidates: []metadata.V2Metadata{}, + }, + { + name: "handle docker.io", + targetRepo: "user/app", + maxCandidates: -1, + metadata: []metadata.V2Metadata{ + {Digest: digest.Digest("1"), SourceRepository: "docker.io/user/foo"}, + {Digest: digest.Digest("3"), SourceRepository: "docker.io/user/bar"}, + {Digest: digest.Digest("2"), SourceRepository: "docker.io/library/app"}, + }, + candidates: []metadata.V2Metadata{ + {Digest: digest.Digest("3"), SourceRepository: "docker.io/user/bar"}, + {Digest: digest.Digest("1"), SourceRepository: "docker.io/user/foo"}, + {Digest: digest.Digest("2"), SourceRepository: "docker.io/library/app"}, + }, + }, + { + name: "sort more items", + hmacKey: "abcd", + targetRepo: "127.0.0.1/foo/bar", + maxCandidates: -1, + metadata: []metadata.V2Metadata{ + taggedMetadata("hash", "1", "docker.io/library/hello-world"), + taggedMetadata("efgh", "2", "127.0.0.1/hello-world"), + taggedMetadata("abcd", "3", "docker.io/library/busybox"), + taggedMetadata("hash", "4", "docker.io/library/busybox"), + taggedMetadata("hash", "5", "127.0.0.1/foo"), + taggedMetadata("hash", "6", "127.0.0.1/bar"), + taggedMetadata("efgh", "7", "127.0.0.1/foo/bar"), + taggedMetadata("abcd", "8", "127.0.0.1/xyz"), + taggedMetadata("hash", "9", "127.0.0.1/foo/app"), + }, + candidates: []metadata.V2Metadata{ + // first by matching hash + taggedMetadata("abcd", "8", "127.0.0.1/xyz"), + // then by longest matching prefix + taggedMetadata("hash", "9", "127.0.0.1/foo/app"), + taggedMetadata("hash", "5", "127.0.0.1/foo"), + // sort the rest of the matching items in reversed order + taggedMetadata("hash", "6", "127.0.0.1/bar"), + taggedMetadata("efgh", "2", "127.0.0.1/hello-world"), + }, + }, + { + name: "limit max candidates", + hmacKey: "abcd", + targetRepo: "user/app", + maxCandidates: 3, + metadata: []metadata.V2Metadata{ + taggedMetadata("abcd", "1", "docker.io/user/app1"), + taggedMetadata("abcd", "2", "docker.io/user/app/base"), + taggedMetadata("hash", "3", "docker.io/user/app"), + taggedMetadata("abcd", "4", "127.0.0.1/user/app"), + taggedMetadata("hash", "5", "docker.io/user/foo"), + taggedMetadata("hash", "6", "docker.io/app/bar"), + }, + candidates: []metadata.V2Metadata{ + // first by matching hash + taggedMetadata("abcd", "2", "docker.io/user/app/base"), + taggedMetadata("abcd", "1", "docker.io/user/app1"), + // then by longest matching prefix + // "docker.io/usr/app" is excluded since candidates must + // be from a different repository + taggedMetadata("hash", "5", "docker.io/user/foo"), + }, + }, + } { + repoInfo, err := reference.ParseNormalizedNamed(tc.targetRepo) + if err != nil { + t.Fatalf("[%s] failed to parse reference name: %v", tc.name, err) + } + candidates := getRepositoryMountCandidates(repoInfo, []byte(tc.hmacKey), tc.maxCandidates, tc.metadata) + if len(candidates) != len(tc.candidates) { + t.Errorf("[%s] got unexpected number of candidates: %d != %d", tc.name, len(candidates), len(tc.candidates)) + } + for i := 0; i < len(candidates) && i < len(tc.candidates); i++ { + if !reflect.DeepEqual(candidates[i], tc.candidates[i]) { + t.Errorf("[%s] candidate %d does not match expected: %#+v != %#+v", tc.name, i, candidates[i], tc.candidates[i]) + } + } + for i := 
+		for i := len(candidates); i < len(tc.candidates); i++ {
+			t.Errorf("[%s] missing expected candidate at position %d (%#+v)", tc.name, i, tc.candidates[i])
+		}
+		for i := len(tc.candidates); i < len(candidates); i++ {
+			t.Errorf("[%s] got unexpected candidate at position %d (%#+v)", tc.name, i, candidates[i])
+		}
+	}
+}
+
+func TestLayerAlreadyExists(t *testing.T) {
+	for _, tc := range []struct {
+		name                   string
+		metadata               []metadata.V2Metadata
+		targetRepo             string
+		hmacKey                string
+		maxExistenceChecks     int
+		checkOtherRepositories bool
+		remoteBlobs            map[digest.Digest]distribution.Descriptor
+		remoteErrors           map[digest.Digest]error
+		expectedDescriptor     distribution.Descriptor
+		expectedExists         bool
+		expectedError          error
+		expectedRequests       []string
+		expectedAdditions      []metadata.V2Metadata
+		expectedRemovals       []metadata.V2Metadata
+	}{
+		{
+			name:                   "empty metadata",
+			targetRepo:             "busybox",
+			maxExistenceChecks:     3,
+			checkOtherRepositories: true,
+		},
+		{
+			name:               "single not existent metadata",
+			targetRepo:         "busybox",
+			metadata:           []metadata.V2Metadata{{Digest: digest.Digest("pear"), SourceRepository: "docker.io/library/busybox"}},
+			maxExistenceChecks: 3,
+			expectedRequests:   []string{"pear"},
+			expectedRemovals:   []metadata.V2Metadata{{Digest: digest.Digest("pear"), SourceRepository: "docker.io/library/busybox"}},
+		},
+		{
+			name:               "access denied",
+			targetRepo:         "busybox",
+			maxExistenceChecks: 1,
+			metadata:           []metadata.V2Metadata{{Digest: digest.Digest("apple"), SourceRepository: "docker.io/library/busybox"}},
+			remoteErrors:       map[digest.Digest]error{digest.Digest("apple"): distribution.ErrAccessDenied},
+			expectedError:      nil,
+			expectedRequests:   []string{"apple"},
+		},
+		{
+			name:               "not matching repositories",
+			targetRepo:         "busybox",
+			maxExistenceChecks: 3,
+			metadata: []metadata.V2Metadata{
+				{Digest: digest.Digest("apple"), SourceRepository: "docker.io/library/hello-world"},
+				{Digest: digest.Digest("orange"), SourceRepository: "docker.io/library/busybox/subapp"},
+				{Digest: digest.Digest("pear"), SourceRepository: "docker.io/busybox"},
+				{Digest: digest.Digest("plum"), SourceRepository: "busybox"},
+				{Digest: digest.Digest("banana"), SourceRepository: "127.0.0.1/busybox"},
+			},
+		},
+		{
+			name:                   "check other repositories",
+			targetRepo:             "busybox",
+			maxExistenceChecks:     10,
+			checkOtherRepositories: true,
+			metadata: []metadata.V2Metadata{
+				{Digest: digest.Digest("apple"), SourceRepository: "docker.io/library/hello-world"},
+				{Digest: digest.Digest("orange"), SourceRepository: "docker.io/busybox/subapp"},
+				{Digest: digest.Digest("pear"), SourceRepository: "docker.io/busybox"},
+				{Digest: digest.Digest("plum"), SourceRepository: "docker.io/library/busybox"},
+				{Digest: digest.Digest("banana"), SourceRepository: "127.0.0.1/busybox"},
+			},
+			expectedRequests: []string{"plum", "apple", "pear", "orange", "banana"},
+			expectedRemovals: []metadata.V2Metadata{
+				{Digest: digest.Digest("plum"), SourceRepository: "docker.io/library/busybox"},
+			},
+		},
+		{
+			name:               "find existing blob",
+			targetRepo:         "busybox",
+			metadata:           []metadata.V2Metadata{{Digest: digest.Digest("apple"), SourceRepository: "docker.io/library/busybox"}},
+			maxExistenceChecks: 3,
+			remoteBlobs:        map[digest.Digest]distribution.Descriptor{digest.Digest("apple"): {Digest: digest.Digest("apple")}},
+			expectedDescriptor: distribution.Descriptor{Digest: digest.Digest("apple"), MediaType: schema2.MediaTypeLayer},
+			expectedExists:     true,
+			expectedRequests:   []string{"apple"},
+		},
+		{
+			name:               "find existing blob with different hmac",
+			targetRepo:         "busybox",
+
metadata: []metadata.V2Metadata{{SourceRepository: "docker.io/library/busybox", Digest: digest.Digest("apple"), HMAC: "dummyhmac"}}, + maxExistenceChecks: 3, + remoteBlobs: map[digest.Digest]distribution.Descriptor{digest.Digest("apple"): {Digest: digest.Digest("apple")}}, + expectedDescriptor: distribution.Descriptor{Digest: digest.Digest("apple"), MediaType: schema2.MediaTypeLayer}, + expectedExists: true, + expectedRequests: []string{"apple"}, + expectedAdditions: []metadata.V2Metadata{{Digest: digest.Digest("apple"), SourceRepository: "docker.io/library/busybox"}}, + }, + { + name: "overwrite media types", + targetRepo: "busybox", + metadata: []metadata.V2Metadata{{Digest: digest.Digest("apple"), SourceRepository: "docker.io/library/busybox"}}, + hmacKey: "key", + maxExistenceChecks: 3, + remoteBlobs: map[digest.Digest]distribution.Descriptor{digest.Digest("apple"): {Digest: digest.Digest("apple"), MediaType: "custom-media-type"}}, + expectedDescriptor: distribution.Descriptor{Digest: digest.Digest("apple"), MediaType: schema2.MediaTypeLayer}, + expectedExists: true, + expectedRequests: []string{"apple"}, + expectedAdditions: []metadata.V2Metadata{taggedMetadata("key", "apple", "docker.io/library/busybox")}, + }, + { + name: "find existing blob among many", + targetRepo: "127.0.0.1/myapp", + hmacKey: "key", + metadata: []metadata.V2Metadata{ + taggedMetadata("someotherkey", "pear", "127.0.0.1/myapp"), + taggedMetadata("key", "apple", "127.0.0.1/myapp"), + taggedMetadata("", "plum", "127.0.0.1/myapp"), + }, + maxExistenceChecks: 3, + remoteBlobs: map[digest.Digest]distribution.Descriptor{digest.Digest("pear"): {Digest: digest.Digest("pear")}}, + expectedDescriptor: distribution.Descriptor{Digest: digest.Digest("pear"), MediaType: schema2.MediaTypeLayer}, + expectedExists: true, + expectedRequests: []string{"apple", "plum", "pear"}, + expectedAdditions: []metadata.V2Metadata{taggedMetadata("key", "pear", "127.0.0.1/myapp")}, + expectedRemovals: []metadata.V2Metadata{ + taggedMetadata("key", "apple", "127.0.0.1/myapp"), + {Digest: digest.Digest("plum"), SourceRepository: "127.0.0.1/myapp"}, + }, + }, + { + name: "reach maximum existence checks", + targetRepo: "user/app", + metadata: []metadata.V2Metadata{ + {Digest: digest.Digest("pear"), SourceRepository: "docker.io/user/app"}, + {Digest: digest.Digest("apple"), SourceRepository: "docker.io/user/app"}, + {Digest: digest.Digest("plum"), SourceRepository: "docker.io/user/app"}, + {Digest: digest.Digest("banana"), SourceRepository: "docker.io/user/app"}, + }, + maxExistenceChecks: 3, + remoteBlobs: map[digest.Digest]distribution.Descriptor{digest.Digest("pear"): {Digest: digest.Digest("pear")}}, + expectedExists: false, + expectedRequests: []string{"banana", "plum", "apple"}, + expectedRemovals: []metadata.V2Metadata{ + {Digest: digest.Digest("banana"), SourceRepository: "docker.io/user/app"}, + {Digest: digest.Digest("plum"), SourceRepository: "docker.io/user/app"}, + {Digest: digest.Digest("apple"), SourceRepository: "docker.io/user/app"}, + }, + }, + { + name: "zero allowed existence checks", + targetRepo: "user/app", + metadata: []metadata.V2Metadata{ + {Digest: digest.Digest("pear"), SourceRepository: "docker.io/user/app"}, + {Digest: digest.Digest("apple"), SourceRepository: "docker.io/user/app"}, + {Digest: digest.Digest("plum"), SourceRepository: "docker.io/user/app"}, + {Digest: digest.Digest("banana"), SourceRepository: "docker.io/user/app"}, + }, + maxExistenceChecks: 0, + remoteBlobs: 
map[digest.Digest]distribution.Descriptor{digest.Digest("pear"): {Digest: digest.Digest("pear")}}, + }, + { + name: "stat single digest just once", + targetRepo: "busybox", + metadata: []metadata.V2Metadata{ + taggedMetadata("key1", "pear", "docker.io/library/busybox"), + taggedMetadata("key2", "apple", "docker.io/library/busybox"), + taggedMetadata("key3", "apple", "docker.io/library/busybox"), + }, + maxExistenceChecks: 3, + remoteBlobs: map[digest.Digest]distribution.Descriptor{digest.Digest("pear"): {Digest: digest.Digest("pear")}}, + expectedDescriptor: distribution.Descriptor{Digest: digest.Digest("pear"), MediaType: schema2.MediaTypeLayer}, + expectedExists: true, + expectedRequests: []string{"apple", "pear"}, + expectedAdditions: []metadata.V2Metadata{{Digest: digest.Digest("pear"), SourceRepository: "docker.io/library/busybox"}}, + expectedRemovals: []metadata.V2Metadata{taggedMetadata("key3", "apple", "docker.io/library/busybox")}, + }, + { + name: "don't stop on first error", + targetRepo: "user/app", + hmacKey: "key", + metadata: []metadata.V2Metadata{ + taggedMetadata("key", "banana", "docker.io/user/app"), + taggedMetadata("key", "orange", "docker.io/user/app"), + taggedMetadata("key", "plum", "docker.io/user/app"), + }, + maxExistenceChecks: 3, + remoteErrors: map[digest.Digest]error{"orange": distribution.ErrAccessDenied}, + remoteBlobs: map[digest.Digest]distribution.Descriptor{digest.Digest("apple"): {}}, + expectedError: nil, + expectedRequests: []string{"plum", "orange", "banana"}, + expectedRemovals: []metadata.V2Metadata{ + taggedMetadata("key", "plum", "docker.io/user/app"), + taggedMetadata("key", "banana", "docker.io/user/app"), + }, + }, + { + name: "remove outdated metadata", + targetRepo: "docker.io/user/app", + metadata: []metadata.V2Metadata{ + {Digest: digest.Digest("plum"), SourceRepository: "docker.io/library/busybox"}, + {Digest: digest.Digest("orange"), SourceRepository: "docker.io/user/app"}, + }, + maxExistenceChecks: 3, + remoteErrors: map[digest.Digest]error{"orange": distribution.ErrBlobUnknown}, + remoteBlobs: map[digest.Digest]distribution.Descriptor{digest.Digest("plum"): {}}, + expectedExists: false, + expectedRequests: []string{"orange"}, + expectedRemovals: []metadata.V2Metadata{{Digest: digest.Digest("orange"), SourceRepository: "docker.io/user/app"}}, + }, + { + name: "missing SourceRepository", + targetRepo: "busybox", + metadata: []metadata.V2Metadata{ + {Digest: digest.Digest("1")}, + {Digest: digest.Digest("3")}, + {Digest: digest.Digest("2")}, + }, + maxExistenceChecks: 3, + expectedExists: false, + expectedRequests: []string{"2", "3", "1"}, + }, + + { + name: "with and without SourceRepository", + targetRepo: "busybox", + metadata: []metadata.V2Metadata{ + {Digest: digest.Digest("1")}, + {Digest: digest.Digest("2"), SourceRepository: "docker.io/library/busybox"}, + {Digest: digest.Digest("3")}, + }, + remoteBlobs: map[digest.Digest]distribution.Descriptor{digest.Digest("1"): {Digest: digest.Digest("1")}}, + maxExistenceChecks: 3, + expectedDescriptor: distribution.Descriptor{Digest: digest.Digest("1"), MediaType: schema2.MediaTypeLayer}, + expectedExists: true, + expectedRequests: []string{"2", "3", "1"}, + expectedAdditions: []metadata.V2Metadata{{Digest: digest.Digest("1"), SourceRepository: "docker.io/library/busybox"}}, + expectedRemovals: []metadata.V2Metadata{ + {Digest: digest.Digest("2"), SourceRepository: "docker.io/library/busybox"}, + }, + }, + } { + repoInfo, err := reference.ParseNormalizedNamed(tc.targetRepo) + if err != 
nil { + t.Fatalf("[%s] failed to parse reference name: %v", tc.name, err) + } + repo := &mockRepo{ + t: t, + errors: tc.remoteErrors, + blobs: tc.remoteBlobs, + requests: []string{}, + } + ctx := context.Background() + ms := &mockV2MetadataService{} + pd := &v2PushDescriptor{ + hmacKey: []byte(tc.hmacKey), + repoInfo: repoInfo, + layer: &storeLayer{ + Layer: layer.EmptyLayer, + }, + repo: repo, + v2MetadataService: ms, + pushState: &pushState{remoteLayers: make(map[layer.DiffID]distribution.Descriptor)}, + checkedDigests: make(map[digest.Digest]struct{}), + } + + desc, exists, err := pd.layerAlreadyExists(ctx, &progressSink{t}, layer.EmptyLayer.DiffID(), tc.checkOtherRepositories, tc.maxExistenceChecks, tc.metadata) + + if !reflect.DeepEqual(desc, tc.expectedDescriptor) { + t.Errorf("[%s] got unexpected descriptor: %#+v != %#+v", tc.name, desc, tc.expectedDescriptor) + } + if exists != tc.expectedExists { + t.Errorf("[%s] got unexpected exists: %t != %t", tc.name, exists, tc.expectedExists) + } + if !reflect.DeepEqual(err, tc.expectedError) { + t.Errorf("[%s] got unexpected error: %#+v != %#+v", tc.name, err, tc.expectedError) + } + + if len(repo.requests) != len(tc.expectedRequests) { + t.Errorf("[%s] got unexpected number of requests: %d != %d", tc.name, len(repo.requests), len(tc.expectedRequests)) + } + for i := 0; i < len(repo.requests) && i < len(tc.expectedRequests); i++ { + if repo.requests[i] != tc.expectedRequests[i] { + t.Errorf("[%s] request %d does not match expected: %q != %q", tc.name, i, repo.requests[i], tc.expectedRequests[i]) + } + } + for i := len(repo.requests); i < len(tc.expectedRequests); i++ { + t.Errorf("[%s] missing expected request at position %d (%q)", tc.name, i, tc.expectedRequests[i]) + } + for i := len(tc.expectedRequests); i < len(repo.requests); i++ { + t.Errorf("[%s] got unexpected request at position %d (%q)", tc.name, i, repo.requests[i]) + } + + if len(ms.added) != len(tc.expectedAdditions) { + t.Errorf("[%s] got unexpected number of additions: %d != %d", tc.name, len(ms.added), len(tc.expectedAdditions)) + } + for i := 0; i < len(ms.added) && i < len(tc.expectedAdditions); i++ { + if ms.added[i] != tc.expectedAdditions[i] { + t.Errorf("[%s] added metadata at %d does not match expected: %q != %q", tc.name, i, ms.added[i], tc.expectedAdditions[i]) + } + } + for i := len(ms.added); i < len(tc.expectedAdditions); i++ { + t.Errorf("[%s] missing expected addition at position %d (%q)", tc.name, i, tc.expectedAdditions[i]) + } + for i := len(tc.expectedAdditions); i < len(ms.added); i++ { + t.Errorf("[%s] unexpected metadata addition at position %d (%q)", tc.name, i, ms.added[i]) + } + + if len(ms.removed) != len(tc.expectedRemovals) { + t.Errorf("[%s] got unexpected number of removals: %d != %d", tc.name, len(ms.removed), len(tc.expectedRemovals)) + } + for i := 0; i < len(ms.removed) && i < len(tc.expectedRemovals); i++ { + if ms.removed[i] != tc.expectedRemovals[i] { + t.Errorf("[%s] removed metadata at %d does not match expected: %q != %q", tc.name, i, ms.removed[i], tc.expectedRemovals[i]) + } + } + for i := len(ms.removed); i < len(tc.expectedRemovals); i++ { + t.Errorf("[%s] missing expected removal at position %d (%q)", tc.name, i, tc.expectedRemovals[i]) + } + for i := len(tc.expectedRemovals); i < len(ms.removed); i++ { + t.Errorf("[%s] removed unexpected metadata at position %d (%q)", tc.name, i, ms.removed[i]) + } + } +} + +func taggedMetadata(key string, dgst string, sourceRepo string) metadata.V2Metadata { + meta := metadata.V2Metadata{ + 
Digest:           digest.Digest(dgst),
+		SourceRepository: sourceRepo,
+	}
+
+	meta.HMAC = metadata.ComputeV2MetadataHMAC([]byte(key), &meta)
+	return meta
+}
+
+type mockRepo struct {
+	t        *testing.T
+	errors   map[digest.Digest]error
+	blobs    map[digest.Digest]distribution.Descriptor
+	requests []string
+}
+
+var _ distribution.Repository = &mockRepo{}
+
+func (m *mockRepo) Named() reference.Named {
+	m.t.Fatalf("Named() not implemented")
+	return nil
+}
+func (m *mockRepo) Manifests(ctx context.Context, options ...distribution.ManifestServiceOption) (distribution.ManifestService, error) {
+	m.t.Fatalf("Manifests() not implemented")
+	return nil, nil
+}
+func (m *mockRepo) Tags(ctx context.Context) distribution.TagService {
+	m.t.Fatalf("Tags() not implemented")
+	return nil
+}
+func (m *mockRepo) Blobs(ctx context.Context) distribution.BlobStore {
+	return &mockBlobStore{
+		repo: m,
+	}
+}
+
+type mockBlobStore struct {
+	repo *mockRepo
+}
+
+var _ distribution.BlobStore = &mockBlobStore{}
+
+func (m *mockBlobStore) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
+	m.repo.requests = append(m.repo.requests, dgst.String())
+	if err, exists := m.repo.errors[dgst]; exists {
+		return distribution.Descriptor{}, err
+	}
+	if desc, exists := m.repo.blobs[dgst]; exists {
+		return desc, nil
+	}
+	return distribution.Descriptor{}, distribution.ErrBlobUnknown
+}
+func (m *mockBlobStore) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) {
+	m.repo.t.Fatal("Get() not implemented")
+	return nil, nil
+}
+
+func (m *mockBlobStore) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) {
+	m.repo.t.Fatal("Open() not implemented")
+	return nil, nil
+}
+
+func (m *mockBlobStore) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) {
+	m.repo.t.Fatal("Put() not implemented")
+	return distribution.Descriptor{}, nil
+}
+
+func (m *mockBlobStore) Create(ctx context.Context, options ...distribution.BlobCreateOption) (distribution.BlobWriter, error) {
+	m.repo.t.Fatal("Create() not implemented")
+	return nil, nil
+}
+func (m *mockBlobStore) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) {
+	m.repo.t.Fatal("Resume() not implemented")
+	return nil, nil
+}
+func (m *mockBlobStore) Delete(ctx context.Context, dgst digest.Digest) error {
+	m.repo.t.Fatal("Delete() not implemented")
+	return nil
+}
+func (m *mockBlobStore) ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error {
+	m.repo.t.Fatalf("ServeBlob() not implemented")
+	return nil
+}
+
+type mockV2MetadataService struct {
+	added   []metadata.V2Metadata
+	removed []metadata.V2Metadata
+}
+
+var _ metadata.V2MetadataService = &mockV2MetadataService{}
+
+func (*mockV2MetadataService) GetMetadata(diffID layer.DiffID) ([]metadata.V2Metadata, error) {
+	return nil, nil
+}
+func (*mockV2MetadataService) GetDiffID(dgst digest.Digest) (layer.DiffID, error) {
+	return "", nil
+}
+func (m *mockV2MetadataService) Add(diffID layer.DiffID, metadata metadata.V2Metadata) error {
+	m.added = append(m.added, metadata)
+	return nil
+}
+func (m *mockV2MetadataService) TagAndAdd(diffID layer.DiffID, hmacKey []byte, meta metadata.V2Metadata) error {
+	meta.HMAC = metadata.ComputeV2MetadataHMAC(hmacKey, &meta)
+	m.Add(diffID, meta)
+	return nil
+}
+func (m *mockV2MetadataService) Remove(metadata metadata.V2Metadata) error {
+	m.removed = append(m.removed, metadata)
+	return nil
+}
+
+type progressSink struct {
+	t *testing.T
+}
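+
+// progressSink satisfies progress.Output; it logs every update so that a
+// failing test shows the progress stream that led up to the failure.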
+
+func (s *progressSink) WriteProgress(p progress.Progress) error {
+	s.t.Logf("progress update: %#+v", p)
+	return nil
+}
diff --git a/components/engine/distribution/registry.go b/components/engine/distribution/registry.go
new file mode 100644
index 0000000000..bce270a5ed
--- /dev/null
+++ b/components/engine/distribution/registry.go
@@ -0,0 +1,156 @@
+package distribution
+
+import (
+	"fmt"
+	"net"
+	"net/http"
+	"time"
+
+	"github.com/docker/distribution"
+	"github.com/docker/distribution/manifest/schema2"
+	"github.com/docker/distribution/reference"
+	"github.com/docker/distribution/registry/client"
+	"github.com/docker/distribution/registry/client/auth"
+	"github.com/docker/distribution/registry/client/transport"
+	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/dockerversion"
+	"github.com/docker/docker/registry"
+	"github.com/docker/go-connections/sockets"
+	"golang.org/x/net/context"
+)
+
+// ImageTypes represents the schema2 config types for images
+var ImageTypes = []string{
+	schema2.MediaTypeImageConfig,
+	// Handle unexpected values from https://github.com/docker/distribution/issues/1621
+	// (see also https://github.com/docker/docker/issues/22378,
+	// https://github.com/docker/docker/issues/30083)
+	"application/octet-stream",
+	"application/json",
+	"text/html",
+	// Treat defaulted values as images, newer types cannot be implied
+	"",
+}
+
+// PluginTypes represents the schema2 config types for plugins
+var PluginTypes = []string{
+	schema2.MediaTypePluginConfig,
+}
+
+var mediaTypeClasses map[string]string
+
+func init() {
+	// initialize media type classes with all known types for
+	// images and plugins
+	mediaTypeClasses = map[string]string{}
+	for _, t := range ImageTypes {
+		mediaTypeClasses[t] = "image"
+	}
+	for _, t := range PluginTypes {
+		mediaTypeClasses[t] = "plugin"
+	}
+}
+
+// NewV2Repository returns a repository (v2 only). It creates an HTTP transport
+// providing timeout settings and authentication support, and also verifies the
+// remote API version.
+func NewV2Repository(ctx context.Context, repoInfo *registry.RepositoryInfo, endpoint registry.APIEndpoint, metaHeaders http.Header, authConfig *types.AuthConfig, actions ...string) (repo distribution.Repository, foundVersion bool, err error) {
+	repoName := repoInfo.Name.Name()
+	// If endpoint does not support CanonicalName, use the RemoteName instead
+	if endpoint.TrimHostname {
+		repoName = reference.Path(repoInfo.Name)
+	}
+
+	direct := &net.Dialer{
+		Timeout:   30 * time.Second,
+		KeepAlive: 30 * time.Second,
+		DualStack: true,
+	}
+
+	// TODO(dmcgowan): Call close idle connections when complete, use keep alive
+	base := &http.Transport{
+		Proxy:               http.ProxyFromEnvironment,
+		Dial:                direct.Dial,
+		TLSHandshakeTimeout: 10 * time.Second,
+		TLSClientConfig:     endpoint.TLSConfig,
+		// TODO(dmcgowan): Call close idle connections when complete and use keep alive
+		DisableKeepAlives: true,
+	}
+
+	proxyDialer, err := sockets.DialerFromEnvironment(direct)
+	if err == nil {
+		base.Dial = proxyDialer.Dial
+	}
+
+	modifiers := registry.DockerHeaders(dockerversion.DockerUserAgent(ctx), metaHeaders)
+	authTransport := transport.NewTransport(base, modifiers...)
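+	// authTransport carries only the User-Agent and meta headers; it is
+	// used for the /v2/ ping below and for fetching auth tokens. The
+	// transport that authorizes ordinary repository requests is assembled
+	// afterwards, once the challenge manager knows which authentication
+	// scheme the registry expects.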
+ + challengeManager, foundVersion, err := registry.PingV2Registry(endpoint.URL, authTransport) + if err != nil { + transportOK := false + if responseErr, ok := err.(registry.PingResponseError); ok { + transportOK = true + err = responseErr.Err + } + return nil, foundVersion, fallbackError{ + err: err, + confirmedV2: foundVersion, + transportOK: transportOK, + } + } + + if authConfig.RegistryToken != "" { + passThruTokenHandler := &existingTokenHandler{token: authConfig.RegistryToken} + modifiers = append(modifiers, auth.NewAuthorizer(challengeManager, passThruTokenHandler)) + } else { + scope := auth.RepositoryScope{ + Repository: repoName, + Actions: actions, + Class: repoInfo.Class, + } + + creds := registry.NewStaticCredentialStore(authConfig) + tokenHandlerOptions := auth.TokenHandlerOptions{ + Transport: authTransport, + Credentials: creds, + Scopes: []auth.Scope{scope}, + ClientID: registry.AuthClientID, + } + tokenHandler := auth.NewTokenHandlerWithOptions(tokenHandlerOptions) + basicHandler := auth.NewBasicHandler(creds) + modifiers = append(modifiers, auth.NewAuthorizer(challengeManager, tokenHandler, basicHandler)) + } + tr := transport.NewTransport(base, modifiers...) + + repoNameRef, err := reference.WithName(repoName) + if err != nil { + return nil, foundVersion, fallbackError{ + err: err, + confirmedV2: foundVersion, + transportOK: true, + } + } + + repo, err = client.NewRepository(ctx, repoNameRef, endpoint.URL.String(), tr) + if err != nil { + err = fallbackError{ + err: err, + confirmedV2: foundVersion, + transportOK: true, + } + } + return +} + +type existingTokenHandler struct { + token string +} + +func (th *existingTokenHandler) Scheme() string { + return "bearer" +} + +func (th *existingTokenHandler) AuthorizeRequest(req *http.Request, params map[string]string) error { + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", th.token)) + return nil +} diff --git a/components/engine/distribution/registry_unit_test.go b/components/engine/distribution/registry_unit_test.go new file mode 100644 index 0000000000..ebe6ecad97 --- /dev/null +++ b/components/engine/distribution/registry_unit_test.go @@ -0,0 +1,172 @@ +package distribution + +import ( + "fmt" + "io/ioutil" + "net/http" + "net/http/httptest" + "net/url" + "os" + "runtime" + "strings" + "testing" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" + registrytypes "github.com/docker/docker/api/types/registry" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/registry" + "golang.org/x/net/context" +) + +const secretRegistryToken = "mysecrettoken" + +type tokenPassThruHandler struct { + reached bool + gotToken bool + shouldSend401 func(url string) bool +} + +func (h *tokenPassThruHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + h.reached = true + if strings.Contains(r.Header.Get("Authorization"), secretRegistryToken) { + logrus.Debug("Detected registry token in auth header") + h.gotToken = true + } + if h.shouldSend401 == nil || h.shouldSend401(r.RequestURI) { + w.Header().Set("WWW-Authenticate", `Bearer realm="foorealm"`) + w.WriteHeader(401) + } +} + +func testTokenPassThru(t *testing.T, ts *httptest.Server) { + tmp, err := testDirectory("") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmp) + + uri, err := url.Parse(ts.URL) + if err != nil { + t.Fatalf("could not parse url from test server: %v", err) + } + + endpoint := registry.APIEndpoint{ + Mirror: false, + 
URL:          uri,
+		Version:      2,
+		Official:     false,
+		TrimHostname: false,
+		TLSConfig:    nil,
+	}
+	n, _ := reference.ParseNormalizedNamed("testremotename")
+	repoInfo := &registry.RepositoryInfo{
+		Name: n,
+		Index: &registrytypes.IndexInfo{
+			Name:     "testrepo",
+			Mirrors:  nil,
+			Secure:   false,
+			Official: false,
+		},
+		Official: false,
+	}
+	imagePullConfig := &ImagePullConfig{
+		Config: Config{
+			MetaHeaders: http.Header{},
+			AuthConfig: &types.AuthConfig{
+				RegistryToken: secretRegistryToken,
+			},
+		},
+		Schema2Types: ImageTypes,
+	}
+	puller, err := newPuller(endpoint, repoInfo, imagePullConfig)
+	if err != nil {
+		t.Fatal(err)
+	}
+	p := puller.(*v2Puller)
+	ctx := context.Background()
+	p.repo, _, err = NewV2Repository(ctx, p.repoInfo, p.endpoint, p.config.MetaHeaders, p.config.AuthConfig, "pull")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	logrus.Debug("About to pull")
+	// We expect it to fail, since we haven't mocked the full registry exchange in our handler above
+	tag, _ := reference.WithTag(n, "tag_goes_here")
+	_ = p.pullV2Repository(ctx, tag)
+}
+
+func TestTokenPassThru(t *testing.T) {
+	handler := &tokenPassThruHandler{shouldSend401: func(url string) bool { return url == "/v2/" }}
+	ts := httptest.NewServer(handler)
+	defer ts.Close()
+
+	testTokenPassThru(t, ts)
+
+	if !handler.reached {
+		t.Fatal("Handler not reached")
+	}
+	if !handler.gotToken {
+		t.Fatal("Failed to receive registry token")
+	}
+}
+
+func TestTokenPassThruDifferentHost(t *testing.T) {
+	handler := new(tokenPassThruHandler)
+	ts := httptest.NewServer(handler)
+	defer ts.Close()
+
+	tsredirect := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		if r.RequestURI == "/v2/" {
+			w.Header().Set("WWW-Authenticate", `Bearer realm="foorealm"`)
+			w.WriteHeader(401)
+			return
+		}
+		http.Redirect(w, r, ts.URL+r.URL.Path, http.StatusMovedPermanently)
+	}))
+	defer tsredirect.Close()
+
+	testTokenPassThru(t, tsredirect)
+
+	if !handler.reached {
+		t.Fatal("Handler not reached")
+	}
+	if handler.gotToken {
+		t.Fatal("Redirect should not forward Authorization header to another host")
+	}
+}
+
+// testDirectory creates a new temporary directory and returns its path.
+// The contents of the directory at path `templateDir` are copied into the
+// new directory.
+func testDirectory(templateDir string) (dir string, err error) {
+	testID := stringid.GenerateNonCryptoID()[:4]
+	prefix := fmt.Sprintf("docker-test%s-%s-", testID, getCallerName(2))
+	if prefix == "" {
+		prefix = "docker-test-"
+	}
+	dir, err = ioutil.TempDir("", prefix)
+	if err = os.Remove(dir); err != nil {
+		return
+	}
+	if templateDir != "" {
+		if err = archive.CopyWithTar(templateDir, dir); err != nil {
+			return
+		}
+	}
+	return
+}
+
+// getCallerName introspects the call stack and returns the name of the
+// function `depth` levels down in the stack.
+func getCallerName(depth int) string {
+	// Use the caller function name as a prefix.
+	// This helps trace temp directories back to their test.
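+	// The +1 accounts for getCallerName's own stack frame, so depth is
+	// counted relative to this helper's caller.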
+ pc, _, _, _ := runtime.Caller(depth + 1) + callerLongName := runtime.FuncForPC(pc).Name() + parts := strings.Split(callerLongName, ".") + callerShortName := parts[len(parts)-1] + return callerShortName +} diff --git a/components/engine/distribution/utils/progress.go b/components/engine/distribution/utils/progress.go new file mode 100644 index 0000000000..cc3632a534 --- /dev/null +++ b/components/engine/distribution/utils/progress.go @@ -0,0 +1,44 @@ +package utils + +import ( + "io" + "net" + "os" + "syscall" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/pkg/streamformatter" +) + +// WriteDistributionProgress is a helper for writing progress from chan to JSON +// stream with an optional cancel function. +func WriteDistributionProgress(cancelFunc func(), outStream io.Writer, progressChan <-chan progress.Progress) { + progressOutput := streamformatter.NewJSONProgressOutput(outStream, false) + operationCancelled := false + + for prog := range progressChan { + if err := progressOutput.WriteProgress(prog); err != nil && !operationCancelled { + // don't log broken pipe errors as this is the normal case when a client aborts + if isBrokenPipe(err) { + logrus.Info("Pull session cancelled") + } else { + logrus.Errorf("error writing progress to client: %v", err) + } + cancelFunc() + operationCancelled = true + // Don't return, because we need to continue draining + // progressChan until it's closed to avoid a deadlock. + } + } +} + +func isBrokenPipe(e error) bool { + if netErr, ok := e.(*net.OpError); ok { + e = netErr.Err + if sysErr, ok := netErr.Err.(*os.SyscallError); ok { + e = sysErr.Err + } + } + return e == syscall.EPIPE +} diff --git a/components/engine/distribution/xfer/download.go b/components/engine/distribution/xfer/download.go new file mode 100644 index 0000000000..8bd48646d4 --- /dev/null +++ b/components/engine/distribution/xfer/download.go @@ -0,0 +1,463 @@ +package xfer + +import ( + "errors" + "fmt" + "io" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/progress" + "golang.org/x/net/context" +) + +const maxDownloadAttempts = 5 + +// LayerDownloadManager figures out which layers need to be downloaded, then +// registers and downloads those, taking into account dependencies between +// layers. +type LayerDownloadManager struct { + layerStore layer.Store + tm TransferManager + waitDuration time.Duration +} + +// SetConcurrency sets the max concurrent downloads for each pull +func (ldm *LayerDownloadManager) SetConcurrency(concurrency int) { + ldm.tm.SetConcurrency(concurrency) +} + +// NewLayerDownloadManager returns a new LayerDownloadManager. +func NewLayerDownloadManager(layerStore layer.Store, concurrencyLimit int, options ...func(*LayerDownloadManager)) *LayerDownloadManager { + manager := LayerDownloadManager{ + layerStore: layerStore, + tm: NewTransferManager(concurrencyLimit), + waitDuration: time.Second, + } + for _, option := range options { + option(&manager) + } + return &manager +} + +type downloadTransfer struct { + Transfer + + layerStore layer.Store + layer layer.Layer + err error +} + +// result returns the layer resulting from the download, if the download +// and registration were successful. 
+func (d *downloadTransfer) result() (layer.Layer, error) {
+	return d.layer, d.err
+}
+
+// A DownloadDescriptor references a layer that may need to be downloaded.
+type DownloadDescriptor interface {
+	// Key returns the key used to deduplicate downloads.
+	Key() string
+	// ID returns the ID for display purposes.
+	ID() string
+	// DiffID should return the DiffID for this layer, or an error
+	// if it is unknown (for example, if it has not been downloaded
+	// before).
+	DiffID() (layer.DiffID, error)
+	// Download is called to perform the download.
+	Download(ctx context.Context, progressOutput progress.Output) (io.ReadCloser, int64, error)
+	// Close is called when the download manager is finished with this
+	// descriptor and will not call Download again or read from the reader
+	// that Download returned.
+	Close()
+}
+
+// DownloadDescriptorWithRegistered is a DownloadDescriptor that has an
+// additional Registered method which gets called after a downloaded layer is
+// registered. This allows the user of the download manager to know the DiffID
+// of each registered layer. This method is called if a cast to
+// DownloadDescriptorWithRegistered is successful.
+type DownloadDescriptorWithRegistered interface {
+	DownloadDescriptor
+	Registered(diffID layer.DiffID)
+}
+
+// Download is a blocking function which ensures the requested layers are
+// present in the layer store. It uses the string returned by the Key method to
+// deduplicate downloads. If a given layer is not already known to be present
+// in the layer store, and the key is not used by an in-progress download, the
+// Download method is called to get the layer tar data. Layers are then
+// registered in the appropriate order. The caller must call the returned
+// release function once it is done with the returned RootFS object.
+func (ldm *LayerDownloadManager) Download(ctx context.Context, initialRootFS image.RootFS, layers []DownloadDescriptor, progressOutput progress.Output) (image.RootFS, func(), error) {
+	var (
+		topLayer       layer.Layer
+		topDownload    *downloadTransfer
+		watcher        *Watcher
+		missingLayer   bool
+		transferKey    = ""
+		downloadsByKey = make(map[string]*downloadTransfer)
+	)
+
+	rootFS := initialRootFS
+	for _, descriptor := range layers {
+		key := descriptor.Key()
+		transferKey += key
+
+		if !missingLayer {
+			missingLayer = true
+			diffID, err := descriptor.DiffID()
+			if err == nil {
+				getRootFS := rootFS
+				getRootFS.Append(diffID)
+				l, err := ldm.layerStore.Get(getRootFS.ChainID())
+				if err == nil {
+					// Layer already exists.
+					logrus.Debugf("Layer already exists: %s", descriptor.ID())
+					progress.Update(progressOutput, descriptor.ID(), "Already exists")
+					if topLayer != nil {
+						layer.ReleaseAndLog(ldm.layerStore, topLayer)
+					}
+					topLayer = l
+					missingLayer = false
+					rootFS.Append(diffID)
+					// Register this repository as a source of this layer.
+					withRegistered, hasRegistered := descriptor.(DownloadDescriptorWithRegistered)
+					if hasRegistered {
+						withRegistered.Registered(diffID)
+					}
+					continue
+				}
+			}
+		}
+
+		// Does this layer have the same data as a previous layer in
+		// the stack? If so, avoid downloading it more than once.
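+		// Two keys are in play here: descriptor.Key() identifies the
+		// blob itself and deduplicates raw downloads, while transferKey
+		// (the concatenation of every key so far) identifies the layer
+		// at this position in the chain, which is what registration on
+		// top of a specific parent requires.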
+ var topDownloadUncasted Transfer + if existingDownload, ok := downloadsByKey[key]; ok { + xferFunc := ldm.makeDownloadFuncFromDownload(descriptor, existingDownload, topDownload) + defer topDownload.Transfer.Release(watcher) + topDownloadUncasted, watcher = ldm.tm.Transfer(transferKey, xferFunc, progressOutput) + topDownload = topDownloadUncasted.(*downloadTransfer) + continue + } + + // Layer is not known to exist - download and register it. + progress.Update(progressOutput, descriptor.ID(), "Pulling fs layer") + + var xferFunc DoFunc + if topDownload != nil { + xferFunc = ldm.makeDownloadFunc(descriptor, "", topDownload) + defer topDownload.Transfer.Release(watcher) + } else { + xferFunc = ldm.makeDownloadFunc(descriptor, rootFS.ChainID(), nil) + } + topDownloadUncasted, watcher = ldm.tm.Transfer(transferKey, xferFunc, progressOutput) + topDownload = topDownloadUncasted.(*downloadTransfer) + downloadsByKey[key] = topDownload + } + + if topDownload == nil { + return rootFS, func() { + if topLayer != nil { + layer.ReleaseAndLog(ldm.layerStore, topLayer) + } + }, nil + } + + // Won't be using the list built up so far - will generate it + // from downloaded layers instead. + rootFS.DiffIDs = []layer.DiffID{} + + defer func() { + if topLayer != nil { + layer.ReleaseAndLog(ldm.layerStore, topLayer) + } + }() + + select { + case <-ctx.Done(): + topDownload.Transfer.Release(watcher) + return rootFS, func() {}, ctx.Err() + case <-topDownload.Done(): + break + } + + l, err := topDownload.result() + if err != nil { + topDownload.Transfer.Release(watcher) + return rootFS, func() {}, err + } + + // Must do this exactly len(layers) times, so we don't include the + // base layer on Windows. + for range layers { + if l == nil { + topDownload.Transfer.Release(watcher) + return rootFS, func() {}, errors.New("internal error: too few parent layers") + } + rootFS.DiffIDs = append([]layer.DiffID{l.DiffID()}, rootFS.DiffIDs...) + l = l.Parent() + } + return rootFS, func() { topDownload.Transfer.Release(watcher) }, err +} + +// makeDownloadFunc returns a function that performs the layer download and +// registration. If parentDownload is non-nil, it waits for that download to +// complete before the registration step, and registers the downloaded data +// on top of parentDownload's resulting layer. Otherwise, it registers the +// layer on top of the ChainID given by parentLayer. +func (ldm *LayerDownloadManager) makeDownloadFunc(descriptor DownloadDescriptor, parentLayer layer.ChainID, parentDownload *downloadTransfer) DoFunc { + return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer { + d := &downloadTransfer{ + Transfer: NewTransfer(), + layerStore: ldm.layerStore, + } + + go func() { + defer func() { + close(progressChan) + }() + + progressOutput := progress.ChanOutput(progressChan) + + select { + case <-start: + default: + progress.Update(progressOutput, descriptor.ID(), "Waiting") + <-start + } + + if parentDownload != nil { + // Did the parent download already fail or get + // cancelled? 
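+				// Non-blocking check: fail fast if the parent
+				// has already failed, but don't wait for it
+				// before starting this download.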
+ select { + case <-parentDownload.Done(): + _, err := parentDownload.result() + if err != nil { + d.err = err + return + } + default: + } + } + + var ( + downloadReader io.ReadCloser + size int64 + err error + retries int + ) + + defer descriptor.Close() + + for { + downloadReader, size, err = descriptor.Download(d.Transfer.Context(), progressOutput) + if err == nil { + break + } + + // If an error was returned because the context + // was cancelled, we shouldn't retry. + select { + case <-d.Transfer.Context().Done(): + d.err = err + return + default: + } + + retries++ + if _, isDNR := err.(DoNotRetry); isDNR || retries == maxDownloadAttempts { + logrus.Errorf("Download failed: %v", err) + d.err = err + return + } + + logrus.Errorf("Download failed, retrying: %v", err) + delay := retries * 5 + ticker := time.NewTicker(ldm.waitDuration) + + selectLoop: + for { + progress.Updatef(progressOutput, descriptor.ID(), "Retrying in %d second%s", delay, (map[bool]string{true: "s"})[delay != 1]) + select { + case <-ticker.C: + delay-- + if delay == 0 { + ticker.Stop() + break selectLoop + } + case <-d.Transfer.Context().Done(): + ticker.Stop() + d.err = errors.New("download cancelled during retry delay") + return + } + + } + } + + close(inactive) + + if parentDownload != nil { + select { + case <-d.Transfer.Context().Done(): + d.err = errors.New("layer registration cancelled") + downloadReader.Close() + return + case <-parentDownload.Done(): + } + + l, err := parentDownload.result() + if err != nil { + d.err = err + downloadReader.Close() + return + } + parentLayer = l.ChainID() + } + + reader := progress.NewProgressReader(ioutils.NewCancelReadCloser(d.Transfer.Context(), downloadReader), progressOutput, size, descriptor.ID(), "Extracting") + defer reader.Close() + + inflatedLayerData, err := archive.DecompressStream(reader) + if err != nil { + d.err = fmt.Errorf("could not get decompression stream: %v", err) + return + } + + var src distribution.Descriptor + if fs, ok := descriptor.(distribution.Describable); ok { + src = fs.Descriptor() + } + if ds, ok := d.layerStore.(layer.DescribableStore); ok { + d.layer, err = ds.RegisterWithDescriptor(inflatedLayerData, parentLayer, src) + } else { + d.layer, err = d.layerStore.Register(inflatedLayerData, parentLayer) + } + if err != nil { + select { + case <-d.Transfer.Context().Done(): + d.err = errors.New("layer registration cancelled") + default: + d.err = fmt.Errorf("failed to register layer: %v", err) + } + return + } + + progress.Update(progressOutput, descriptor.ID(), "Pull complete") + withRegistered, hasRegistered := descriptor.(DownloadDescriptorWithRegistered) + if hasRegistered { + withRegistered.Registered(d.layer.DiffID()) + } + + // Doesn't actually need to be its own goroutine, but + // done like this so we can defer close(c). + go func() { + <-d.Transfer.Released() + if d.layer != nil { + layer.ReleaseAndLog(d.layerStore, d.layer) + } + }() + }() + + return d + } +} + +// makeDownloadFuncFromDownload returns a function that performs the layer +// registration when the layer data is coming from an existing download. It +// waits for sourceDownload and parentDownload to complete, and then +// reregisters the data from sourceDownload's top layer on top of +// parentDownload. This function does not log progress output because it would +// interfere with the progress reporting for sourceDownload, which has the same +// Key. 
+func (ldm *LayerDownloadManager) makeDownloadFuncFromDownload(descriptor DownloadDescriptor, sourceDownload *downloadTransfer, parentDownload *downloadTransfer) DoFunc { + return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer { + d := &downloadTransfer{ + Transfer: NewTransfer(), + layerStore: ldm.layerStore, + } + + go func() { + defer func() { + close(progressChan) + }() + + <-start + + close(inactive) + + select { + case <-d.Transfer.Context().Done(): + d.err = errors.New("layer registration cancelled") + return + case <-parentDownload.Done(): + } + + l, err := parentDownload.result() + if err != nil { + d.err = err + return + } + parentLayer := l.ChainID() + + // sourceDownload should have already finished if + // parentDownload finished, but wait for it explicitly + // to be sure. + select { + case <-d.Transfer.Context().Done(): + d.err = errors.New("layer registration cancelled") + return + case <-sourceDownload.Done(): + } + + l, err = sourceDownload.result() + if err != nil { + d.err = err + return + } + + layerReader, err := l.TarStream() + if err != nil { + d.err = err + return + } + defer layerReader.Close() + + var src distribution.Descriptor + if fs, ok := l.(distribution.Describable); ok { + src = fs.Descriptor() + } + if ds, ok := d.layerStore.(layer.DescribableStore); ok { + d.layer, err = ds.RegisterWithDescriptor(layerReader, parentLayer, src) + } else { + d.layer, err = d.layerStore.Register(layerReader, parentLayer) + } + if err != nil { + d.err = fmt.Errorf("failed to register layer: %v", err) + return + } + + withRegistered, hasRegistered := descriptor.(DownloadDescriptorWithRegistered) + if hasRegistered { + withRegistered.Registered(d.layer.DiffID()) + } + + // Doesn't actually need to be its own goroutine, but + // done like this so we can defer close(c). 
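+			// The layer reference taken during registration is
+			// held until every watcher has released the transfer;
+			// only then is it returned to the layer store.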
+ go func() { + <-d.Transfer.Released() + if d.layer != nil { + layer.ReleaseAndLog(d.layerStore, d.layer) + } + }() + }() + + return d + } +} diff --git a/components/engine/distribution/xfer/download_test.go b/components/engine/distribution/xfer/download_test.go new file mode 100644 index 0000000000..69323bb869 --- /dev/null +++ b/components/engine/distribution/xfer/download_test.go @@ -0,0 +1,357 @@ +package xfer + +import ( + "bytes" + "errors" + "fmt" + "io" + "io/ioutil" + "runtime" + "sync/atomic" + "testing" + "time" + + "github.com/docker/distribution" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/progress" + "github.com/opencontainers/go-digest" + "golang.org/x/net/context" +) + +const maxDownloadConcurrency = 3 + +type mockLayer struct { + layerData bytes.Buffer + diffID layer.DiffID + chainID layer.ChainID + parent layer.Layer +} + +func (ml *mockLayer) TarStream() (io.ReadCloser, error) { + return ioutil.NopCloser(bytes.NewBuffer(ml.layerData.Bytes())), nil +} + +func (ml *mockLayer) TarStreamFrom(layer.ChainID) (io.ReadCloser, error) { + return nil, fmt.Errorf("not implemented") +} + +func (ml *mockLayer) ChainID() layer.ChainID { + return ml.chainID +} + +func (ml *mockLayer) DiffID() layer.DiffID { + return ml.diffID +} + +func (ml *mockLayer) Parent() layer.Layer { + return ml.parent +} + +func (ml *mockLayer) Size() (size int64, err error) { + return 0, nil +} + +func (ml *mockLayer) DiffSize() (size int64, err error) { + return 0, nil +} + +func (ml *mockLayer) Metadata() (map[string]string, error) { + return make(map[string]string), nil +} + +type mockLayerStore struct { + layers map[layer.ChainID]*mockLayer +} + +func createChainIDFromParent(parent layer.ChainID, dgsts ...layer.DiffID) layer.ChainID { + if len(dgsts) == 0 { + return parent + } + if parent == "" { + return createChainIDFromParent(layer.ChainID(dgsts[0]), dgsts[1:]...) + } + // H = "H(n-1) SHA256(n)" + dgst := digest.FromBytes([]byte(string(parent) + " " + string(dgsts[0]))) + return createChainIDFromParent(layer.ChainID(dgst), dgsts[1:]...) 
+} + +func (ls *mockLayerStore) Map() map[layer.ChainID]layer.Layer { + layers := map[layer.ChainID]layer.Layer{} + + for k, v := range ls.layers { + layers[k] = v + } + + return layers +} + +func (ls *mockLayerStore) Register(reader io.Reader, parentID layer.ChainID) (layer.Layer, error) { + return ls.RegisterWithDescriptor(reader, parentID, distribution.Descriptor{}) +} + +func (ls *mockLayerStore) RegisterWithDescriptor(reader io.Reader, parentID layer.ChainID, _ distribution.Descriptor) (layer.Layer, error) { + var ( + parent layer.Layer + err error + ) + + if parentID != "" { + parent, err = ls.Get(parentID) + if err != nil { + return nil, err + } + } + + l := &mockLayer{parent: parent} + _, err = l.layerData.ReadFrom(reader) + if err != nil { + return nil, err + } + l.diffID = layer.DiffID(digest.FromBytes(l.layerData.Bytes())) + l.chainID = createChainIDFromParent(parentID, l.diffID) + + ls.layers[l.chainID] = l + return l, nil +} + +func (ls *mockLayerStore) Get(chainID layer.ChainID) (layer.Layer, error) { + l, ok := ls.layers[chainID] + if !ok { + return nil, layer.ErrLayerDoesNotExist + } + return l, nil +} + +func (ls *mockLayerStore) Release(l layer.Layer) ([]layer.Metadata, error) { + return []layer.Metadata{}, nil +} +func (ls *mockLayerStore) CreateRWLayer(string, layer.ChainID, *layer.CreateRWLayerOpts) (layer.RWLayer, error) { + return nil, errors.New("not implemented") +} + +func (ls *mockLayerStore) GetRWLayer(string) (layer.RWLayer, error) { + return nil, errors.New("not implemented") +} + +func (ls *mockLayerStore) ReleaseRWLayer(layer.RWLayer) ([]layer.Metadata, error) { + return nil, errors.New("not implemented") +} +func (ls *mockLayerStore) GetMountID(string) (string, error) { + return "", errors.New("not implemented") +} + +func (ls *mockLayerStore) Cleanup() error { + return nil +} + +func (ls *mockLayerStore) DriverStatus() [][2]string { + return [][2]string{} +} + +func (ls *mockLayerStore) DriverName() string { + return "mock" +} + +type mockDownloadDescriptor struct { + currentDownloads *int32 + id string + diffID layer.DiffID + registeredDiffID layer.DiffID + expectedDiffID layer.DiffID + simulateRetries int +} + +// Key returns the key used to deduplicate downloads. +func (d *mockDownloadDescriptor) Key() string { + return d.id +} + +// ID returns the ID for display purposes. +func (d *mockDownloadDescriptor) ID() string { + return d.id +} + +// DiffID should return the DiffID for this layer, or an error +// if it is unknown (for example, if it has not been downloaded +// before). +func (d *mockDownloadDescriptor) DiffID() (layer.DiffID, error) { + if d.diffID != "" { + return d.diffID, nil + } + return "", errors.New("no diffID available") +} + +func (d *mockDownloadDescriptor) Registered(diffID layer.DiffID) { + d.registeredDiffID = diffID +} + +func (d *mockDownloadDescriptor) mockTarStream() io.ReadCloser { + // The mock implementation returns the ID repeated 5 times as a tar + // stream instead of actual tar data. The data is ignored except for + // computing IDs. + return ioutil.NopCloser(bytes.NewBuffer([]byte(d.id + d.id + d.id + d.id + d.id))) +} + +// Download is called to perform the download. 
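+// It sleeps in 10ms steps so that cancellation and the concurrency limit are
+// observable, and it fails the first simulateRetries attempts to exercise
+// the retry path.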
+func (d *mockDownloadDescriptor) Download(ctx context.Context, progressOutput progress.Output) (io.ReadCloser, int64, error) {
+	if d.currentDownloads != nil {
+		defer atomic.AddInt32(d.currentDownloads, -1)
+
+		if atomic.AddInt32(d.currentDownloads, 1) > maxDownloadConcurrency {
+			return nil, 0, errors.New("concurrency limit exceeded")
+		}
+	}
+
+	// Sleep a bit to simulate a time-consuming download.
+	for i := int64(0); i <= 10; i++ {
+		select {
+		case <-ctx.Done():
+			return nil, 0, ctx.Err()
+		case <-time.After(10 * time.Millisecond):
+			progressOutput.WriteProgress(progress.Progress{ID: d.ID(), Action: "Downloading", Current: i, Total: 10})
+		}
+	}
+
+	if d.simulateRetries != 0 {
+		d.simulateRetries--
+		return nil, 0, errors.New("simulating retry")
+	}
+
+	return d.mockTarStream(), 0, nil
+}
+
+func (d *mockDownloadDescriptor) Close() {
+}
+
+func downloadDescriptors(currentDownloads *int32) []DownloadDescriptor {
+	return []DownloadDescriptor{
+		&mockDownloadDescriptor{
+			currentDownloads: currentDownloads,
+			id:               "id1",
+			expectedDiffID:   layer.DiffID("sha256:68e2c75dc5c78ea9240689c60d7599766c213ae210434c53af18470ae8c53ec1"),
+		},
+		&mockDownloadDescriptor{
+			currentDownloads: currentDownloads,
+			id:               "id2",
+			expectedDiffID:   layer.DiffID("sha256:64a636223116aa837973a5d9c2bdd17d9b204e4f95ac423e20e65dfbb3655473"),
+		},
+		&mockDownloadDescriptor{
+			currentDownloads: currentDownloads,
+			id:               "id3",
+			expectedDiffID:   layer.DiffID("sha256:58745a8bbd669c25213e9de578c4da5c8ee1c836b3581432c2b50e38a6753300"),
+		},
+		&mockDownloadDescriptor{
+			currentDownloads: currentDownloads,
+			id:               "id2",
+			expectedDiffID:   layer.DiffID("sha256:64a636223116aa837973a5d9c2bdd17d9b204e4f95ac423e20e65dfbb3655473"),
+		},
+		&mockDownloadDescriptor{
+			currentDownloads: currentDownloads,
+			id:               "id4",
+			expectedDiffID:   layer.DiffID("sha256:0dfb5b9577716cc173e95af7c10289322c29a6453a1718addc00c0c5b1330936"),
+			simulateRetries:  1,
+		},
+		&mockDownloadDescriptor{
+			currentDownloads: currentDownloads,
+			id:               "id5",
+			expectedDiffID:   layer.DiffID("sha256:0a5f25fa1acbc647f6112a6276735d0fa01e4ee2aa7ec33015e337350e1ea23d"),
+		},
+	}
+}
+
+func TestSuccessfulDownload(t *testing.T) {
+	// TODO Windows: Fix this unit test
+	if runtime.GOOS == "windows" {
+		t.Skip("Needs fixing on Windows")
+	}
+
+	layerStore := &mockLayerStore{make(map[layer.ChainID]*mockLayer)}
+	ldm := NewLayerDownloadManager(layerStore, maxDownloadConcurrency, func(m *LayerDownloadManager) { m.waitDuration = time.Millisecond })
+
+	progressChan := make(chan progress.Progress)
+	progressDone := make(chan struct{})
+	receivedProgress := make(map[string]progress.Progress)
+
+	go func() {
+		for p := range progressChan {
+			receivedProgress[p.ID] = p
+		}
+		close(progressDone)
+	}()
+
+	var currentDownloads int32
+	descriptors := downloadDescriptors(&currentDownloads)
+
+	firstDescriptor := descriptors[0].(*mockDownloadDescriptor)
+
+	// Pre-register the first layer to simulate an already-existing layer
+	l, err := layerStore.Register(firstDescriptor.mockTarStream(), "")
+	if err != nil {
+		t.Fatal(err)
+	}
+	firstDescriptor.diffID = l.DiffID()
+
+	rootFS, releaseFunc, err := ldm.Download(context.Background(), *image.NewRootFS(), descriptors, progress.ChanOutput(progressChan))
+	if err != nil {
+		t.Fatalf("download error: %v", err)
+	}
+
+	releaseFunc()
+
+	close(progressChan)
+	<-progressDone
+
+	if len(rootFS.DiffIDs) != len(descriptors) {
+		t.Fatal("got wrong number of diffIDs in rootfs")
+	}
+
+	for i, d := range descriptors {
+		descriptor := d.(*mockDownloadDescriptor)
+
+ if descriptor.diffID != "" { + if receivedProgress[d.ID()].Action != "Already exists" { + t.Fatalf("did not get 'Already exists' message for %v", d.ID()) + } + } else if receivedProgress[d.ID()].Action != "Pull complete" { + t.Fatalf("did not get 'Pull complete' message for %v", d.ID()) + } + + if rootFS.DiffIDs[i] != descriptor.expectedDiffID { + t.Fatalf("rootFS item %d has the wrong diffID (expected: %v got: %v)", i, descriptor.expectedDiffID, rootFS.DiffIDs[i]) + } + + if descriptor.diffID == "" && descriptor.registeredDiffID != rootFS.DiffIDs[i] { + t.Fatal("diffID mismatch between rootFS and Registered callback") + } + } +} + +func TestCancelledDownload(t *testing.T) { + ldm := NewLayerDownloadManager(&mockLayerStore{make(map[layer.ChainID]*mockLayer)}, maxDownloadConcurrency, func(m *LayerDownloadManager) { m.waitDuration = time.Millisecond }) + + progressChan := make(chan progress.Progress) + progressDone := make(chan struct{}) + + go func() { + for range progressChan { + } + close(progressDone) + }() + + ctx, cancel := context.WithCancel(context.Background()) + + go func() { + <-time.After(time.Millisecond) + cancel() + }() + + descriptors := downloadDescriptors(nil) + _, _, err := ldm.Download(ctx, *image.NewRootFS(), descriptors, progress.ChanOutput(progressChan)) + if err != context.Canceled { + t.Fatal("expected download to be cancelled") + } + + close(progressChan) + <-progressDone +} diff --git a/components/engine/distribution/xfer/transfer.go b/components/engine/distribution/xfer/transfer.go new file mode 100644 index 0000000000..b86c503a08 --- /dev/null +++ b/components/engine/distribution/xfer/transfer.go @@ -0,0 +1,401 @@ +package xfer + +import ( + "runtime" + "sync" + + "github.com/docker/docker/pkg/progress" + "golang.org/x/net/context" +) + +// DoNotRetry is an error wrapper indicating that the error cannot be resolved +// with a retry. +type DoNotRetry struct { + Err error +} + +// Error returns the stringified representation of the encapsulated error. +func (e DoNotRetry) Error() string { + return e.Err.Error() +} + +// Watcher is returned by Watch and can be passed to Release to stop watching. +type Watcher struct { + // signalChan is used to signal to the watcher goroutine that + // new progress information is available, or that the transfer + // has finished. + signalChan chan struct{} + // releaseChan signals to the watcher goroutine that the watcher + // should be detached. + releaseChan chan struct{} + // running remains open as long as the watcher is watching the + // transfer. It gets closed if the transfer finishes or the + // watcher is detached. + running chan struct{} +} + +// Transfer represents an in-progress transfer. +type Transfer interface { + Watch(progressOutput progress.Output) *Watcher + Release(*Watcher) + Context() context.Context + Close() + Done() <-chan struct{} + Released() <-chan struct{} + Broadcast(masterProgressChan <-chan progress.Progress) +} + +type transfer struct { + mu sync.Mutex + + ctx context.Context + cancel context.CancelFunc + + // watchers keeps track of the goroutines monitoring progress output, + // indexed by the channels that release them. + watchers map[chan struct{}]*Watcher + + // lastProgress is the most recently received progress event. + lastProgress progress.Progress + // hasLastProgress is true when lastProgress has been set. + hasLastProgress bool + + // running remains open as long as the transfer is in progress. 
+	running chan struct{}
+	// released stays open until all watchers release the transfer and
+	// the transfer is no longer tracked by the transfer manager.
+	released chan struct{}
+
+	// broadcastDone is true if the master progress channel has closed.
+	broadcastDone bool
+	// closed is true if Close has been called.
+	closed bool
+	// broadcastSyncChan allows watchers to "ping" the broadcasting
+	// goroutine to wait for it to deplete its input channel. This ensures
+	// a detaching watcher won't miss an event that was sent before it
+	// started detaching.
+	broadcastSyncChan chan struct{}
+}
+
+// NewTransfer creates a new transfer.
+func NewTransfer() Transfer {
+	t := &transfer{
+		watchers:          make(map[chan struct{}]*Watcher),
+		running:           make(chan struct{}),
+		released:          make(chan struct{}),
+		broadcastSyncChan: make(chan struct{}),
+	}
+
+	// This uses context.Background instead of a caller-supplied context
+	// so that a transfer won't be cancelled automatically if the client
+	// which requested it is ^C'd (there could be other viewers).
+	t.ctx, t.cancel = context.WithCancel(context.Background())
+
+	return t
+}
+
+// Broadcast copies the progress and error output to all viewers.
+func (t *transfer) Broadcast(masterProgressChan <-chan progress.Progress) {
+	for {
+		var (
+			p  progress.Progress
+			ok bool
+		)
+		select {
+		case p, ok = <-masterProgressChan:
+		default:
+			// We've depleted the channel, so now we can handle
+			// reads on broadcastSyncChan to let detaching watchers
+			// know we're caught up.
+			select {
+			case <-t.broadcastSyncChan:
+				continue
+			case p, ok = <-masterProgressChan:
+			}
+		}
+
+		t.mu.Lock()
+		if ok {
+			t.lastProgress = p
+			t.hasLastProgress = true
+			for _, w := range t.watchers {
+				select {
+				case w.signalChan <- struct{}{}:
+				default:
+				}
+			}
+		} else {
+			t.broadcastDone = true
+		}
+		t.mu.Unlock()
+		if !ok {
+			close(t.running)
+			return
+		}
+	}
+}
+
+// Watch adds a watcher to the transfer. The supplied progress output receives
+// updates until the transfer finishes.
+func (t *transfer) Watch(progressOutput progress.Output) *Watcher {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+
+	w := &Watcher{
+		releaseChan: make(chan struct{}),
+		signalChan:  make(chan struct{}),
+		running:     make(chan struct{}),
+	}
+
+	t.watchers[w.releaseChan] = w
+
+	if t.broadcastDone {
+		close(w.running)
+		return w
+	}
+
+	go func() {
+		defer func() {
+			close(w.running)
+		}()
+		var (
+			done           bool
+			lastWritten    progress.Progress
+			hasLastWritten bool
+		)
+		for {
+			t.mu.Lock()
+			hasLastProgress := t.hasLastProgress
+			lastProgress := t.lastProgress
+			t.mu.Unlock()
+
+			// Make sure we don't write the last progress item
+			// twice.
+			if hasLastProgress && (!done || !hasLastWritten || lastProgress != lastWritten) {
+				progressOutput.WriteProgress(lastProgress)
+				lastWritten = lastProgress
+				hasLastWritten = true
+			}
+
+			if done {
+				return
+			}
+
+			select {
+			case <-w.signalChan:
+			case <-w.releaseChan:
+				done = true
+				// Since the watcher is going to detach, make
+				// sure the broadcaster is caught up so we
+				// don't miss anything.
+				select {
+				case t.broadcastSyncChan <- struct{}{}:
+				case <-t.running:
+				}
+			case <-t.running:
+				done = true
+			}
+		}
+	}()
+
+	return w
+}
+
+// Release is the inverse of Watch; it indicates that the watcher no longer
+// wants to be notified about the progress of the transfer. All calls to Watch
+// must be paired with later calls to Release so that the lifecycle of the
+// transfer is properly managed.
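+//
+// A typical pairing, as used by the download and upload managers in this
+// package, looks like:
+//
+//	xfer, watcher := tm.Transfer(key, xferFunc, progressOutput)
+//	defer xfer.Release(watcher)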
+func (t *transfer) Release(watcher *Watcher) {
+	t.mu.Lock()
+	delete(t.watchers, watcher.releaseChan)
+
+	if len(t.watchers) == 0 {
+		if t.closed {
+			// released may have been closed already if all
+			// watchers were released, then another one was added
+			// while waiting for a previous watcher goroutine to
+			// finish.
+			select {
+			case <-t.released:
+			default:
+				close(t.released)
+			}
+		} else {
+			t.cancel()
+		}
+	}
+	t.mu.Unlock()
+
+	close(watcher.releaseChan)
+	// Block until the watcher goroutine completes
+	<-watcher.running
+}
+
+// Done returns a channel which is closed if the transfer completes or is
+// cancelled. Note that having 0 watchers causes a transfer to be cancelled.
+func (t *transfer) Done() <-chan struct{} {
+	// Note that this doesn't return t.ctx.Done() because that channel will
+	// be closed the moment Cancel is called, and we need to return a
+	// channel that blocks until a cancellation is actually acknowledged by
+	// the transfer function.
+	return t.running
+}
+
+// Released returns a channel which is closed once all watchers release the
+// transfer AND the transfer is no longer tracked by the transfer manager.
+func (t *transfer) Released() <-chan struct{} {
+	return t.released
+}
+
+// Context returns the context associated with the transfer.
+func (t *transfer) Context() context.Context {
+	return t.ctx
+}
+
+// Close is called by the transfer manager when the transfer is no longer
+// being tracked.
+func (t *transfer) Close() {
+	t.mu.Lock()
+	t.closed = true
+	if len(t.watchers) == 0 {
+		close(t.released)
+	}
+	t.mu.Unlock()
+}
+
+// DoFunc is a function called by the transfer manager to actually perform
+// a transfer. It should be non-blocking. It should wait until the start channel
+// is closed before transferring any data. If the function closes inactive, that
+// signals to the transfer manager that the job is no longer actively moving
+// data - for example, it may be waiting for a dependent transfer to finish.
+// This prevents it from taking up a slot.
+type DoFunc func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer
+
+// TransferManager is used by LayerDownloadManager and LayerUploadManager to
+// schedule and deduplicate transfers. It is up to the TransferManager
+// implementation to make the scheduling and concurrency decisions.
+type TransferManager interface {
+	// Transfer checks if a transfer with the given key is in progress. If
+	// so, it returns progress and error output from that transfer.
+	// Otherwise, it will call xferFunc to initiate the transfer.
+	Transfer(key string, xferFunc DoFunc, progressOutput progress.Output) (Transfer, *Watcher)
+	// SetConcurrency sets the concurrency limit so that it can be
+	// adjusted on daemon reload.
+	SetConcurrency(concurrency int)
+}
+
+type transferManager struct {
+	mu sync.Mutex
+
+	concurrencyLimit int
+	activeTransfers  int
+	transfers        map[string]Transfer
+	waitingTransfers []chan struct{}
+}
+
+// NewTransferManager returns a new TransferManager.
+func NewTransferManager(concurrencyLimit int) TransferManager {
+	return &transferManager{
+		concurrencyLimit: concurrencyLimit,
+		transfers:        make(map[string]Transfer),
+	}
+}
+
+// SetConcurrency sets the concurrencyLimit
+func (tm *transferManager) SetConcurrency(concurrency int) {
+	tm.mu.Lock()
+	tm.concurrencyLimit = concurrency
+	tm.mu.Unlock()
+}
+
+// Transfer checks if a transfer matching the given key is in progress. If not,
+// it starts one by calling xferFunc. The caller supplies a channel which
+// receives progress output from the transfer.
+func (tm *transferManager) Transfer(key string, xferFunc DoFunc, progressOutput progress.Output) (Transfer, *Watcher) {
+	tm.mu.Lock()
+	defer tm.mu.Unlock()
+
+	for {
+		xfer, present := tm.transfers[key]
+		if !present {
+			break
+		}
+		// Transfer is already in progress.
+		watcher := xfer.Watch(progressOutput)
+
+		select {
+		case <-xfer.Context().Done():
+			// We don't want to watch a transfer that has been cancelled.
+			// Wait for it to be removed from the map and try again.
+			xfer.Release(watcher)
+			tm.mu.Unlock()
+			// The goroutine that removes this transfer from the
+			// map is also waiting for xfer.Done(), so yield to it.
+			// This could be avoided by adding a Closed method
+			// to Transfer to allow explicitly waiting for it to be
+			// removed from the map, but forcing a scheduling round
+			// in this very rare case seems better than bloating the
+			// interface definition.
+			runtime.Gosched()
+			<-xfer.Done()
+			tm.mu.Lock()
+		default:
+			return xfer, watcher
+		}
+	}
+
+	start := make(chan struct{})
+	inactive := make(chan struct{})
+
+	if tm.concurrencyLimit == 0 || tm.activeTransfers < tm.concurrencyLimit {
+		close(start)
+		tm.activeTransfers++
+	} else {
+		tm.waitingTransfers = append(tm.waitingTransfers, start)
+	}
+
+	masterProgressChan := make(chan progress.Progress)
+	xfer := xferFunc(masterProgressChan, start, inactive)
+	watcher := xfer.Watch(progressOutput)
+	go xfer.Broadcast(masterProgressChan)
+	tm.transfers[key] = xfer
+
+	// When the transfer is finished, remove it from the map.
+	go func() {
+		for {
+			select {
+			case <-inactive:
+				tm.mu.Lock()
+				tm.inactivate(start)
+				tm.mu.Unlock()
+				inactive = nil
+			case <-xfer.Done():
+				tm.mu.Lock()
+				if inactive != nil {
+					tm.inactivate(start)
+				}
+				delete(tm.transfers, key)
+				tm.mu.Unlock()
+				xfer.Close()
+				return
+			}
+		}
+	}()
+
+	return xfer, watcher
+}
+
+func (tm *transferManager) inactivate(start chan struct{}) {
+	// If the transfer was started, remove it from the activeTransfers
+	// count.
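+	// A closed start channel means this transfer actually occupied a
+	// concurrency slot. Hand the slot directly to the next waiter, if
+	// any, instead of decrementing and re-incrementing activeTransfers.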
+ select { + case <-start: + // Start next transfer if any are waiting + if len(tm.waitingTransfers) != 0 { + close(tm.waitingTransfers[0]) + tm.waitingTransfers = tm.waitingTransfers[1:] + } else { + tm.activeTransfers-- + } + default: + } +} diff --git a/components/engine/distribution/xfer/transfer_test.go b/components/engine/distribution/xfer/transfer_test.go new file mode 100644 index 0000000000..6c50ce3524 --- /dev/null +++ b/components/engine/distribution/xfer/transfer_test.go @@ -0,0 +1,410 @@ +package xfer + +import ( + "sync/atomic" + "testing" + "time" + + "github.com/docker/docker/pkg/progress" +) + +func TestTransfer(t *testing.T) { + makeXferFunc := func(id string) DoFunc { + return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer { + select { + case <-start: + default: + t.Fatalf("transfer function not started even though concurrency limit not reached") + } + + xfer := NewTransfer() + go func() { + for i := 0; i <= 10; i++ { + progressChan <- progress.Progress{ID: id, Action: "testing", Current: int64(i), Total: 10} + time.Sleep(10 * time.Millisecond) + } + close(progressChan) + }() + return xfer + } + } + + tm := NewTransferManager(5) + progressChan := make(chan progress.Progress) + progressDone := make(chan struct{}) + receivedProgress := make(map[string]int64) + + go func() { + for p := range progressChan { + val, present := receivedProgress[p.ID] + if present && p.Current <= val { + t.Fatalf("got unexpected progress value: %d (expected %d)", p.Current, val+1) + } + receivedProgress[p.ID] = p.Current + } + close(progressDone) + }() + + // Start a few transfers + ids := []string{"id1", "id2", "id3"} + xfers := make([]Transfer, len(ids)) + watchers := make([]*Watcher, len(ids)) + for i, id := range ids { + xfers[i], watchers[i] = tm.Transfer(id, makeXferFunc(id), progress.ChanOutput(progressChan)) + } + + for i, xfer := range xfers { + <-xfer.Done() + xfer.Release(watchers[i]) + } + close(progressChan) + <-progressDone + + for _, id := range ids { + if receivedProgress[id] != 10 { + t.Fatalf("final progress value %d instead of 10", receivedProgress[id]) + } + } +} + +func TestConcurrencyLimit(t *testing.T) { + concurrencyLimit := 3 + var runningJobs int32 + + makeXferFunc := func(id string) DoFunc { + return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer { + xfer := NewTransfer() + go func() { + <-start + totalJobs := atomic.AddInt32(&runningJobs, 1) + if int(totalJobs) > concurrencyLimit { + t.Fatalf("too many jobs running") + } + for i := 0; i <= 10; i++ { + progressChan <- progress.Progress{ID: id, Action: "testing", Current: int64(i), Total: 10} + time.Sleep(10 * time.Millisecond) + } + atomic.AddInt32(&runningJobs, -1) + close(progressChan) + }() + return xfer + } + } + + tm := NewTransferManager(concurrencyLimit) + progressChan := make(chan progress.Progress) + progressDone := make(chan struct{}) + receivedProgress := make(map[string]int64) + + go func() { + for p := range progressChan { + receivedProgress[p.ID] = p.Current + } + close(progressDone) + }() + + // Start more transfers than the concurrency limit + ids := []string{"id1", "id2", "id3", "id4", "id5", "id6", "id7", "id8"} + xfers := make([]Transfer, len(ids)) + watchers := make([]*Watcher, len(ids)) + for i, id := range ids { + xfers[i], watchers[i] = tm.Transfer(id, makeXferFunc(id), progress.ChanOutput(progressChan)) + } + + for i, xfer := range xfers { + <-xfer.Done() + xfer.Release(watchers[i]) + } 
+ close(progressChan) + <-progressDone + + for _, id := range ids { + if receivedProgress[id] != 10 { + t.Fatalf("final progress value %d instead of 10", receivedProgress[id]) + } + } +} + +func TestInactiveJobs(t *testing.T) { + concurrencyLimit := 3 + var runningJobs int32 + testDone := make(chan struct{}) + + makeXferFunc := func(id string) DoFunc { + return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer { + xfer := NewTransfer() + go func() { + <-start + totalJobs := atomic.AddInt32(&runningJobs, 1) + if int(totalJobs) > concurrencyLimit { + t.Fatalf("too many jobs running") + } + for i := 0; i <= 10; i++ { + progressChan <- progress.Progress{ID: id, Action: "testing", Current: int64(i), Total: 10} + time.Sleep(10 * time.Millisecond) + } + atomic.AddInt32(&runningJobs, -1) + close(inactive) + <-testDone + close(progressChan) + }() + return xfer + } + } + + tm := NewTransferManager(concurrencyLimit) + progressChan := make(chan progress.Progress) + progressDone := make(chan struct{}) + receivedProgress := make(map[string]int64) + + go func() { + for p := range progressChan { + receivedProgress[p.ID] = p.Current + } + close(progressDone) + }() + + // Start more transfers than the concurrency limit + ids := []string{"id1", "id2", "id3", "id4", "id5", "id6", "id7", "id8"} + xfers := make([]Transfer, len(ids)) + watchers := make([]*Watcher, len(ids)) + for i, id := range ids { + xfers[i], watchers[i] = tm.Transfer(id, makeXferFunc(id), progress.ChanOutput(progressChan)) + } + + close(testDone) + for i, xfer := range xfers { + <-xfer.Done() + xfer.Release(watchers[i]) + } + close(progressChan) + <-progressDone + + for _, id := range ids { + if receivedProgress[id] != 10 { + t.Fatalf("final progress value %d instead of 10", receivedProgress[id]) + } + } +} + +func TestWatchRelease(t *testing.T) { + ready := make(chan struct{}) + + makeXferFunc := func(id string) DoFunc { + return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer { + xfer := NewTransfer() + go func() { + defer func() { + close(progressChan) + }() + <-ready + for i := int64(0); ; i++ { + select { + case <-time.After(10 * time.Millisecond): + case <-xfer.Context().Done(): + return + } + progressChan <- progress.Progress{ID: id, Action: "testing", Current: i, Total: 10} + } + }() + return xfer + } + } + + tm := NewTransferManager(5) + + type watcherInfo struct { + watcher *Watcher + progressChan chan progress.Progress + progressDone chan struct{} + receivedFirstProgress chan struct{} + } + + progressConsumer := func(w watcherInfo) { + first := true + for range w.progressChan { + if first { + close(w.receivedFirstProgress) + } + first = false + } + close(w.progressDone) + } + + // Start a transfer + watchers := make([]watcherInfo, 5) + var xfer Transfer + watchers[0].progressChan = make(chan progress.Progress) + watchers[0].progressDone = make(chan struct{}) + watchers[0].receivedFirstProgress = make(chan struct{}) + xfer, watchers[0].watcher = tm.Transfer("id1", makeXferFunc("id1"), progress.ChanOutput(watchers[0].progressChan)) + go progressConsumer(watchers[0]) + + // Give it multiple watchers + for i := 1; i != len(watchers); i++ { + watchers[i].progressChan = make(chan progress.Progress) + watchers[i].progressDone = make(chan struct{}) + watchers[i].receivedFirstProgress = make(chan struct{}) + watchers[i].watcher = xfer.Watch(progress.ChanOutput(watchers[i].progressChan)) + go progressConsumer(watchers[i]) + } + + // Now 
that the watchers are set up, allow the transfer goroutine to + // proceed. + close(ready) + + // Confirm that each watcher gets progress output. + for _, w := range watchers { + <-w.receivedFirstProgress + } + + // Release one watcher every 5ms + for _, w := range watchers { + xfer.Release(w.watcher) + <-time.After(5 * time.Millisecond) + } + + // Now that all watchers have been released, Released() should + // return a closed channel. + <-xfer.Released() + + // Done() should return a closed channel because the xfer func returned + // due to cancellation. + <-xfer.Done() + + for _, w := range watchers { + close(w.progressChan) + <-w.progressDone + } +} + +func TestWatchFinishedTransfer(t *testing.T) { + makeXferFunc := func(id string) DoFunc { + return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer { + xfer := NewTransfer() + go func() { + // Finish immediately + close(progressChan) + }() + return xfer + } + } + + tm := NewTransferManager(5) + + // Start a transfer + watchers := make([]*Watcher, 3) + var xfer Transfer + xfer, watchers[0] = tm.Transfer("id1", makeXferFunc("id1"), progress.ChanOutput(make(chan progress.Progress))) + + // Give it a watcher immediately + watchers[1] = xfer.Watch(progress.ChanOutput(make(chan progress.Progress))) + + // Wait for the transfer to complete + <-xfer.Done() + + // Set up another watcher + watchers[2] = xfer.Watch(progress.ChanOutput(make(chan progress.Progress))) + + // Release the watchers + for _, w := range watchers { + xfer.Release(w) + } + + // Now that all watchers have been released, Released() should + // return a closed channel. + <-xfer.Released() +} + +func TestDuplicateTransfer(t *testing.T) { + ready := make(chan struct{}) + + var xferFuncCalls int32 + + makeXferFunc := func(id string) DoFunc { + return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer { + atomic.AddInt32(&xferFuncCalls, 1) + xfer := NewTransfer() + go func() { + defer func() { + close(progressChan) + }() + <-ready + for i := int64(0); ; i++ { + select { + case <-time.After(10 * time.Millisecond): + case <-xfer.Context().Done(): + return + } + progressChan <- progress.Progress{ID: id, Action: "testing", Current: i, Total: 10} + } + }() + return xfer + } + } + + tm := NewTransferManager(5) + + type transferInfo struct { + xfer Transfer + watcher *Watcher + progressChan chan progress.Progress + progressDone chan struct{} + receivedFirstProgress chan struct{} + } + + progressConsumer := func(t transferInfo) { + first := true + for range t.progressChan { + if first { + close(t.receivedFirstProgress) + } + first = false + } + close(t.progressDone) + } + + // Try to start multiple transfers with the same ID + transfers := make([]transferInfo, 5) + for i := range transfers { + t := &transfers[i] + t.progressChan = make(chan progress.Progress) + t.progressDone = make(chan struct{}) + t.receivedFirstProgress = make(chan struct{}) + t.xfer, t.watcher = tm.Transfer("id1", makeXferFunc("id1"), progress.ChanOutput(t.progressChan)) + go progressConsumer(*t) + } + + // Allow the transfer goroutine to proceed. + close(ready) + + // Confirm that each watcher gets progress output. + for _, t := range transfers { + <-t.receivedFirstProgress + } + + // Confirm that the transfer function was called exactly once. 
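+	// All five Transfer calls above used the key "id1", so the manager
+	// must have coalesced them into a single underlying transfer.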
+ if xferFuncCalls != 1 { + t.Fatal("transfer function wasn't called exactly once") + } + + // Release one watcher every 5ms + for _, t := range transfers { + t.xfer.Release(t.watcher) + <-time.After(5 * time.Millisecond) + } + + for _, t := range transfers { + // Now that all watchers have been released, Released() should + // return a closed channel. + <-t.xfer.Released() + // Done() should return a closed channel because the xfer func returned + // due to cancellation. + <-t.xfer.Done() + } + + for _, t := range transfers { + close(t.progressChan) + <-t.progressDone + } +} diff --git a/components/engine/distribution/xfer/upload.go b/components/engine/distribution/xfer/upload.go new file mode 100644 index 0000000000..58422e57a1 --- /dev/null +++ b/components/engine/distribution/xfer/upload.go @@ -0,0 +1,174 @@ +package xfer + +import ( + "errors" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/progress" + "golang.org/x/net/context" +) + +const maxUploadAttempts = 5 + +// LayerUploadManager provides task management and progress reporting for +// uploads. +type LayerUploadManager struct { + tm TransferManager + waitDuration time.Duration +} + +// SetConcurrency sets the max concurrent uploads for each push +func (lum *LayerUploadManager) SetConcurrency(concurrency int) { + lum.tm.SetConcurrency(concurrency) +} + +// NewLayerUploadManager returns a new LayerUploadManager. +func NewLayerUploadManager(concurrencyLimit int, options ...func(*LayerUploadManager)) *LayerUploadManager { + manager := LayerUploadManager{ + tm: NewTransferManager(concurrencyLimit), + waitDuration: time.Second, + } + for _, option := range options { + option(&manager) + } + return &manager +} + +type uploadTransfer struct { + Transfer + + remoteDescriptor distribution.Descriptor + err error +} + +// An UploadDescriptor references a layer that may need to be uploaded. +type UploadDescriptor interface { + // Key returns the key used to deduplicate uploads. + Key() string + // ID returns the ID for display purposes. + ID() string + // DiffID should return the DiffID for this layer. + DiffID() layer.DiffID + // Upload is called to perform the Upload. + Upload(ctx context.Context, progressOutput progress.Output) (distribution.Descriptor, error) + // SetRemoteDescriptor provides the distribution.Descriptor that was + // returned by Upload. This descriptor is not to be confused with + // the UploadDescriptor interface, which is used for internally + // identifying layers that are being uploaded. + SetRemoteDescriptor(descriptor distribution.Descriptor) +} + +// Upload is a blocking function which ensures the listed layers are present on +// the remote registry. It uses the string returned by the Key method to +// deduplicate uploads. 
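+// If ctx is cancelled, Upload returns ctx.Err() without waiting for the
+// remaining transfers; the deferred Release calls then let the
+// TransferManager cancel any upload that no longer has watchers.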
+func (lum *LayerUploadManager) Upload(ctx context.Context, layers []UploadDescriptor, progressOutput progress.Output) error { + var ( + uploads []*uploadTransfer + dedupDescriptors = make(map[string]*uploadTransfer) + ) + + for _, descriptor := range layers { + progress.Update(progressOutput, descriptor.ID(), "Preparing") + + key := descriptor.Key() + if _, present := dedupDescriptors[key]; present { + continue + } + + xferFunc := lum.makeUploadFunc(descriptor) + upload, watcher := lum.tm.Transfer(descriptor.Key(), xferFunc, progressOutput) + defer upload.Release(watcher) + uploads = append(uploads, upload.(*uploadTransfer)) + dedupDescriptors[key] = upload.(*uploadTransfer) + } + + for _, upload := range uploads { + select { + case <-ctx.Done(): + return ctx.Err() + case <-upload.Transfer.Done(): + if upload.err != nil { + return upload.err + } + } + } + for _, l := range layers { + l.SetRemoteDescriptor(dedupDescriptors[l.Key()].remoteDescriptor) + } + + return nil +} + +func (lum *LayerUploadManager) makeUploadFunc(descriptor UploadDescriptor) DoFunc { + return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer { + u := &uploadTransfer{ + Transfer: NewTransfer(), + } + + go func() { + defer func() { + close(progressChan) + }() + + progressOutput := progress.ChanOutput(progressChan) + + select { + case <-start: + default: + progress.Update(progressOutput, descriptor.ID(), "Waiting") + <-start + } + + retries := 0 + for { + remoteDescriptor, err := descriptor.Upload(u.Transfer.Context(), progressOutput) + if err == nil { + u.remoteDescriptor = remoteDescriptor + break + } + + // If an error was returned because the context + // was cancelled, we shouldn't retry. + select { + case <-u.Transfer.Context().Done(): + u.err = err + return + default: + } + + retries++ + if _, isDNR := err.(DoNotRetry); isDNR || retries == maxUploadAttempts { + logrus.Errorf("Upload failed: %v", err) + u.err = err + return + } + + logrus.Errorf("Upload failed, retrying: %v", err) + delay := retries * 5 + ticker := time.NewTicker(lum.waitDuration) + + selectLoop: + for { + progress.Updatef(progressOutput, descriptor.ID(), "Retrying in %d second%s", delay, (map[bool]string{true: "s"})[delay != 1]) + select { + case <-ticker.C: + delay-- + if delay == 0 { + ticker.Stop() + break selectLoop + } + case <-u.Transfer.Context().Done(): + ticker.Stop() + u.err = errors.New("upload cancelled during retry delay") + return + } + } + } + }() + + return u + } +} diff --git a/components/engine/distribution/xfer/upload_test.go b/components/engine/distribution/xfer/upload_test.go new file mode 100644 index 0000000000..066019f263 --- /dev/null +++ b/components/engine/distribution/xfer/upload_test.go @@ -0,0 +1,134 @@ +package xfer + +import ( + "errors" + "sync/atomic" + "testing" + "time" + + "github.com/docker/distribution" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/progress" + "golang.org/x/net/context" +) + +const maxUploadConcurrency = 3 + +type mockUploadDescriptor struct { + currentUploads *int32 + diffID layer.DiffID + simulateRetries int +} + +// Key returns the key used to deduplicate downloads. +func (u *mockUploadDescriptor) Key() string { + return u.diffID.String() +} + +// ID returns the ID for display purposes. +func (u *mockUploadDescriptor) ID() string { + return u.diffID.String() +} + +// DiffID should return the DiffID for this layer. 
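+// (In this mock, Key, ID, and DiffID all derive from the same digest, so the
+// duplicate descriptor in uploadDescriptors below collapses into one upload.)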
+func (u *mockUploadDescriptor) DiffID() layer.DiffID { + return u.diffID +} + +// SetRemoteDescriptor is not used in the mock. +func (u *mockUploadDescriptor) SetRemoteDescriptor(remoteDescriptor distribution.Descriptor) { +} + +// Upload is called to perform the upload. +func (u *mockUploadDescriptor) Upload(ctx context.Context, progressOutput progress.Output) (distribution.Descriptor, error) { + if u.currentUploads != nil { + defer atomic.AddInt32(u.currentUploads, -1) + + if atomic.AddInt32(u.currentUploads, 1) > maxUploadConcurrency { + return distribution.Descriptor{}, errors.New("concurrency limit exceeded") + } + } + + // Sleep a bit to simulate a time-consuming upload. + for i := int64(0); i <= 10; i++ { + select { + case <-ctx.Done(): + return distribution.Descriptor{}, ctx.Err() + case <-time.After(10 * time.Millisecond): + progressOutput.WriteProgress(progress.Progress{ID: u.ID(), Current: i, Total: 10}) + } + } + + if u.simulateRetries != 0 { + u.simulateRetries-- + return distribution.Descriptor{}, errors.New("simulating retry") + } + + return distribution.Descriptor{}, nil +} + +func uploadDescriptors(currentUploads *int32) []UploadDescriptor { + return []UploadDescriptor{ + &mockUploadDescriptor{currentUploads, layer.DiffID("sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf"), 0}, + &mockUploadDescriptor{currentUploads, layer.DiffID("sha256:1515325234325236634634608943609283523908626098235490238423902343"), 0}, + &mockUploadDescriptor{currentUploads, layer.DiffID("sha256:6929356290463485374960346430698374523437683470934634534953453453"), 0}, + &mockUploadDescriptor{currentUploads, layer.DiffID("sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf"), 0}, + &mockUploadDescriptor{currentUploads, layer.DiffID("sha256:8159352387436803946235346346368745389534789534897538734598734987"), 1}, + &mockUploadDescriptor{currentUploads, layer.DiffID("sha256:4637863963478346897346987346987346789346789364879364897364987346"), 0}, + } +} + +func TestSuccessfulUpload(t *testing.T) { + lum := NewLayerUploadManager(maxUploadConcurrency, func(m *LayerUploadManager) { m.waitDuration = time.Millisecond }) + + progressChan := make(chan progress.Progress) + progressDone := make(chan struct{}) + receivedProgress := make(map[string]int64) + + go func() { + for p := range progressChan { + receivedProgress[p.ID] = p.Current + } + close(progressDone) + }() + + var currentUploads int32 + descriptors := uploadDescriptors(¤tUploads) + + err := lum.Upload(context.Background(), descriptors, progress.ChanOutput(progressChan)) + if err != nil { + t.Fatalf("upload error: %v", err) + } + + close(progressChan) + <-progressDone +} + +func TestCancelledUpload(t *testing.T) { + lum := NewLayerUploadManager(maxUploadConcurrency, func(m *LayerUploadManager) { m.waitDuration = time.Millisecond }) + + progressChan := make(chan progress.Progress) + progressDone := make(chan struct{}) + + go func() { + for range progressChan { + } + close(progressDone) + }() + + ctx, cancel := context.WithCancel(context.Background()) + + go func() { + <-time.After(time.Millisecond) + cancel() + }() + + descriptors := uploadDescriptors(nil) + err := lum.Upload(ctx, descriptors, progress.ChanOutput(progressChan)) + if err != context.Canceled { + t.Fatal("expected upload to be cancelled") + } + + close(progressChan) + <-progressDone +} diff --git a/components/engine/dockerversion/useragent.go b/components/engine/dockerversion/useragent.go new file mode 100644 index 0000000000..53632cbc35 --- /dev/null 
+++ b/components/engine/dockerversion/useragent.go
@@ -0,0 +1,76 @@
+package dockerversion
+
+import (
+	"fmt"
+	"runtime"
+
+	"github.com/docker/docker/pkg/parsers/kernel"
+	"github.com/docker/docker/pkg/useragent"
+	"golang.org/x/net/context"
+)
+
+// UAStringKey is used as the key type for the user-agent string in the net/context struct
+const UAStringKey = "upstream-user-agent"
+
+// DockerUserAgent is the User-Agent the Docker client uses to identify itself.
+// In accordance with RFC 7231 (5.5.3), it is of the form:
+//    [docker client's UA] UpstreamClient([upstream client's UA])
+func DockerUserAgent(ctx context.Context) string {
+	httpVersion := make([]useragent.VersionInfo, 0, 6)
+	httpVersion = append(httpVersion, useragent.VersionInfo{Name: "docker", Version: Version})
+	httpVersion = append(httpVersion, useragent.VersionInfo{Name: "go", Version: runtime.Version()})
+	httpVersion = append(httpVersion, useragent.VersionInfo{Name: "git-commit", Version: GitCommit})
+	if kernelVersion, err := kernel.GetKernelVersion(); err == nil {
+		httpVersion = append(httpVersion, useragent.VersionInfo{Name: "kernel", Version: kernelVersion.String()})
+	}
+	httpVersion = append(httpVersion, useragent.VersionInfo{Name: "os", Version: runtime.GOOS})
+	httpVersion = append(httpVersion, useragent.VersionInfo{Name: "arch", Version: runtime.GOARCH})
+
+	dockerUA := useragent.AppendVersions("", httpVersion...)
+	upstreamUA := getUserAgentFromContext(ctx)
+	if len(upstreamUA) > 0 {
+		ret := insertUpstreamUserAgent(upstreamUA, dockerUA)
+		return ret
+	}
+	return dockerUA
+}
+
+// getUserAgentFromContext returns the previously saved user-agent string stored in ctx, if one exists
+func getUserAgentFromContext(ctx context.Context) string {
+	var upstreamUA string
+	if ctx != nil {
+		if ki := ctx.Value(UAStringKey); ki != nil {
+			upstreamUA = ki.(string)
+		}
+	}
+	return upstreamUA
+}
+
+// escapeStr returns s with every rune in charsToEscape escaped by a backslash
+func escapeStr(s string, charsToEscape string) string {
+	var ret string
+	for _, currRune := range s {
+		appended := false
+		for _, escapeableRune := range charsToEscape {
+			if currRune == escapeableRune {
+				ret += `\` + string(currRune)
+				appended = true
+				break
+			}
+		}
+		if !appended {
+			ret += string(currRune)
+		}
+	}
+	return ret
+}
+
+// insertUpstreamUserAgent adds the upstream client useragent to create a user-agent
+// string of the form:
+//   $dockerUA UpstreamClient($upstreamUA)
+func insertUpstreamUserAgent(upstreamUA string, dockerUA string) string {
+	charsToEscape := `();\`
+	upstreamUAEscaped := escapeStr(upstreamUA, charsToEscape)
+	return fmt.Sprintf("%s UpstreamClient(%s)", dockerUA, upstreamUAEscaped)
+}
diff --git a/components/engine/dockerversion/version_lib.go b/components/engine/dockerversion/version_lib.go
new file mode 100644
index 0000000000..33f77d3ce6
--- /dev/null
+++ b/components/engine/dockerversion/version_lib.go
@@ -0,0 +1,16 @@
+// +build !autogen
+
+// Package dockerversion is auto-generated at build-time
+package dockerversion
+
+// Default build-time variables for library-import.
+// This file is overridden on build with build-time information.
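+// (A release build generates dockerversion/version_autogen.go and compiles
+// with the `autogen` build tag, which excludes this file; the values below
+// are placeholders for library imports only.)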
+const ( + GitCommit string = "library-import" + Version string = "library-import" + BuildTime string = "library-import" + IAmStatic string = "library-import" + ContainerdCommitID string = "library-import" + RuncCommitID string = "library-import" + InitCommitID string = "library-import" +) diff --git a/components/engine/docs/README.md b/components/engine/docs/README.md new file mode 100644 index 0000000000..da93093075 --- /dev/null +++ b/components/engine/docs/README.md @@ -0,0 +1,30 @@ +# The non-reference docs have been moved! + + + +The documentation for Docker Engine has been merged into +[the general documentation repo](https://github.com/docker/docker.github.io). + +See the [README](https://github.com/docker/docker.github.io/blob/master/README.md) +for instructions on contributing to and building the documentation. + +If you'd like to edit the current published version of the Engine docs, +do it in the master branch here: +https://github.com/docker/docker.github.io/tree/master/engine + +If you need to document the functionality of an upcoming Engine release, +use the `vnext-engine` branch: +https://github.com/docker/docker.github.io/tree/vnext-engine/engine + +The reference docs have been left in docker/docker (this repo), which remains +the place to edit them. + +The docs in the general repo are open-source and we appreciate +your feedback and pull requests! diff --git a/components/engine/docs/api/v1.18.md b/components/engine/docs/api/v1.18.md new file mode 100644 index 0000000000..3c82371b4f --- /dev/null +++ b/components/engine/docs/api/v1.18.md @@ -0,0 +1,2159 @@ +--- +title: "Engine API v1.18" +description: "API Documentation for Docker" +keywords: "API, Docker, rcli, REST, documentation" +redirect_from: +- /engine/reference/api/docker_remote_api_v1.18/ +- /reference/api/docker_remote_api_v1.18/ +--- + + + +## 1. Brief introduction + + - The daemon listens on `unix:///var/run/docker.sock` but you can + [Bind Docker to another host/port or a Unix socket](../reference/commandline/dockerd.md#bind-docker-to-another-host-port-or-a-unix-socket). + - The API tends to be REST, but for some complex commands, like `attach` + or `pull`, the HTTP connection is hijacked to transport `stdout`, + `stdin` and `stderr`. + +## 2. 
Endpoints + +### 2.1 Containers + +#### List containers + +`GET /containers/json` + +List containers + +**Example request**: + + GET /v1.18/containers/json?all=1&before=8dfafdbc3a40&size=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Id": "8dfafdbc3a40", + "Names":["/boring_feynman"], + "Image": "ubuntu:latest", + "Command": "echo 1", + "Created": 1367854155, + "Status": "Exit 0", + "Ports": [{"PrivatePort": 2222, "PublicPort": 3333, "Type": "tcp"}], + "Labels": { + "com.example.vendor": "Acme", + "com.example.license": "GPL", + "com.example.version": "1.0" + }, + "SizeRw": 12288, + "SizeRootFs": 0 + }, + { + "Id": "9cd87474be90", + "Names":["/coolName"], + "Image": "ubuntu:latest", + "Command": "echo 222222", + "Created": 1367854155, + "Status": "Exit 0", + "Ports": [], + "Labels": {}, + "SizeRw": 12288, + "SizeRootFs": 0 + }, + { + "Id": "3176a2479c92", + "Names":["/sleepy_dog"], + "Image": "ubuntu:latest", + "Command": "echo 3333333333333333", + "Created": 1367854154, + "Status": "Exit 0", + "Ports":[], + "Labels": {}, + "SizeRw":12288, + "SizeRootFs":0 + }, + { + "Id": "4cb07b47f9fb", + "Names":["/running_cat"], + "Image": "ubuntu:latest", + "Command": "echo 444444444444444444444444444444444", + "Created": 1367854152, + "Status": "Exit 0", + "Ports": [], + "Labels": {}, + "SizeRw": 12288, + "SizeRootFs": 0 + } + ] + +**Query parameters**: + +- **all** – 1/True/true or 0/False/false, Show all containers. + Only running containers are shown by default (i.e., this defaults to false) +- **limit** – Show `limit` last created + containers, include non-running ones. +- **since** – Show only containers created since Id, include + non-running ones. +- **before** – Show only containers created before Id, include + non-running ones. +- **size** – 1/True/true or 0/False/false, Show the containers + sizes +- **filters** - a JSON encoded value of the filters (a `map[string][]string`) to process on the containers list. 
Available filters:
+  - `exited=<int>` -- containers with exit code of `<int>`
+  - `status=`(`restarting`|`running`|`paused`|`exited`)
+  - `label=key` or `label="key=value"` of a container label
+
+**Status codes**:
+
+- **200** – no error
+- **400** – bad parameter
+- **500** – server error
+
+#### Create a container
+
+`POST /containers/create`
+
+Create a container
+
+**Example request**:
+
+    POST /v1.18/containers/create HTTP/1.1
+    Content-Type: application/json
+
+    {
+      "Hostname": "",
+      "Domainname": "",
+      "User": "",
+      "AttachStdin": false,
+      "AttachStdout": true,
+      "AttachStderr": true,
+      "Tty": false,
+      "OpenStdin": false,
+      "StdinOnce": false,
+      "Env": [
+        "FOO=bar",
+        "BAZ=quux"
+      ],
+      "Cmd": [
+        "date"
+      ],
+      "Entrypoint": null,
+      "Image": "ubuntu",
+      "Labels": {
+        "com.example.vendor": "Acme",
+        "com.example.license": "GPL",
+        "com.example.version": "1.0"
+      },
+      "Volumes": {
+        "/volumes/data": {}
+      },
+      "WorkingDir": "",
+      "NetworkDisabled": false,
+      "MacAddress": "12:34:56:78:9a:bc",
+      "ExposedPorts": {
+        "22/tcp": {}
+      },
+      "HostConfig": {
+        "Binds": ["/tmp:/tmp"],
+        "Links": ["redis3:redis"],
+        "LxcConf": {"lxc.utsname":"docker"},
+        "Memory": 0,
+        "MemorySwap": 0,
+        "CpuShares": 512,
+        "CpusetCpus": "0,1",
+        "PidMode": "",
+        "PortBindings": { "22/tcp": [{ "HostPort": "11022" }] },
+        "PublishAllPorts": false,
+        "Privileged": false,
+        "ReadonlyRootfs": false,
+        "Dns": ["8.8.8.8"],
+        "DnsSearch": [""],
+        "ExtraHosts": null,
+        "VolumesFrom": ["parent", "other:ro"],
+        "CapAdd": ["NET_ADMIN"],
+        "CapDrop": ["MKNOD"],
+        "RestartPolicy": { "Name": "", "MaximumRetryCount": 0 },
+        "NetworkMode": "bridge",
+        "Devices": [],
+        "Ulimits": [{}],
+        "LogConfig": { "Type": "json-file", "Config": {} },
+        "SecurityOpt": [],
+        "CgroupParent": ""
+      }
+    }
+
+**Example response**:
+
+    HTTP/1.1 201 Created
+    Content-Type: application/json
+
+    {
+      "Id":"e90e34656806",
+      "Warnings":[]
+    }
+
+**JSON parameters**:
+
+- **Hostname** - A string value containing the hostname to use for the
+  container.
+- **Domainname** - A string value containing the domain name to use
+  for the container.
+- **User** - A string value specifying the user inside the container.
+- **AttachStdin** - Boolean value, attaches to `stdin`.
+- **AttachStdout** - Boolean value, attaches to `stdout`.
+- **AttachStderr** - Boolean value, attaches to `stderr`.
+- **Tty** - Boolean value, attach standard streams to a `tty`, including `stdin` if it is not closed.
+- **OpenStdin** - Boolean value, opens `stdin`.
+- **StdinOnce** - Boolean value, close `stdin` after the first attached client disconnects.
+- **Env** - A list of environment variables in the form of `["VAR=value", ...]`
+- **Labels** - Adds a map of labels to a container. To specify a map: `{"key":"value", ... }`
+- **Cmd** - Command to run specified as a string or an array of strings.
+- **Entrypoint** - Set the entry point for the container as a string or an array
+  of strings.
+- **Image** - A string specifying the image name to use for the container.
+- **Volumes** - An object mapping mount point paths (strings) inside the
+  container to empty objects.
+- **WorkingDir** - A string specifying the working directory for commands to
+  run in.
+- **NetworkDisabled** - Boolean value, when true disables networking for the
+  container.
+- **ExposedPorts** - An object mapping ports to an empty object in the form of:
+  `"ExposedPorts": { "<port>/<tcp|udp>: {}" }`
+- **HostConfig**
+  - **Binds** – A list of bind-mounts for this container. Each item is a
+    string in one of these forms:
+    + `host-src:container-dest` to bind-mount a host path into the
+      container. Both `host-src`, and `container-dest` must be an
+      _absolute_ path.
+    + `host-src:container-dest:ro` to make the bind-mount read-only
+      inside the container. Both `host-src`, and `container-dest` must be
+      an _absolute_ path.
+  - **Links** - A list of links for the container. Each link entry should be
+    in the form of `container_name:alias`.
+  - **LxcConf** - LXC specific configurations. These configurations only
+    work when using the `lxc` execution driver.
+  - **Memory** - Memory limit in bytes.
+  - **MemorySwap** - Total memory limit (memory + swap); set `-1` to enable unlimited swap.
+    You must use this with `memory` and make the swap value larger than `memory`.
+  - **CpuShares** - An integer value containing the container's CPU shares
+    (i.e., the relative weight vs. other containers).
+  - **CpusetCpus** - String value containing the `cgroups CpusetCpus` to use.
+  - **PidMode** - Set the PID (Process) Namespace mode for the container;
+    `"container:<name|id>"`: joins another container's PID namespace;
+    `"host"`: use the host's PID namespace inside the container.
+  - **PortBindings** - A map of exposed container ports and the host port they
+    should map to. A JSON object in the form
+    `{ <port>/<protocol>: [{ "HostPort": "<port>" }] }`
+    Take note that `port` is specified as a string and not an integer value.
+  - **PublishAllPorts** - Allocates a random host port for all of a container's
+    exposed ports. Specified as a boolean value.
+  - **Privileged** - Gives the container full access to the host. Specified as
+    a boolean value.
+  - **ReadonlyRootfs** - Mount the container's root filesystem as read only.
+    Specified as a boolean value.
+  - **Dns** - A list of DNS servers for the container to use.
+  - **DnsSearch** - A list of DNS search domains.
+  - **ExtraHosts** - A list of hostnames/IP mappings to add to the
+    container's `/etc/hosts` file. Specified in the form `["hostname:IP"]`.
+  - **VolumesFrom** - A list of volumes to inherit from another container.
+    Specified in the form `<container name>[:<ro|rw>]`.
+  - **CapAdd** - A list of kernel capabilities to add to the container.
+  - **CapDrop** - A list of kernel capabilities to drop from the container.
+  - **RestartPolicy** – The behavior to apply when the container exits. The
+    value is an object with a `Name` property of either `"always"` to
+    always restart or `"on-failure"` to restart only when the container
+    exit code is non-zero. If `on-failure` is used, `MaximumRetryCount`
+    controls the number of times to retry before giving up.
+    The default is not to restart. (optional)
+    An ever-increasing delay (double the previous delay, starting at 100 ms)
+    is added before each restart to prevent flooding the server.
+  - **NetworkMode** - Sets the networking mode for the container. Supported
+    values are: `bridge`, `host`, `none`, and `container:<name|id>`
+  - **Devices** - A list of devices to add to the container specified as a JSON object in the
+    form
+    `{ "PathOnHost": "/dev/deviceName", "PathInContainer": "/dev/deviceName", "CgroupPermissions": "mrw"}`
+  - **Ulimits** - A list of ulimits to set in the container, specified as
+    `{ "Name": <name>, "Soft": <soft limit>, "Hard": <hard limit> }`, for example:
+    `Ulimits: { "Name": "nofile", "Soft": 1024, "Hard": 2048 }`
+  - **SecurityOpt**: A list of string values to customize labels for MLS
+    systems, such as SELinux.
+  - **LogConfig** - Log configuration for the container, specified as a JSON object in the form
+    `{ "Type": "<driver_name>", "Config": {"key1": "val1"}}`.
+ Available types: `json-file`, `syslog`, `journald`, `none`. + `json-file` logging driver. + - **CgroupParent** - Path to `cgroups` under which the container's `cgroup` is created. If the path is not absolute, the path is considered to be relative to the `cgroups` path of the init process. Cgroups are created if they do not already exist. + +**Query parameters**: + +- **name** – Assign the specified name to the container. Must + match `/?[a-zA-Z0-9_-]+`. + +**Status codes**: + +- **201** – no error +- **400** – bad parameter +- **404** – no such container +- **406** – impossible to attach (container not running) +- **409** – conflict +- **500** – server error + +#### Inspect a container + +`GET /containers/(id or name)/json` + +Return low-level information on the container `id` + +**Example request**: + + GET /v1.18/containers/4fa6e0f0c678/json HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "AppArmorProfile": "", + "Args": [ + "-c", + "exit 9" + ], + "Config": { + "AttachStderr": true, + "AttachStdin": false, + "AttachStdout": true, + "Cmd": [ + "/bin/sh", + "-c", + "exit 9" + ], + "Domainname": "", + "Entrypoint": null, + "Env": [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + ], + "ExposedPorts": null, + "Hostname": "ba033ac44011", + "Image": "ubuntu", + "Labels": { + "com.example.vendor": "Acme", + "com.example.license": "GPL", + "com.example.version": "1.0" + }, + "MacAddress": "", + "NetworkDisabled": false, + "OnBuild": null, + "OpenStdin": false, + "PortSpecs": null, + "StdinOnce": false, + "Tty": false, + "User": "", + "Volumes": null, + "WorkingDir": "" + }, + "Created": "2015-01-06T15:47:31.485331387Z", + "Driver": "devicemapper", + "ExecDriver": "native-0.2", + "ExecIDs": null, + "HostConfig": { + "Binds": null, + "CapAdd": null, + "CapDrop": null, + "ContainerIDFile": "", + "CpusetCpus": "", + "CpuShares": 0, + "Devices": [], + "Dns": null, + "DnsSearch": null, + "ExtraHosts": null, + "IpcMode": "", + "Links": null, + "LxcConf": [], + "Memory": 0, + "MemorySwap": 0, + "NetworkMode": "bridge", + "PidMode": "", + "PortBindings": {}, + "Privileged": false, + "ReadonlyRootfs": false, + "PublishAllPorts": false, + "RestartPolicy": { + "MaximumRetryCount": 2, + "Name": "on-failure" + }, + "LogConfig": { + "Config": null, + "Type": "json-file" + }, + "SecurityOpt": null, + "VolumesFrom": null, + "Ulimits": [{}] + }, + "HostnamePath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hostname", + "HostsPath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hosts", + "LogPath": "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b-json.log", + "Id": "ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39", + "Image": "04c5d3b7b0656168630d3ba35d8889bd0e9caafcaeb3004d2bfbc47e7c5d35d2", + "MountLabel": "", + "Name": "/boring_euclid", + "NetworkSettings": { + "Bridge": "", + "Gateway": "", + "IPAddress": "", + "IPPrefixLen": 0, + "MacAddress": "", + "PortMapping": null, + "Ports": null + }, + "Path": "/bin/sh", + "ProcessLabel": "", + "ResolvConfPath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/resolv.conf", + "RestartCount": 1, + "State": { + "Error": "", + "ExitCode": 9, + "FinishedAt": "2015-01-06T15:47:32.080254511Z", + "OOMKilled": false, + "Paused": false, + "Pid": 
0,
+            "Restarting": false,
+            "Running": true,
+            "StartedAt": "2015-01-06T15:47:32.072697474Z"
+        },
+        "Volumes": {},
+        "VolumesRW": {}
+    }
+
+**Status codes**:
+
+- **200** – no error
+- **404** – no such container
+- **500** – server error
+
+#### List processes running inside a container
+
+`GET /containers/(id or name)/top`
+
+List processes running inside the container `id`. On Unix systems this
+is done by running the `ps` command. This endpoint is not
+supported on Windows.
+
+**Example request**:
+
+    GET /v1.18/containers/4fa6e0f0c678/top HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    {
+       "Titles" : [
+         "UID", "PID", "PPID", "C", "STIME", "TTY", "TIME", "CMD"
+       ],
+       "Processes" : [
+         [
+           "root", "13642", "882", "0", "17:03", "pts/0", "00:00:00", "/bin/bash"
+         ],
+         [
+           "root", "13735", "13642", "0", "17:06", "pts/0", "00:00:00", "sleep 10"
+         ]
+       ]
+    }
+
+**Example request**:
+
+    GET /v1.18/containers/4fa6e0f0c678/top?ps_args=aux HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    {
+      "Titles" : [
+        "USER","PID","%CPU","%MEM","VSZ","RSS","TTY","STAT","START","TIME","COMMAND"
+      ],
+      "Processes" : [
+        [
+          "root","13642","0.0","0.1","18172","3184","pts/0","Ss","17:03","0:00","/bin/bash"
+        ],
+        [
+          "root","13895","0.0","0.0","4348","692","pts/0","S+","17:15","0:00","sleep 10"
+        ]
+      ]
+    }
+
+**Query parameters**:
+
+- **ps_args** – `ps` arguments to use (e.g., `aux`), defaults to `-ef`
+
+**Status codes**:
+
+- **200** – no error
+- **404** – no such container
+- **500** – server error
+
+#### Get container logs
+
+`GET /containers/(id or name)/logs`
+
+Get `stdout` and `stderr` logs from the container `id`
+
+> **Note**:
+> This endpoint works only for containers with the `json-file` or `journald` logging drivers.
+
+**Example request**:
+
+    GET /v1.18/containers/4fa6e0f0c678/logs?stderr=1&stdout=1&timestamps=1&follow=1&tail=10 HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 101 UPGRADED
+    Content-Type: application/vnd.docker.raw-stream
+    Connection: Upgrade
+    Upgrade: tcp
+
+    {% raw %}
+    {{ STREAM }}
+    {% endraw %}
+
+**Query parameters**:
+
+- **follow** – 1/True/true or 0/False/false, return stream. Default `false`.
+- **stdout** – 1/True/true or 0/False/false, show `stdout` log. Default `false`.
+- **stderr** – 1/True/true or 0/False/false, show `stderr` log. Default `false`.
+- **timestamps** – 1/True/true or 0/False/false, print timestamps for
+  every log line. Default `false`.
+- **tail** – Output specified number of lines at the end of logs: `all` or `<number>`. Default `all`.
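+
+As an aside for implementers (an illustration, not part of the API itself),
+here is a minimal Go sketch that streams these logs over the default Unix
+socket; the container ID is the one used in the examples above, and error
+handling is trimmed for brevity:
+
+```
+package main
+
+import (
+	"context"
+	"io"
+	"net"
+	"net/http"
+	"os"
+)
+
+func main() {
+	// Dial the daemon's Unix socket instead of TCP; the URL host is ignored.
+	tr := &http.Transport{
+		DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
+			return (&net.Dialer{}).DialContext(ctx, "unix", "/var/run/docker.sock")
+		},
+	}
+	client := &http.Client{Transport: tr}
+
+	resp, err := client.Get("http://docker/v1.18/containers/4fa6e0f0c678/logs?stdout=1&stderr=1&tail=10")
+	if err != nil {
+		panic(err)
+	}
+	defer resp.Body.Close()
+
+	// For non-TTY containers, the body arrives in the multiplexed stream
+	// format described under "Attach to a container" below.
+	io.Copy(os.Stdout, resp.Body)
+}
+```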
+ +**Status codes**: + +- **101** – no error, hints proxy about hijacking +- **200** – no error, no upgrade header found +- **404** – no such container +- **500** – server error + +#### Inspect changes on a container's filesystem + +`GET /containers/(id or name)/changes` + +Inspect changes on container `id`'s filesystem + +**Example request**: + + GET /v1.18/containers/4fa6e0f0c678/changes HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Path": "/dev", + "Kind": 0 + }, + { + "Path": "/dev/kmsg", + "Kind": 1 + }, + { + "Path": "/test", + "Kind": 1 + } + ] + +Values for `Kind`: + +- `0`: Modify +- `1`: Add +- `2`: Delete + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Export a container + +`GET /containers/(id or name)/export` + +Export the contents of container `id` + +**Example request**: + + GET /v1.18/containers/4fa6e0f0c678/export HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/octet-stream + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Get container stats based on resource usage + +`GET /containers/(id or name)/stats` + +This endpoint returns a live stream of a container's resource usage statistics. + +**Example request**: + + GET /v1.18/containers/redis1/stats HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "read" : "2015-01-08T22:57:31.547920715Z", + "network" : { + "rx_dropped" : 0, + "rx_bytes" : 648, + "rx_errors" : 0, + "tx_packets" : 8, + "tx_dropped" : 0, + "rx_packets" : 8, + "tx_errors" : 0, + "tx_bytes" : 648 + }, + "memory_stats" : { + "stats" : { + "total_pgmajfault" : 0, + "cache" : 0, + "mapped_file" : 0, + "total_inactive_file" : 0, + "pgpgout" : 414, + "rss" : 6537216, + "total_mapped_file" : 0, + "writeback" : 0, + "unevictable" : 0, + "pgpgin" : 477, + "total_unevictable" : 0, + "pgmajfault" : 0, + "total_rss" : 6537216, + "total_rss_huge" : 6291456, + "total_writeback" : 0, + "total_inactive_anon" : 0, + "rss_huge" : 6291456, + "hierarchical_memory_limit" : 67108864, + "total_pgfault" : 964, + "total_active_file" : 0, + "active_anon" : 6537216, + "total_active_anon" : 6537216, + "total_pgpgout" : 414, + "total_cache" : 0, + "inactive_anon" : 0, + "active_file" : 0, + "pgfault" : 964, + "inactive_file" : 0, + "total_pgpgin" : 477 + }, + "max_usage" : 6651904, + "usage" : 6537216, + "failcnt" : 0, + "limit" : 67108864 + }, + "blkio_stats" : {}, + "cpu_stats" : { + "cpu_usage" : { + "percpu_usage" : [ + 16970827, + 1839451, + 7107380, + 10571290 + ], + "usage_in_usermode" : 10000000, + "total_usage" : 36488948, + "usage_in_kernelmode" : 20000000 + }, + "system_cpu_usage" : 20091722000000000, + "throttling_data" : {} + } + } + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Resize a container TTY + +`POST /containers/(id or name)/resize?h=&w=` + +Resize the TTY for container with `id`. You must restart the container for the resize to take effect. 
+ +**Example request**: + + POST /v1.18/containers/4fa6e0f0c678/resize?h=40&w=80 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Length: 0 + Content-Type: text/plain; charset=utf-8 + +**Query parameters**: + +- **h** – height of `tty` session +- **w** – width + +**Status codes**: + +- **200** – no error +- **404** – No such container +- **500** – Cannot resize container + +#### Start a container + +`POST /containers/(id or name)/start` + +Start the container `id` + +> **Note**: +> For backwards compatibility, this endpoint accepts a `HostConfig` as JSON-encoded request body. +> See [create a container](#create-a-container) for details. + +**Example request**: + + POST /v1.18/containers/e90e34656806/start HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Status codes**: + +- **204** – no error +- **304** – container already started +- **404** – no such container +- **500** – server error + +#### Stop a container + +`POST /containers/(id or name)/stop` + +Stop the container `id` + +**Example request**: + + POST /v1.18/containers/e90e34656806/stop?t=5 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **t** – number of seconds to wait before killing the container + +**Status codes**: + +- **204** – no error +- **304** – container already stopped +- **404** – no such container +- **500** – server error + +#### Restart a container + +`POST /containers/(id or name)/restart` + +Restart the container `id` + +**Example request**: + + POST /v1.18/containers/e90e34656806/restart?t=5 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **t** – number of seconds to wait before killing the container + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **500** – server error + +#### Kill a container + +`POST /containers/(id or name)/kill` + +Kill the container `id` + +**Example request**: + + POST /v1.18/containers/e90e34656806/kill HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **signal** - Signal to send to the container: integer or string like `SIGINT`. + When not set, `SIGKILL` is assumed and the call waits for the container to exit. 
+ +**Status codes**: + +- **204** – no error +- **404** – no such container +- **500** – server error + +#### Rename a container + +`POST /containers/(id or name)/rename` + +Rename the container `id` to a `new_name` + +**Example request**: + + POST /v1.18/containers/e90e34656806/rename?name=new_name HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **name** – new name for the container + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **409** - conflict name already assigned +- **500** – server error + +#### Pause a container + +`POST /containers/(id or name)/pause` + +Pause the container `id` + +**Example request**: + + POST /v1.18/containers/e90e34656806/pause HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **500** – server error + +#### Unpause a container + +`POST /containers/(id or name)/unpause` + +Unpause the container `id` + +**Example request**: + + POST /v1.18/containers/e90e34656806/unpause HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **500** – server error + +#### Attach to a container + +`POST /containers/(id or name)/attach` + +Attach to the container `id` + +**Example request**: + + POST /v1.18/containers/16253994b7c4/attach?logs=1&stream=0&stdout=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 101 UPGRADED + Content-Type: application/vnd.docker.raw-stream + Connection: Upgrade + Upgrade: tcp + + {% raw %} + {{ STREAM }} + {% endraw %} + +**Query parameters**: + +- **logs** – 1/True/true or 0/False/false, return logs. Default `false`. +- **stream** – 1/True/true or 0/False/false, return stream. + Default `false`. +- **stdin** – 1/True/true or 0/False/false, if `stream=true`, attach + to `stdin`. Default `false`. +- **stdout** – 1/True/true or 0/False/false, if `logs=true`, return + `stdout` log, if `stream=true`, attach to `stdout`. Default `false`. +- **stderr** – 1/True/true or 0/False/false, if `logs=true`, return + `stderr` log, if `stream=true`, attach to `stderr`. Default `false`. + +**Status codes**: + +- **101** – no error, hints proxy about hijacking +- **200** – no error, no upgrade header found +- **400** – bad parameter +- **404** – no such container +- **500** – server error + +**Stream details**: + +When using the TTY setting is enabled in +[`POST /containers/create` +](#create-a-container), +the stream is the raw data from the process PTY and client's `stdin`. +When the TTY is disabled, then the stream is multiplexed to separate +`stdout` and `stderr`. + +The format is a **Header** and a **Payload** (frame). + +**HEADER** + +The header contains the information which the stream writes (`stdout` or +`stderr`). It also contains the size of the associated frame encoded in the +last four bytes (`uint32`). + +It is encoded on the first eight bytes like this: + + header := [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4} + +`STREAM_TYPE` can be: + +- 0: `stdin` (is written on `stdout`) +- 1: `stdout` +- 2: `stderr` + +`SIZE1, SIZE2, SIZE3, SIZE4` are the four bytes of +the `uint32` size encoded as big endian. + +**PAYLOAD** + +The payload is the raw stream. + +**IMPLEMENTATION** + +The simplest way to implement the Attach protocol is the following: + + 1. Read eight bytes. + 2. Choose `stdout` or `stderr` depending on the first byte. + 3. Extract the frame size from the last four bytes. + 4. 
Read the extracted size and output it on the correct output. + 5. Goto 1. + +#### Attach to a container (websocket) + +`GET /containers/(id or name)/attach/ws` + +Attach to the container `id` via websocket + +Implements websocket protocol handshake according to [RFC 6455](http://tools.ietf.org/html/rfc6455) + +**Example request** + + GET /v1.18/containers/e90e34656806/attach/ws?logs=0&stream=1&stdin=1&stdout=1&stderr=1 HTTP/1.1 + +**Example response** + + {% raw %} + {{ STREAM }} + {% endraw %} + +**Query parameters**: + +- **logs** – 1/True/true or 0/False/false, return logs. Default `false`. +- **stream** – 1/True/true or 0/False/false, return stream. + Default `false`. +- **stdin** – 1/True/true or 0/False/false, if `stream=true`, attach + to `stdin`. Default `false`. +- **stdout** – 1/True/true or 0/False/false, if `logs=true`, return + `stdout` log, if `stream=true`, attach to `stdout`. Default `false`. +- **stderr** – 1/True/true or 0/False/false, if `logs=true`, return + `stderr` log, if `stream=true`, attach to `stderr`. Default `false`. + +**Status codes**: + +- **200** – no error +- **400** – bad parameter +- **404** – no such container +- **500** – server error + +#### Wait a container + +`POST /containers/(id or name)/wait` + +Block until container `id` stops, then returns the exit code + +**Example request**: + + POST /v1.18/containers/16253994b7c4/wait HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"StatusCode": 0} + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Remove a container + +`DELETE /containers/(id or name)` + +Remove the container `id` from the filesystem + +**Example request**: + + DELETE /v1.18/containers/16253994b7c4?v=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **v** – 1/True/true or 0/False/false, Remove the volumes + associated to the container. Default `false`. +- **force** - 1/True/true or 0/False/false, Kill then remove the container. + Default `false`. +- **link** - 1/True/true or 0/False/false, Remove the specified + link associated to the container. Default `false`. 
+ +**Status codes**: + +- **204** – no error +- **400** – bad parameter +- **404** – no such container +- **409** – conflict +- **500** – server error + +#### Copy files or folders from a container + +`POST /containers/(id or name)/copy` + +Copy files or folders of container `id` + +**Example request**: + + POST /v1.18/containers/4fa6e0f0c678/copy HTTP/1.1 + Content-Type: application/json + + { + "Resource": "test.txt" + } + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +### 2.2 Images + +#### List Images + +`GET /images/json` + +**Example request**: + + GET /v1.18/images/json?all=0 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "RepoTags": [ + "ubuntu:12.04", + "ubuntu:precise", + "ubuntu:latest" + ], + "Id": "8dbd9e392a964056420e5d58ca5cc376ef18e2de93b5cc90e868a1bbc8318c1c", + "Created": 1365714795, + "Size": 131506275, + "VirtualSize": 131506275 + }, + { + "RepoTags": [ + "ubuntu:12.10", + "ubuntu:quantal" + ], + "ParentId": "27cf784147099545", + "Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", + "Created": 1364102658, + "Size": 24653, + "VirtualSize": 180116135 + } + ] + +**Example request, with digest information**: + + GET /v1.18/images/json?digests=1 HTTP/1.1 + +**Example response, with digest information**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Created": 1420064636, + "Id": "4986bf8c15363d1c5d15512d5266f8777bfba4974ac56e3270e7760f6f0a8125", + "ParentId": "ea13149945cb6b1e746bf28032f02e9b5a793523481a0a18645fc77ad53c4ea2", + "RepoDigests": [ + "localhost:5000/test/busybox@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf" + ], + "RepoTags": [ + "localhost:5000/test/busybox:latest", + "playdate:latest" + ], + "Size": 0, + "VirtualSize": 2429728 + } + ] + +The response shows a single image `Id` associated with two repositories +(`RepoTags`): `localhost:5000/test/busybox`: and `playdate`. A caller can use +either of the `RepoTags` values `localhost:5000/test/busybox:latest` or +`playdate:latest` to reference the image. + +You can also use `RepoDigests` values to reference an image. In this response, +the array has only one reference and that is to the +`localhost:5000/test/busybox` repository; the `playdate` repository has no +digest. You can reference this digest using the value: +`localhost:5000/test/busybox@sha256:cbbf2f9a99b47fc460d...` + +See the `docker run` and `docker build` commands for examples of digest and tag +references on the command line. + +**Query parameters**: + +- **all** – 1/True/true or 0/False/false, default false +- **filters** – a JSON encoded value of the filters (a map[string][]string) to process on the images list. 
Available filters: + - `dangling=true` + - `label=key` or `label="key=value"` of an image label +- **filter** - only return images with the specified name + +#### Build image from a Dockerfile + +`POST /build` + +Build an image from a Dockerfile + +**Example request**: + + POST /v1.18/build HTTP/1.1 + Content-Type: application/x-tar + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"stream": "Step 1/5..."} + {"stream": "..."} + {"error": "Error...", "errorDetail": {"code": 123, "message": "Error..."}} + +The input stream must be a `tar` archive compressed with one of the +following algorithms: `identity` (no compression), `gzip`, `bzip2`, `xz`. + +The archive must include a build instructions file, typically called +`Dockerfile` at the archive's root. The `dockerfile` parameter may be +used to specify a different build instructions file. To do this, its value must be +the path to the alternate build instructions file to use. + +The archive may include any number of other files, +which are accessible in the build context (See the [*ADD build +command*](../reference/builder.md#add)). + +The Docker daemon performs a preliminary validation of the `Dockerfile` before +starting the build, and returns an error if the syntax is incorrect. After that, +each instruction is run one-by-one until the ID of the new image is output. + +The build is canceled if the client drops the connection by quitting +or being killed. + +**Query parameters**: + +- **dockerfile** - Path within the build context to the Dockerfile. This is + ignored if `remote` is specified and points to an individual filename. +- **t** – A name and optional tag to apply to the image in the `name:tag` format. + If you omit the `tag` the default `latest` value is assumed. +- **remote** – A Git repository URI or HTTP/HTTPS context URI. If the + URI points to a single text file, the file's contents are placed into + a file called `Dockerfile` and the image is built from that file. +- **q** – Suppress verbose build output. +- **nocache** – Do not use the cache when building the image. +- **pull** - Attempt to pull the image even if an older image exists locally. +- **rm** - Remove intermediate containers after a successful build (default behavior). +- **forcerm** - Always remove intermediate containers (includes `rm`). +- **memory** - Set memory limit for build. +- **memswap** - Total memory (memory + swap), `-1` to enable unlimited swap. +- **cpushares** - CPU shares (relative weight). +- **cpusetcpus** - CPUs in which to allow execution (e.g., `0-3`, `0,1`). + +**Request Headers**: + +- **Content-type** – Set to `"application/x-tar"`. +- **X-Registry-Config** – base64-encoded ConfigFile object + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Create an image + +`POST /images/create` + +Create an image either by pulling it from the registry or by importing it + +**Example request**: + + POST /v1.18/images/create?fromImage=busybox&tag=latest HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status": "Pulling..."} + {"status": "Pulling", "progress": "1 B/ 100 B", "progressDetail": {"current": 1, "total": 100}} + {"error": "Invalid..."} + ... + +When using this endpoint to pull an image from the registry, the +`X-Registry-Auth` header can be used to include +a base64-encoded AuthConfig object. + +**Query parameters**: + +- **fromImage** – Name of the image to pull. 
+- **fromSrc** – Source to import. The value may be a URL from which the image + can be retrieved or `-` to read the image from the request body. +- **repo** – Repository name. +- **tag** – Tag. If empty when pulling an image, this causes all tags + for the given image to be pulled. + +**Request Headers**: + +- **X-Registry-Auth** – base64-encoded AuthConfig object + +**Status codes**: + +- **200** – no error +- **404** - repository does not exist or no read access +- **500** – server error + + + +#### Inspect an image + +`GET /images/(name)/json` + +Return low-level information on the image `name` + +**Example request**: + + GET /v1.18/images/ubuntu/json HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Created": "2013-03-23T22:24:18.818426-07:00", + "Container": "3d67245a8d72ecf13f33dffac9f79dcdf70f75acb84d308770391510e0c23ad0", + "ContainerConfig": { + "Hostname": "", + "User": "", + "AttachStdin": false, + "AttachStdout": false, + "AttachStderr": false, + "Tty": true, + "OpenStdin": true, + "StdinOnce": false, + "Env": null, + "Cmd": ["/bin/bash"], + "Dns": null, + "Image": "ubuntu", + "Labels": { + "com.example.vendor": "Acme", + "com.example.license": "GPL", + "com.example.version": "1.0" + }, + "Volumes": null, + "VolumesFrom": "", + "WorkingDir": "" + }, + "Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", + "Parent": "27cf784147099545", + "Size": 6824592 + } + +**Status codes**: + +- **200** – no error +- **404** – no such image +- **500** – server error + +#### Get the history of an image + +`GET /images/(name)/history` + +Return the history of the image `name` + +**Example request**: + + GET /v1.18/images/ubuntu/history HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Id": "b750fe79269d", + "Created": 1364102658, + "CreatedBy": "/bin/bash" + }, + { + "Id": "27cf78414709", + "Created": 1364068391, + "CreatedBy": "" + } + ] + +**Status codes**: + +- **200** – no error +- **404** – no such image +- **500** – server error + +#### Push an image on the registry + +`POST /images/(name)/push` + +Push the image `name` on the registry + +**Example request**: + + POST /v1.18/images/test/push HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status": "Pushing..."} + {"status": "Pushing", "progress": "1/? (n/a)", "progressDetail": {"current": 1}}} + {"error": "Invalid..."} + ... + +If you wish to push an image on to a private registry, that image must already have a tag +into a repository which references that registry `hostname` and `port`. This repository name should +then be used in the URL. This duplicates the command line's flow. + +**Example request**: + + POST /v1.18/images/registry.acme.com:5000/test/push HTTP/1.1 + + +**Query parameters**: + +- **tag** – The tag to associate with the image on the registry. This is optional. + +**Request Headers**: + +- **X-Registry-Auth** – base64-encoded AuthConfig object. 
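+
+As a hedged illustration (the field names follow the `POST /auth` payload
+shown later in this section, and URL-safe base64 is assumed), the header
+value can be built like this in Go:
+
+```
+package main
+
+import (
+	"encoding/base64"
+	"encoding/json"
+	"fmt"
+)
+
+// authConfig mirrors the JSON body accepted by POST /auth.
+type authConfig struct {
+	Username      string `json:"username"`
+	Password      string `json:"password"`
+	Email         string `json:"email"`
+	ServerAddress string `json:"serveraddress"`
+}
+
+func main() {
+	buf, _ := json.Marshal(authConfig{
+		Username:      "hannibal",
+		Password:      "xxxx",
+		Email:         "hannibal@a-team.com",
+		ServerAddress: "https://index.docker.io/v1/",
+	})
+	// Pass this value in the X-Registry-Auth request header.
+	fmt.Println(base64.URLEncoding.EncodeToString(buf))
+}
+```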
+ +**Status codes**: + +- **200** – no error +- **404** – no such image +- **500** – server error + +#### Tag an image into a repository + +`POST /images/(name)/tag` + +Tag the image `name` into a repository + +**Example request**: + + POST /v1.18/images/test/tag?repo=myrepo&force=0&tag=v42 HTTP/1.1 + +**Example response**: + + HTTP/1.1 201 Created + +**Query parameters**: + +- **repo** – The repository to tag in +- **force** – 1/True/true or 0/False/false, default false +- **tag** - The new tag name + +**Status codes**: + +- **201** – no error +- **400** – bad parameter +- **404** – no such image +- **409** – conflict +- **500** – server error + +#### Remove an image + +`DELETE /images/(name)` + +Remove the image `name` from the filesystem + +**Example request**: + + DELETE /v1.18/images/test HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-type: application/json + + [ + {"Untagged": "3e2f21a89f"}, + {"Deleted": "3e2f21a89f"}, + {"Deleted": "53b4f83ac9"} + ] + +**Query parameters**: + +- **force** – 1/True/true or 0/False/false, default false +- **noprune** – 1/True/true or 0/False/false, default false + +**Status codes**: + +- **200** – no error +- **404** – no such image +- **409** – conflict +- **500** – server error + +#### Search images + +`GET /images/search` + +Search for an image on [Docker Hub](https://hub.docker.com). + +> **Note**: +> The response keys have changed from API v1.6 to reflect the JSON +> sent by the registry server to the docker daemon's request. + +**Example request**: + + GET /v1.18/images/search?term=sshd HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "star_count": 12, + "is_official": false, + "name": "wma55/u1210sshd", + "is_automated": false, + "description": "" + }, + { + "star_count": 10, + "is_official": false, + "name": "jdswinbank/sshd", + "is_automated": false, + "description": "" + }, + { + "star_count": 18, + "is_official": false, + "name": "vgauthier/sshd", + "is_automated": false, + "description": "" + } + ... 
+ ] + +**Query parameters**: + +- **term** – term to search + +**Status codes**: + +- **200** – no error +- **500** – server error + +### 2.3 Misc + +#### Check auth configuration + +`POST /auth` + +Get the default username and email + +**Example request**: + + POST /v1.18/auth HTTP/1.1 + Content-Type: application/json + + { + "username": "hannibal", + "password": "xxxx", + "email": "hannibal@a-team.com", + "serveraddress": "https://index.docker.io/v1/" + } + +**Example response**: + + HTTP/1.1 200 OK + +**Status codes**: + +- **200** – no error +- **204** – no error +- **500** – server error + +#### Display system-wide information + +`GET /info` + +Display system-wide information + +**Example request**: + + GET /v1.18/info HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Containers": 11, + "Debug": 0, + "DockerRootDir": "/var/lib/docker", + "Driver": "btrfs", + "DriverStatus": [[""]], + "ExecutionDriver": "native-0.1", + "HttpProxy": "http://test:test@localhost:8080", + "HttpsProxy": "https://test:test@localhost:8080", + "ID": "7TRN:IPZB:QYBB:VPBQ:UMPP:KARE:6ZNR:XE6T:7EWV:PKF4:ZOJD:TPYS", + "IPv4Forwarding": 1, + "Images": 16, + "IndexServerAddress": "https://index.docker.io/v1/", + "InitPath": "/usr/bin/docker", + "InitSha1": "", + "KernelVersion": "3.12.0-1-amd64", + "Labels": [ + "storage=ssd" + ], + "MemTotal": 2099236864, + "MemoryLimit": 1, + "NCPU": 1, + "NEventsListener": 0, + "NFd": 11, + "NGoroutines": 21, + "Name": "prod-server-42", + "NoProxy": "9.81.1.160", + "OperatingSystem": "Boot2Docker", + "RegistryConfig": { + "IndexConfigs": { + "docker.io": { + "Mirrors": null, + "Name": "docker.io", + "Official": true, + "Secure": true + } + }, + "InsecureRegistryCIDRs": [ + "127.0.0.0/8" + ] + }, + "SwapLimit": 0, + "SystemTime": "2015-03-10T11:11:23.730591467-07:00" + } + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Show the docker version information + +`GET /version` + +Show the docker version information + +**Example request**: + + GET /v1.18/version HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Version": "1.5.0", + "Os": "linux", + "KernelVersion": "3.18.5-tinycore64", + "GoVersion": "go1.4.1", + "GitCommit": "a8a31ef", + "Arch": "amd64", + "ApiVersion": "1.18" + } + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Ping the docker server + +`GET /_ping` + +Ping the docker server + +**Example request**: + + GET /v1.18/_ping HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: text/plain + + OK + +**Status codes**: + +- **200** - no error +- **500** - server error + +#### Create a new image from a container's changes + +`POST /commit` + +Create a new image from a container's changes + +**Example request**: + + POST /v1.18/commit?container=44c004db4b17&comment=message&repo=myrepo HTTP/1.1 + Content-Type: application/json + + { + "Hostname": "", + "Domainname": "", + "User": "", + "AttachStdin": false, + "AttachStdout": true, + "AttachStderr": true, + "PortSpecs": null, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": null, + "Cmd": [ + "date" + ], + "Volumes": { + "/tmp": {} + }, + "WorkingDir": "", + "NetworkDisabled": false, + "ExposedPorts": { + "22/tcp": {} + } + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + {"Id": "596069db4bf5"} + +**JSON parameters**: + +- **config** - the container's configuration + +**Query parameters**: + +- **container** 
– source container +- **repo** – repository +- **tag** – tag +- **comment** – commit message +- **author** – author (e.g., "John Hannibal Smith + <[hannibal@a-team.com](mailto:hannibal%40a-team.com)>") + +**Status codes**: + +- **201** – no error +- **404** – no such container +- **500** – server error + +#### Monitor Docker's events + +`GET /events` + +Get container events from docker, in real time via streaming. + +Docker containers report the following events: + + create, destroy, die, exec_create, exec_start, export, kill, oom, pause, restart, start, stop, unpause + +Docker images report the following events: + + untag, delete + +**Example request**: + + GET /v1.18/events?since=1374067924 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status": "create", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067924} + {"status": "start", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067924} + {"status": "stop", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067966} + {"status": "destroy", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067970} + +**Query parameters**: + +- **since** – Timestamp. Show all events created since timestamp and then stream +- **until** – Timestamp. Show events created until given timestamp and stop streaming +- **filters** – A json encoded value of the filters (a map[string][]string) to process on the event list. Available filters: + - `container=`; -- container to filter + - `event=`; -- event to filter + - `image=`; -- image to filter + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Get a tarball containing all images in a repository + +`GET /images/(name)/get` + +Get a tarball containing all images and metadata for the repository specified +by `name`. + +If `name` is a specific name and tag (e.g. ubuntu:latest), then only that image +(and its parents) are returned. If `name` is an image ID, similarly only that +image (and its parents) are returned, but with the exclusion of the +'repositories' file in the tarball, as there were no image names referenced. + +See the [image tarball format](#image-tarball-format) for more details. + +**Example request** + + GET /v1.18/images/ubuntu/get + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + Binary data stream + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Get a tarball containing all images + +`GET /images/get` + +Get a tarball containing all images and metadata for one or more repositories. + +For each value of the `names` parameter: if it is a specific name and tag (e.g. +`ubuntu:latest`), then only that image (and its parents) are returned; if it is +an image ID, similarly only that image (and its parents) are returned and there +would be no names referenced in the 'repositories' file for this image ID. + +See the [image tarball format](#image-tarball-format) for more details. + +**Example request** + + GET /v1.18/images/get?names=myname%2Fmyapp%3Alatest&names=busybox + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + Binary data stream + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Load a tarball with a set of images and tags into docker + +`POST /images/load` + +Load a set of images and tags into a Docker repository. +See the [image tarball format](#image-tarball-format) for more details. 
+ +**Example request** + + POST /v1.18/images/load + Content-Type: application/x-tar + + Tarball in body + +**Example response**: + + HTTP/1.1 200 OK + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Image tarball format + +An image tarball contains one directory per image layer (named using its long ID), +each containing these files: + +- `VERSION`: currently `1.0` - the file format version +- `json`: detailed layer information, similar to `docker inspect layer_id` +- `layer.tar`: A tarfile containing the filesystem changes in this layer + +The `layer.tar` file contains `aufs` style `.wh..wh.aufs` files and directories +for storing attribute changes and deletions. + +If the tarball defines a repository, the tarball should also include a `repositories` file at +the root that contains a list of repository and tag names mapped to layer IDs. + +``` +{"hello-world": + {"latest": "565a9d68a73f6706862bfe8409a7f659776d4d60a8d096eb4a3cbce6999cc2a1"} +} +``` + +#### Exec Create + +`POST /containers/(id or name)/exec` + +Sets up an exec instance in a running container `id` + +**Example request**: + + POST /v1.18/containers/e90e34656806/exec HTTP/1.1 + Content-Type: application/json + + { + "AttachStdin": true, + "AttachStdout": true, + "AttachStderr": true, + "Cmd": ["sh"], + "Tty": true + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + { + "Id": "f90e34656806", + "Warnings":[] + } + +**JSON parameters**: + +- **AttachStdin** - Boolean value, attaches to `stdin` of the `exec` command. +- **AttachStdout** - Boolean value, attaches to `stdout` of the `exec` command. +- **AttachStderr** - Boolean value, attaches to `stderr` of the `exec` command. +- **Tty** - Boolean value to allocate a pseudo-TTY. +- **Cmd** - Command to run specified as a string or an array of strings. + + +**Status codes**: + +- **201** – no error +- **404** – no such container + +#### Exec Start + +`POST /exec/(id)/start` + +Starts a previously set up `exec` instance `id`. If `detach` is true, this API +returns after starting the `exec` command. Otherwise, this API sets up an +interactive session with the `exec` command. + +**Example request**: + + POST /v1.18/exec/e90e34656806/start HTTP/1.1 + Content-Type: application/json + + { + "Detach": false, + "Tty": false + } + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/vnd.docker.raw-stream + + {% raw %} + {{ STREAM }} + {% endraw %} + +**JSON parameters**: + +- **Detach** - Detach from the `exec` command. +- **Tty** - Boolean value to allocate a pseudo-TTY. + +**Status codes**: + +- **200** – no error +- **404** – no such exec instance + +**Stream details**: + +Similar to the stream behavior of `POST /containers/(id or name)/attach` API + +#### Exec Resize + +`POST /exec/(id)/resize` + +Resizes the `tty` session used by the `exec` command `id`. The unit is number of characters. +This API is valid only if `tty` was specified as part of creating and starting the `exec` command. + +**Example request**: + + POST /v1.18/exec/e90e34656806/resize?h=40&w=80 HTTP/1.1 + Content-Type: text/plain + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: text/plain + +**Query parameters**: + +- **h** – height of `tty` session +- **w** – width + +**Status codes**: + +- **201** – no error +- **404** – no such exec instance + +#### Exec Inspect + +`GET /exec/(id)/json` + +Return low-level information about the `exec` command `id`. 
+
+**Example request**:
+
+    GET /v1.18/exec/11fb006128e8ceb3942e7c58d77750f24210e35f879dd204ac975c184b820b39/json HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: text/plain
+
+    {
+        "ID" : "11fb006128e8ceb3942e7c58d77750f24210e35f879dd204ac975c184b820b39",
+        "Running" : false,
+        "ExitCode" : 2,
+        "ProcessConfig" : {
+            "privileged" : false,
+            "user" : "",
+            "tty" : false,
+            "entrypoint" : "sh",
+            "arguments" : [
+                "-c",
+                "exit 2"
+            ]
+        },
+        "OpenStdin" : false,
+        "OpenStderr" : false,
+        "OpenStdout" : false,
+        "Container" : {
+            "State" : {
+                "Running" : true,
+                "Paused" : false,
+                "Restarting" : false,
+                "OOMKilled" : false,
+                "Pid" : 3650,
+                "ExitCode" : 0,
+                "Error" : "",
+                "StartedAt" : "2014-11-17T22:26:03.717657531Z",
+                "FinishedAt" : "0001-01-01T00:00:00Z"
+            },
+            "ID" : "8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c",
+            "Created" : "2014-11-17T22:26:03.626304998Z",
+            "Path" : "date",
+            "Args" : [],
+            "Config" : {
+                "Hostname" : "8f177a186b97",
+                "Domainname" : "",
+                "User" : "",
+                "AttachStdin" : false,
+                "AttachStdout" : false,
+                "AttachStderr" : false,
+                "PortSpecs": null,
+                "ExposedPorts" : null,
+                "Tty" : false,
+                "OpenStdin" : false,
+                "StdinOnce" : false,
+                "Env" : [ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" ],
+                "Cmd" : [
+                    "date"
+                ],
+                "Image" : "ubuntu",
+                "Volumes" : null,
+                "WorkingDir" : "",
+                "Entrypoint" : null,
+                "NetworkDisabled" : false,
+                "MacAddress" : "",
+                "OnBuild" : null,
+                "SecurityOpt" : null
+            },
+            "Image" : "5506de2b643be1e6febbf3b8a240760c6843244c41e12aa2f60ccbb7153d17f5",
+            "NetworkSettings" : {
+                "IPAddress" : "172.17.0.2",
+                "IPPrefixLen" : 16,
+                "MacAddress" : "02:42:ac:11:00:02",
+                "Gateway" : "172.17.42.1",
+                "Bridge" : "docker0",
+                "PortMapping" : null,
+                "Ports" : {}
+            },
+            "ResolvConfPath" : "/var/lib/docker/containers/8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c/resolv.conf",
+            "HostnamePath" : "/var/lib/docker/containers/8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c/hostname",
+            "HostsPath" : "/var/lib/docker/containers/8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c/hosts",
+            "LogPath": "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b-json.log",
+            "Name" : "/test",
+            "Driver" : "aufs",
+            "ExecDriver" : "native-0.2",
+            "MountLabel" : "",
+            "ProcessLabel" : "",
+            "AppArmorProfile" : "",
+            "RestartCount" : 0,
+            "Volumes" : {},
+            "VolumesRW" : {}
+        }
+    }
+
+**Status codes**:
+
+- **200** – no error
+- **404** – no such exec instance
+- **500** - server error
+
+## 3. Going further
+
+### 3.1 Inside `docker run`
+
+As an example, the `docker run` command line makes the following API calls:
+
+- Create the container
+
+- If the status code is 404, it means the image doesn't exist:
+    - Try to pull it.
+    - Then, retry to create the container.
+
+- Start the container.
+
+- If you are not in detached mode:
+    - Attach to the container, using `logs=1` (to have `stdout` and
+      `stderr` from the container's start) and `stream=1`
+
+- If in detached mode or only `stdin` is attached, display the container's id.
+
+### 3.2 Hijacking
+
+In this version of the API, `/attach` uses hijacking to transport `stdin`,
+`stdout`, and `stderr` on the same socket.
+
+To hint potential proxies about connection hijacking, the Docker client sends
+connection upgrade headers, similar to WebSocket:
+
+    Upgrade: tcp
+    Connection: Upgrade
+
+When the Docker daemon detects the `Upgrade` header, it switches its status code
+from **200 OK** to **101 UPGRADED** and resends the same headers.
+
+This might change in the future.
+
+### 3.3 CORS Requests
+
+To enable cross-origin requests to the Engine API, provide a value for
+`--api-cors-header` when running Docker in daemon mode. Setting it to `*`
+(asterisk) allows all origins; the default (blank) leaves CORS disabled:
+
+    $ docker -d -H="192.168.1.9:2375" --api-cors-header="http://foo.bar"
diff --git a/components/engine/docs/api/v1.19.md b/components/engine/docs/api/v1.19.md
new file mode 100644
index 0000000000..bd5f09f670
--- /dev/null
+++ b/components/engine/docs/api/v1.19.md
@@ -0,0 +1,2241 @@
+---
+title: "Engine API v1.19"
+description: "API Documentation for Docker"
+keywords: "API, Docker, rcli, REST, documentation"
+redirect_from:
+- /engine/reference/api/docker_remote_api_v1.19/
+- /reference/api/docker_remote_api_v1.19/
+---
+
+
+
+## 1. Brief introduction
+
+ - The daemon listens on `unix:///var/run/docker.sock` but you can
+   [Bind Docker to another host/port or a Unix socket](../reference/commandline/dockerd.md#bind-docker-to-another-host-port-or-a-unix-socket).
+ - The API tends to be REST. However, for some complex commands, like `attach`
+   or `pull`, the HTTP connection is hijacked to transport `stdout`,
+   `stdin` and `stderr`.
+ - When the client API version is newer than the daemon's, these calls return an HTTP
+   `400 Bad Request` error message.
+
+## 2. Endpoints
+
+### 2.1 Containers
+
+#### List containers
+
+`GET /containers/json`
+
+List containers
+
+**Example request**:
+
+    GET /v1.19/containers/json?all=1&before=8dfafdbc3a40&size=1 HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    [
+        {
+            "Id": "8dfafdbc3a40",
+            "Names":["/boring_feynman"],
+            "Image": "ubuntu:latest",
+            "Command": "echo 1",
+            "Created": 1367854155,
+            "Status": "Exit 0",
+            "Ports": [{"PrivatePort": 2222, "PublicPort": 3333, "Type": "tcp"}],
+            "Labels": {
+                "com.example.vendor": "Acme",
+                "com.example.license": "GPL",
+                "com.example.version": "1.0"
+            },
+            "SizeRw": 12288,
+            "SizeRootFs": 0
+        },
+        {
+            "Id": "9cd87474be90",
+            "Names":["/coolName"],
+            "Image": "ubuntu:latest",
+            "Command": "echo 222222",
+            "Created": 1367854155,
+            "Status": "Exit 0",
+            "Ports": [],
+            "Labels": {},
+            "SizeRw": 12288,
+            "SizeRootFs": 0
+        },
+        {
+            "Id": "3176a2479c92",
+            "Names":["/sleepy_dog"],
+            "Image": "ubuntu:latest",
+            "Command": "echo 3333333333333333",
+            "Created": 1367854154,
+            "Status": "Exit 0",
+            "Ports":[],
+            "Labels": {},
+            "SizeRw":12288,
+            "SizeRootFs":0
+        },
+        {
+            "Id": "4cb07b47f9fb",
+            "Names":["/running_cat"],
+            "Image": "ubuntu:latest",
+            "Command": "echo 444444444444444444444444444444444",
+            "Created": 1367854152,
+            "Status": "Exit 0",
+            "Ports": [],
+            "Labels": {},
+            "SizeRw": 12288,
+            "SizeRootFs": 0
+        }
+    ]
+
+**Query parameters**:
+
+- **all** – 1/True/true or 0/False/false, Show all containers.
+  Only running containers are shown by default (i.e., this defaults to false)
+- **limit** – Show `limit` last created
+  containers, include non-running ones.
+- **since** – Show only containers created since Id, include
+  non-running ones.
+- **before** – Show only containers created before Id, include
+  non-running ones.
+- **size** – 1/True/true or 0/False/false, Show the containers
+  sizes
+- **filters** - a JSON encoded value of the filters (a `map[string][]string`) to process on the containers list.
Available filters: + - `exited=`; -- containers with exit code of `` ; + - `status=`(`restarting`|`running`|`paused`|`exited`) + - `label=key` or `label="key=value"` of a container label + +**Status codes**: + +- **200** – no error +- **400** – bad parameter +- **500** – server error + +#### Create a container + +`POST /containers/create` + +Create a container + +**Example request**: + + POST /v1.19/containers/create HTTP/1.1 + Content-Type: application/json + + { + "Hostname": "", + "Domainname": "", + "User": "", + "AttachStdin": false, + "AttachStdout": true, + "AttachStderr": true, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": [ + "FOO=bar", + "BAZ=quux" + ], + "Cmd": [ + "date" + ], + "Entrypoint": null, + "Image": "ubuntu", + "Labels": { + "com.example.vendor": "Acme", + "com.example.license": "GPL", + "com.example.version": "1.0" + }, + "Volumes": { + "/volumes/data": {} + }, + "WorkingDir": "", + "NetworkDisabled": false, + "MacAddress": "12:34:56:78:9a:bc", + "ExposedPorts": { + "22/tcp": {} + }, + "HostConfig": { + "Binds": ["/tmp:/tmp"], + "Links": ["redis3:redis"], + "LxcConf": {"lxc.utsname":"docker"}, + "Memory": 0, + "MemorySwap": 0, + "CpuShares": 512, + "CpuPeriod": 100000, + "CpuQuota": 50000, + "CpusetCpus": "0,1", + "CpusetMems": "0,1", + "BlkioWeight": 300, + "OomKillDisable": false, + "PidMode": "", + "PortBindings": { "22/tcp": [{ "HostPort": "11022" }] }, + "PublishAllPorts": false, + "Privileged": false, + "ReadonlyRootfs": false, + "Dns": ["8.8.8.8"], + "DnsSearch": [""], + "ExtraHosts": null, + "VolumesFrom": ["parent", "other:ro"], + "CapAdd": ["NET_ADMIN"], + "CapDrop": ["MKNOD"], + "RestartPolicy": { "Name": "", "MaximumRetryCount": 0 }, + "NetworkMode": "bridge", + "Devices": [], + "Ulimits": [{}], + "LogConfig": { "Type": "json-file", "Config": {} }, + "SecurityOpt": [], + "CgroupParent": "" + } + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + { + "Id":"e90e34656806", + "Warnings":[] + } + +**JSON parameters**: + +- **Hostname** - A string value containing the hostname to use for the + container. +- **Domainname** - A string value containing the domain name to use + for the container. +- **User** - A string value specifying the user inside the container. +- **AttachStdin** - Boolean value, attaches to `stdin`. +- **AttachStdout** - Boolean value, attaches to `stdout`. +- **AttachStderr** - Boolean value, attaches to `stderr`. +- **Tty** - Boolean value, Attach standard streams to a `tty`, including `stdin` if it is not closed. +- **OpenStdin** - Boolean value, opens `stdin`, +- **StdinOnce** - Boolean value, close `stdin` after the 1 attached client disconnects. +- **Env** - A list of environment variables in the form of `["VAR=value", ...]` +- **Labels** - Adds a map of labels to a container. To specify a map: `{"key":"value", ... }` +- **Cmd** - Command to run specified as a string or an array of strings. +- **Entrypoint** - Set the entry point for the container as a string or an array + of strings. +- **Image** - A string specifying the image name to use for the container. +- **Volumes** - An object mapping mount point paths (strings) inside the + container to empty objects. +- **WorkingDir** - A string specifying the working directory for commands to + run in. 
+- **NetworkDisabled** - Boolean value, when true disables networking for the
+  container
+- **ExposedPorts** - An object mapping ports to an empty object in the form of:
+  `"ExposedPorts": { "<port>/<protocol>: {}" }`
+- **HostConfig**
+  - **Binds** – A list of bind-mounts for this container. Each item is a string in one of these forms:
+    + `host-src:container-dest` to bind-mount a host path into the
+      container. Both `host-src` and `container-dest` must be an
+      _absolute_ path.
+    + `host-src:container-dest:ro` to make the bind-mount read-only
+      inside the container. Both `host-src` and `container-dest` must be
+      an _absolute_ path.
+  - **Links** - A list of links for the container. Each link entry should be
+    in the form of `container_name:alias`.
+  - **LxcConf** - LXC specific configurations. These configurations only
+    work when using the `lxc` execution driver.
+  - **Memory** - Memory limit in bytes.
+  - **MemorySwap** - Total memory limit (memory + swap); set `-1` to enable unlimited swap.
+    You must use this with `memory` and make the swap value larger than `memory`.
+  - **CpuShares** - An integer value containing the container's CPU Shares
+    (i.e., the relative weight vs. other containers).
+  - **CpuPeriod** - The length of a CPU period in microseconds.
+  - **CpuQuota** - Microseconds of CPU time that the container can get in a CPU period.
+  - **CpusetCpus** - String value containing the `cgroups CpusetCpus` to use.
+  - **CpusetMems** - Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems.
+  - **BlkioWeight** - Block IO weight (relative weight) accepts a weight value between 10 and 1000.
+  - **OomKillDisable** - Boolean value, whether to disable OOM Killer for the container or not.
+  - **PidMode** - Set the PID (Process) Namespace mode for the container;
+    `"container:<name|id>"`: joins another container's PID namespace
+    `"host"`: use the host's PID namespace inside the container
+  - **PortBindings** - A map of exposed container ports and the host port they
+    should map to. A JSON object in the form
+    `{ <port>/<protocol>: [{ "HostPort": "<port>" }] }`
+    Take note that `port` is specified as a string and not an integer value.
+  - **PublishAllPorts** - Allocates a random host port for all of a container's
+    exposed ports. Specified as a boolean value.
+  - **Privileged** - Gives the container full access to the host. Specified as
+    a boolean value.
+  - **ReadonlyRootfs** - Mount the container's root filesystem as read only.
+    Specified as a boolean value.
+  - **Dns** - A list of DNS servers for the container to use.
+  - **DnsSearch** - A list of DNS search domains
+  - **ExtraHosts** - A list of hostnames/IP mappings to add to the
+    container's `/etc/hosts` file. Specified in the form `["hostname:IP"]`.
+  - **VolumesFrom** - A list of volumes to inherit from another container.
+    Specified in the form `<container name>[:<ro|rw>]`
+  - **CapAdd** - A list of kernel capabilities to add to the container.
+  - **CapDrop** - A list of kernel capabilities to drop from the container.
+  - **RestartPolicy** – The behavior to apply when the container exits. The
+    value is an object with a `Name` property of either `"always"` to
+    always restart or `"on-failure"` to restart only when the container
+    exit code is non-zero. If `on-failure` is used, `MaximumRetryCount`
+    controls the number of times to retry before giving up.
+    The default is not to restart. (optional)
+    An ever-increasing delay (double the previous delay, starting at 100 ms)
+    is added before each restart to prevent flooding the server.
+  - **NetworkMode** - Sets the networking mode for the container. Supported
+    values are: `bridge`, `host`, `none`, and `container:<name|id>`
+  - **Devices** - A list of devices to add to the container specified as a JSON object in the
+    form
+    `{ "PathOnHost": "/dev/deviceName", "PathInContainer": "/dev/deviceName", "CgroupPermissions": "mrw"}`
+  - **Ulimits** - A list of ulimits to set in the container, specified as
+    `{ "Name": <name>, "Soft": <soft limit>, "Hard": <hard limit> }`, for example:
+    `Ulimits: { "Name": "nofile", "Soft": 1024, "Hard": 2048 }`
+  - **SecurityOpt**: A list of string values to customize labels for MLS
+    systems, such as SELinux.
+  - **LogConfig** - Log configuration for the container, specified as a JSON object in the form
+    `{ "Type": "<driver_name>", "Config": {"key1": "val1"}}`.
+    Available types: `json-file`, `syslog`, `journald`, `none`.
+    `syslog` available options are: `address`.
+  - **CgroupParent** - Path to `cgroups` under which the container's `cgroup` is created. If the path is not absolute, the path is considered to be relative to the `cgroups` path of the init process. Cgroups are created if they do not already exist.
+
+**Query parameters**:
+
+- **name** – Assign the specified name to the container. Must
+  match `/?[a-zA-Z0-9_-]+`.
+
+**Status codes**:
+
+- **201** – no error
+- **400** – bad parameter
+- **404** – no such container
+- **406** – impossible to attach (container not running)
+- **409** – conflict
+- **500** – server error
+
+#### Inspect a container
+
+`GET /containers/(id or name)/json`
+
+Return low-level information on the container `id`
+
+**Example request**:
+
+    GET /v1.19/containers/4fa6e0f0c678/json HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    {
+        "AppArmorProfile": "",
+        "Args": [
+            "-c",
+            "exit 9"
+        ],
+        "Config": {
+            "AttachStderr": true,
+            "AttachStdin": false,
+            "AttachStdout": true,
+            "Cmd": [
+                "/bin/sh",
+                "-c",
+                "exit 9"
+            ],
+            "Domainname": "",
+            "Entrypoint": null,
+            "Env": [
+                "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
+            ],
+            "ExposedPorts": null,
+            "Hostname": "ba033ac44011",
+            "Image": "ubuntu",
+            "Labels": {
+                "com.example.vendor": "Acme",
+                "com.example.license": "GPL",
+                "com.example.version": "1.0"
+            },
+            "MacAddress": "",
+            "NetworkDisabled": false,
+            "OnBuild": null,
+            "OpenStdin": false,
+            "PortSpecs": null,
+            "StdinOnce": false,
+            "Tty": false,
+            "User": "",
+            "Volumes": null,
+            "WorkingDir": ""
+        },
+        "Created": "2015-01-06T15:47:31.485331387Z",
+        "Driver": "devicemapper",
+        "ExecDriver": "native-0.2",
+        "ExecIDs": null,
+        "HostConfig": {
+            "Binds": null,
+            "BlkioWeight": 0,
+            "CapAdd": null,
+            "CapDrop": null,
+            "ContainerIDFile": "",
+            "CpusetCpus": "",
+            "CpusetMems": "",
+            "CpuShares": 0,
+            "CpuPeriod": 100000,
+            "Devices": [],
+            "Dns": null,
+            "DnsSearch": null,
+            "ExtraHosts": null,
+            "IpcMode": "",
+            "Links": null,
+            "LxcConf": [],
+            "Memory": 0,
+            "MemorySwap": 0,
+            "OomKillDisable": false,
+            "NetworkMode": "bridge",
+            "PidMode": "",
+            "PortBindings": {},
+            "Privileged": false,
+            "ReadonlyRootfs": false,
+            "PublishAllPorts": false,
+            "RestartPolicy": {
+                "MaximumRetryCount": 2,
+                "Name": "on-failure"
+            },
+            "LogConfig": {
+                "Config": null,
+                "Type": "json-file"
+            },
+            "SecurityOpt": null,
+            "VolumesFrom": null,
+            "Ulimits": [{}]
+        },
+        "HostnamePath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hostname",
+        "HostsPath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hosts",
+        "LogPath": "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b-json.log",
+        "Id": "ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39",
+        "Image": "04c5d3b7b0656168630d3ba35d8889bd0e9caafcaeb3004d2bfbc47e7c5d35d2",
+        "MountLabel": "",
+        "Name": "/boring_euclid",
+        "NetworkSettings": {
+            "Bridge": "",
+            "Gateway": "",
+            "IPAddress": "",
+            "IPPrefixLen": 0,
+            "MacAddress": "",
+            "PortMapping": null,
+            "Ports": null
+        },
+        "Path": "/bin/sh",
+        "ProcessLabel": "",
+        "ResolvConfPath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/resolv.conf",
+        "RestartCount": 1,
+        "State": {
+            "Error": "",
+            "ExitCode": 9,
+            "FinishedAt": "2015-01-06T15:47:32.080254511Z",
+            "OOMKilled": false,
+            "Paused": false,
+            "Pid": 0,
+            "Restarting": false,
+            "Running": true,
+            "StartedAt": "2015-01-06T15:47:32.072697474Z"
+        },
+        "Volumes": {},
+        "VolumesRW": {}
+    }
+
+**Status codes**:
+
+- **200** – no error
+- **404** – no such container
+- **500** – server error
+
+#### List processes running inside a container
+
+`GET /containers/(id or name)/top`
+
+List processes running inside the container `id`. On Unix systems this
+is done by running the `ps` command. This endpoint is not
+supported on Windows.
+
+**Example request**:
+
+    GET /v1.19/containers/4fa6e0f0c678/top HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    {
+       "Titles" : [
+         "UID", "PID", "PPID", "C", "STIME", "TTY", "TIME", "CMD"
+       ],
+       "Processes" : [
+         [
+           "root", "13642", "882", "0", "17:03", "pts/0", "00:00:00", "/bin/bash"
+         ],
+         [
+           "root", "13735", "13642", "0", "17:06", "pts/0", "00:00:00", "sleep 10"
+         ]
+       ]
+    }
+
+**Example request**:
+
+    GET /v1.19/containers/4fa6e0f0c678/top?ps_args=aux HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    {
+      "Titles" : [
+        "USER","PID","%CPU","%MEM","VSZ","RSS","TTY","STAT","START","TIME","COMMAND"
+      ],
+      "Processes" : [
+        [
+          "root","13642","0.0","0.1","18172","3184","pts/0","Ss","17:03","0:00","/bin/bash"
+        ],
+        [
+          "root","13895","0.0","0.0","4348","692","pts/0","S+","17:15","0:00","sleep 10"
+        ]
+      ]
+    }
+
+**Query parameters**:
+
+- **ps_args** – `ps` arguments to use (e.g., `aux`), defaults to `-ef`
+
+**Status codes**:
+
+- **200** – no error
+- **404** – no such container
+- **500** – server error
+
+#### Get container logs
+
+`GET /containers/(id or name)/logs`
+
+Get `stdout` and `stderr` logs from the container `id`
+
+> **Note**:
+> This endpoint works only for containers with the `json-file` or `journald` logging drivers.
+
+**Example request**:
+
+    GET /v1.19/containers/4fa6e0f0c678/logs?stderr=1&stdout=1&timestamps=1&follow=1&tail=10&since=1428990821 HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 101 UPGRADED
+    Content-Type: application/vnd.docker.raw-stream
+    Connection: Upgrade
+    Upgrade: tcp
+
+    {% raw %}
+    {{ STREAM }}
+    {% endraw %}
+
+**Query parameters**:
+
+- **follow** – 1/True/true or 0/False/false, return stream. Default `false`.
+- **stdout** – 1/True/true or 0/False/false, show `stdout` log. Default `false`.
+- **stderr** – 1/True/true or 0/False/false, show `stderr` log. Default `false`.
+- **since** – UNIX timestamp (integer) to filter logs. Specifying a timestamp
+  will only output log-entries since that timestamp. Default: 0 (unfiltered)
+- **timestamps** – 1/True/true or 0/False/false, print timestamps for
+  every log line.
Default `false`.
+- **tail** – Output specified number of lines at the end of logs: `all` or `<number>`. Default all.
+
+**Status codes**:
+
+- **101** – no error, hints proxy about hijacking
+- **200** – no error, no upgrade header found
+- **404** – no such container
+- **500** – server error
+
+#### Inspect changes on a container's filesystem
+
+`GET /containers/(id or name)/changes`
+
+Inspect changes on container `id`'s filesystem
+
+**Example request**:
+
+    GET /v1.19/containers/4fa6e0f0c678/changes HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    [
+        {
+            "Path": "/dev",
+            "Kind": 0
+        },
+        {
+            "Path": "/dev/kmsg",
+            "Kind": 1
+        },
+        {
+            "Path": "/test",
+            "Kind": 1
+        }
+    ]
+
+Values for `Kind`:
+
+- `0`: Modify
+- `1`: Add
+- `2`: Delete
+
+**Status codes**:
+
+- **200** – no error
+- **404** – no such container
+- **500** – server error
+
+#### Export a container
+
+`GET /containers/(id or name)/export`
+
+Export the contents of container `id`
+
+**Example request**:
+
+    GET /v1.19/containers/4fa6e0f0c678/export HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/octet-stream
+
+    {% raw %}
+    {{ TAR STREAM }}
+    {% endraw %}
+
+**Status codes**:
+
+- **200** – no error
+- **404** – no such container
+- **500** – server error
+
+#### Get container stats based on resource usage
+
+`GET /containers/(id or name)/stats`
+
+This endpoint returns a live stream of a container's resource usage statistics.
+
+**Example request**:
+
+    GET /v1.19/containers/redis1/stats HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    {
+       "read" : "2015-01-08T22:57:31.547920715Z",
+       "network" : {
+          "rx_dropped" : 0,
+          "rx_bytes" : 648,
+          "rx_errors" : 0,
+          "tx_packets" : 8,
+          "tx_dropped" : 0,
+          "rx_packets" : 8,
+          "tx_errors" : 0,
+          "tx_bytes" : 648
+       },
+       "memory_stats" : {
+          "stats" : {
+             "total_pgmajfault" : 0,
+             "cache" : 0,
+             "mapped_file" : 0,
+             "total_inactive_file" : 0,
+             "pgpgout" : 414,
+             "rss" : 6537216,
+             "total_mapped_file" : 0,
+             "writeback" : 0,
+             "unevictable" : 0,
+             "pgpgin" : 477,
+             "total_unevictable" : 0,
+             "pgmajfault" : 0,
+             "total_rss" : 6537216,
+             "total_rss_huge" : 6291456,
+             "total_writeback" : 0,
+             "total_inactive_anon" : 0,
+             "rss_huge" : 6291456,
+             "hierarchical_memory_limit" : 67108864,
+             "total_pgfault" : 964,
+             "total_active_file" : 0,
+             "active_anon" : 6537216,
+             "total_active_anon" : 6537216,
+             "total_pgpgout" : 414,
+             "total_cache" : 0,
+             "inactive_anon" : 0,
+             "active_file" : 0,
+             "pgfault" : 964,
+             "inactive_file" : 0,
+             "total_pgpgin" : 477
+          },
+          "max_usage" : 6651904,
+          "usage" : 6537216,
+          "failcnt" : 0,
+          "limit" : 67108864
+       },
+       "blkio_stats" : {},
+       "cpu_stats" : {
+          "cpu_usage" : {
+             "percpu_usage" : [
+                8646879,
+                24472255,
+                36438778,
+                30657443
+             ],
+             "usage_in_usermode" : 50000000,
+             "total_usage" : 100215355,
+             "usage_in_kernelmode" : 30000000
+          },
+          "system_cpu_usage" : 739306590000000,
+          "throttling_data" : {"periods":0,"throttled_periods":0,"throttled_time":0}
+       },
+       "precpu_stats" : {
+          "cpu_usage" : {
+             "percpu_usage" : [
+                8646879,
+                24350896,
+                36438778,
+                30657443
+             ],
+             "usage_in_usermode" : 50000000,
+             "total_usage" : 100093996,
+             "usage_in_kernelmode" : 30000000
+          },
+          "system_cpu_usage" : 9492140000000,
+          "throttling_data" : {"periods":0,"throttled_periods":0,"throttled_time":0}
+       }
+    }
+
+The `precpu_stats` field contains the CPU statistics from the previous read,
+which are used to calculate the CPU usage percentage; it is not an exact copy
+of the `cpu_stats` field.
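+
+As an illustration of that calculation, a client could compute a CPU usage
+percentage from a single sample by dividing the container's CPU delta by the
+system CPU delta between `precpu_stats` and `cpu_stats`, scaled by the number
+of CPUs. The following Go sketch is not part of the API; the struct and
+function names are invented for the example, and the struct mirrors only the
+JSON fields it needs:
+
+```
+package main
+
+import (
+    "encoding/json"
+    "fmt"
+)
+
+type cpuUsage struct {
+    TotalUsage  uint64   `json:"total_usage"`
+    PercpuUsage []uint64 `json:"percpu_usage"`
+}
+
+type cpuStats struct {
+    CPUUsage       cpuUsage `json:"cpu_usage"`
+    SystemCPUUsage uint64   `json:"system_cpu_usage"`
+}
+
+// statsSample mirrors the two fields of the stats JSON used below.
+type statsSample struct {
+    CPUStats    cpuStats `json:"cpu_stats"`
+    PreCPUStats cpuStats `json:"precpu_stats"`
+}
+
+// cpuPercent returns the container's share of host CPU time over the
+// sampling interval, scaled by the number of CPUs in percpu_usage.
+func cpuPercent(s statsSample) float64 {
+    cpuDelta := float64(s.CPUStats.CPUUsage.TotalUsage) - float64(s.PreCPUStats.CPUUsage.TotalUsage)
+    systemDelta := float64(s.CPUStats.SystemCPUUsage) - float64(s.PreCPUStats.SystemCPUUsage)
+    if cpuDelta <= 0 || systemDelta <= 0 {
+        return 0.0
+    }
+    return cpuDelta / systemDelta * float64(len(s.CPUStats.CPUUsage.PercpuUsage)) * 100.0
+}
+
+func main() {
+    // raw stands in for one JSON object read from the stats stream;
+    // the numbers are illustrative only.
+    raw := []byte(`{"cpu_stats":{"cpu_usage":{"total_usage":100215355,"percpu_usage":[1,2,3,4]},"system_cpu_usage":739306590000000},"precpu_stats":{"cpu_usage":{"total_usage":100093996},"system_cpu_usage":739306580000000}}`)
+    var s statsSample
+    if err := json.Unmarshal(raw, &s); err != nil {
+        panic(err)
+    }
+    fmt.Printf("CPU: %.2f%%\n", cpuPercent(s))
+}
+```
+
+A real client would decode successive JSON objects from the streaming
+response body and apply this per sample.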
+ +**Query parameters**: + +- **stream** – 1/True/true or 0/False/false, pull stats once then disconnect. Default `true`. + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Resize a container TTY + +`POST /containers/(id or name)/resize?h=&w=` + +Resize the TTY for container with `id`. You must restart the container for the resize to take effect. + +**Example request**: + + POST /v1.19/containers/4fa6e0f0c678/resize?h=40&w=80 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Length: 0 + Content-Type: text/plain; charset=utf-8 + +**Query parameters**: + +- **h** – height of `tty` session +- **w** – width + +**Status codes**: + +- **200** – no error +- **404** – No such container +- **500** – Cannot resize container + +#### Start a container + +`POST /containers/(id or name)/start` + +Start the container `id` + +> **Note**: +> For backwards compatibility, this endpoint accepts a `HostConfig` as JSON-encoded request body. +> See [create a container](#create-a-container) for details. + +**Example request**: + + POST /v1.19/containers/e90e34656806/start HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Status codes**: + +- **204** – no error +- **304** – container already started +- **404** – no such container +- **500** – server error + +#### Stop a container + +`POST /containers/(id or name)/stop` + +Stop the container `id` + +**Example request**: + + POST /v1.19/containers/e90e34656806/stop?t=5 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **t** – number of seconds to wait before killing the container + +**Status codes**: + +- **204** – no error +- **304** – container already stopped +- **404** – no such container +- **500** – server error + +#### Restart a container + +`POST /containers/(id or name)/restart` + +Restart the container `id` + +**Example request**: + + POST /v1.19/containers/e90e34656806/restart?t=5 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **t** – number of seconds to wait before killing the container + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **500** – server error + +#### Kill a container + +`POST /containers/(id or name)/kill` + +Kill the container `id` + +**Example request**: + + POST /v1.19/containers/e90e34656806/kill HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **signal** - Signal to send to the container: integer or string like `SIGINT`. + When not set, `SIGKILL` is assumed and the call waits for the container to exit. 
+ +**Status codes**: + +- **204** – no error +- **404** – no such container +- **500** – server error + +#### Rename a container + +`POST /containers/(id or name)/rename` + +Rename the container `id` to a `new_name` + +**Example request**: + + POST /v1.19/containers/e90e34656806/rename?name=new_name HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **name** – new name for the container + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **409** - conflict name already assigned +- **500** – server error + +#### Pause a container + +`POST /containers/(id or name)/pause` + +Pause the container `id` + +**Example request**: + + POST /v1.19/containers/e90e34656806/pause HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **500** – server error + +#### Unpause a container + +`POST /containers/(id or name)/unpause` + +Unpause the container `id` + +**Example request**: + + POST /v1.19/containers/e90e34656806/unpause HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **500** – server error + +#### Attach to a container + +`POST /containers/(id or name)/attach` + +Attach to the container `id` + +**Example request**: + + POST /v1.19/containers/16253994b7c4/attach?logs=1&stream=0&stdout=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 101 UPGRADED + Content-Type: application/vnd.docker.raw-stream + Connection: Upgrade + Upgrade: tcp + + {% raw %} + {{ STREAM }} + {% endraw %} + +**Query parameters**: + +- **logs** – 1/True/true or 0/False/false, return logs. Default `false`. +- **stream** – 1/True/true or 0/False/false, return stream. + Default `false`. +- **stdin** – 1/True/true or 0/False/false, if `stream=true`, attach + to `stdin`. Default `false`. +- **stdout** – 1/True/true or 0/False/false, if `logs=true`, return + `stdout` log, if `stream=true`, attach to `stdout`. Default `false`. +- **stderr** – 1/True/true or 0/False/false, if `logs=true`, return + `stderr` log, if `stream=true`, attach to `stderr`. Default `false`. + +**Status codes**: + +- **101** – no error, hints proxy about hijacking +- **200** – no error, no upgrade header found +- **400** – bad parameter +- **404** – no such container +- **500** – server error + +**Stream details**: + +When using the TTY setting is enabled in +[`POST /containers/create` +](#create-a-container), +the stream is the raw data from the process PTY and client's `stdin`. +When the TTY is disabled, then the stream is multiplexed to separate +`stdout` and `stderr`. + +The format is a **Header** and a **Payload** (frame). + +**HEADER** + +The header contains the information which the stream writes (`stdout` or +`stderr`). It also contains the size of the associated frame encoded in the +last four bytes (`uint32`). + +It is encoded on the first eight bytes like this: + + header := [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4} + +`STREAM_TYPE` can be: + +- 0: `stdin` (is written on `stdout`) +- 1: `stdout` +- 2: `stderr` + +`SIZE1, SIZE2, SIZE3, SIZE4` are the four bytes of +the `uint32` size encoded as big endian. + +**PAYLOAD** + +The payload is the raw stream. + +**IMPLEMENTATION** + +The simplest way to implement the Attach protocol is the following: + + 1. Read eight bytes. + 2. Choose `stdout` or `stderr` depending on the first byte. + 3. Extract the frame size from the last four bytes. + 4. 
Read the extracted size and output it on the correct output. + 5. Goto 1. + +#### Attach to a container (websocket) + +`GET /containers/(id or name)/attach/ws` + +Attach to the container `id` via websocket + +Implements websocket protocol handshake according to [RFC 6455](http://tools.ietf.org/html/rfc6455) + +**Example request** + + GET /v1.19/containers/e90e34656806/attach/ws?logs=0&stream=1&stdin=1&stdout=1&stderr=1 HTTP/1.1 + +**Example response** + + {% raw %} + {{ STREAM }} + {% endraw %} + +**Query parameters**: + +- **logs** – 1/True/true or 0/False/false, return logs. Default `false`. +- **stream** – 1/True/true or 0/False/false, return stream. + Default `false`. +- **stdin** – 1/True/true or 0/False/false, if `stream=true`, attach + to `stdin`. Default `false`. +- **stdout** – 1/True/true or 0/False/false, if `logs=true`, return + `stdout` log, if `stream=true`, attach to `stdout`. Default `false`. +- **stderr** – 1/True/true or 0/False/false, if `logs=true`, return + `stderr` log, if `stream=true`, attach to `stderr`. Default `false`. + +**Status codes**: + +- **200** – no error +- **400** – bad parameter +- **404** – no such container +- **500** – server error + +#### Wait a container + +`POST /containers/(id or name)/wait` + +Block until container `id` stops, then returns the exit code + +**Example request**: + + POST /v1.19/containers/16253994b7c4/wait HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"StatusCode": 0} + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Remove a container + +`DELETE /containers/(id or name)` + +Remove the container `id` from the filesystem + +**Example request**: + + DELETE /v1.19/containers/16253994b7c4?v=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **v** – 1/True/true or 0/False/false, Remove the volumes + associated to the container. Default `false`. +- **force** - 1/True/true or 0/False/false, Kill then remove the container. + Default `false`. +- **link** - 1/True/true or 0/False/false, Remove the specified + link associated to the container. Default `false`. 
+ +**Status codes**: + +- **204** – no error +- **400** – bad parameter +- **404** – no such container +- **409** – conflict +- **500** – server error + +#### Copy files or folders from a container + +`POST /containers/(id or name)/copy` + +Copy files or folders of container `id` + +**Example request**: + + POST /v1.19/containers/4fa6e0f0c678/copy HTTP/1.1 + Content-Type: application/json + + { + "Resource": "test.txt" + } + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +### 2.2 Images + +#### List Images + +`GET /images/json` + +**Example request**: + + GET /v1.19/images/json?all=0 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "RepoTags": [ + "ubuntu:12.04", + "ubuntu:precise", + "ubuntu:latest" + ], + "Id": "8dbd9e392a964056420e5d58ca5cc376ef18e2de93b5cc90e868a1bbc8318c1c", + "Created": 1365714795, + "Size": 131506275, + "VirtualSize": 131506275, + "Labels": {} + }, + { + "RepoTags": [ + "ubuntu:12.10", + "ubuntu:quantal" + ], + "ParentId": "27cf784147099545", + "Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", + "Created": 1364102658, + "Size": 24653, + "VirtualSize": 180116135, + "Labels": { + "com.example.version": "v1" + } + } + ] + +**Example request, with digest information**: + + GET /v1.19/images/json?digests=1 HTTP/1.1 + +**Example response, with digest information**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Created": 1420064636, + "Id": "4986bf8c15363d1c5d15512d5266f8777bfba4974ac56e3270e7760f6f0a8125", + "ParentId": "ea13149945cb6b1e746bf28032f02e9b5a793523481a0a18645fc77ad53c4ea2", + "RepoDigests": [ + "localhost:5000/test/busybox@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf" + ], + "RepoTags": [ + "localhost:5000/test/busybox:latest", + "playdate:latest" + ], + "Size": 0, + "VirtualSize": 2429728, + "Labels": {} + } + ] + +The response shows a single image `Id` associated with two repositories +(`RepoTags`): `localhost:5000/test/busybox`: and `playdate`. A caller can use +either of the `RepoTags` values `localhost:5000/test/busybox:latest` or +`playdate:latest` to reference the image. + +You can also use `RepoDigests` values to reference an image. In this response, +the array has only one reference and that is to the +`localhost:5000/test/busybox` repository; the `playdate` repository has no +digest. You can reference this digest using the value: +`localhost:5000/test/busybox@sha256:cbbf2f9a99b47fc460d...` + +See the `docker run` and `docker build` commands for examples of digest and tag +references on the command line. + +**Query parameters**: + +- **all** – 1/True/true or 0/False/false, default false +- **filters** – a JSON encoded value of the filters (a map[string][]string) to process on the images list. 
Available filters: + - `dangling=true` + - `label=key` or `label="key=value"` of an image label +- **filter** - only return images with the specified name + +#### Build image from a Dockerfile + +`POST /build` + +Build an image from a Dockerfile + +**Example request**: + + POST /v1.19/build HTTP/1.1 + Content-Type: application/x-tar + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"stream": "Step 1/5..."} + {"stream": "..."} + {"error": "Error...", "errorDetail": {"code": 123, "message": "Error..."}} + +The input stream must be a `tar` archive compressed with one of the +following algorithms: `identity` (no compression), `gzip`, `bzip2`, `xz`. + +The archive must include a build instructions file, typically called +`Dockerfile` at the archive's root. The `dockerfile` parameter may be +used to specify a different build instructions file. To do this, its value must be +the path to the alternate build instructions file to use. + +The archive may include any number of other files, +which are accessible in the build context (See the [*ADD build +command*](../reference/builder.md#add)). + +The Docker daemon performs a preliminary validation of the `Dockerfile` before +starting the build, and returns an error if the syntax is incorrect. After that, +each instruction is run one-by-one until the ID of the new image is output. + +The build is canceled if the client drops the connection by quitting +or being killed. + +**Query parameters**: + +- **dockerfile** - Path within the build context to the Dockerfile. This is + ignored if `remote` is specified and points to an individual filename. +- **t** – A name and optional tag to apply to the image in the `name:tag` format. + If you omit the `tag` the default `latest` value is assumed. +- **remote** – A Git repository URI or HTTP/HTTPS URI build source. If the + URI specifies a filename, the file's contents are placed into a file + called `Dockerfile`. +- **q** – Suppress verbose build output. +- **nocache** – Do not use the cache when building the image. +- **pull** - Attempt to pull the image even if an older image exists locally. +- **rm** - Remove intermediate containers after a successful build (default behavior). +- **forcerm** - Always remove intermediate containers (includes `rm`). +- **memory** - Set memory limit for build. +- **memswap** - Total memory (memory + swap), `-1` to enable unlimited swap. +- **cpushares** - CPU shares (relative weight). +- **cpusetcpus** - CPUs in which to allow execution (e.g., `0-3`, `0,1`). +- **cpuperiod** - The length of a CPU period in microseconds. +- **cpuquota** - Microseconds of CPU time that the container can get in a CPU period. + +**Request Headers**: + +- **Content-type** – Set to `"application/x-tar"`. +- **X-Registry-Config** – base64-encoded ConfigFile object + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Create an image + +`POST /images/create` + +Create an image either by pulling it from the registry or by importing it + +**Example request**: + + POST /v1.19/images/create?fromImage=busybox&tag=latest HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status": "Pulling..."} + {"status": "Pulling", "progress": "1 B/ 100 B", "progressDetail": {"current": 1, "total": 100}} + {"error": "Invalid..."} + ... 
+ +When using this endpoint to pull an image from the registry, the +`X-Registry-Auth` header can be used to include +a base64-encoded AuthConfig object. + +**Query parameters**: + +- **fromImage** – Name of the image to pull. +- **fromSrc** – Source to import. The value may be a URL from which the image + can be retrieved or `-` to read the image from the request body. +- **repo** – Repository name. +- **tag** – Tag. If empty when pulling an image, this causes all tags + for the given image to be pulled. + +**Request Headers**: + +- **X-Registry-Auth** – base64-encoded AuthConfig object + +**Status codes**: + +- **200** – no error +- **404** - repository does not exist or no read access +- **500** – server error + + + +#### Inspect an image + +`GET /images/(name)/json` + +Return low-level information on the image `name` + +**Example request**: + + GET /v1.19/images/ubuntu/json HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Created": "2013-03-23T22:24:18.818426-07:00", + "Container": "3d67245a8d72ecf13f33dffac9f79dcdf70f75acb84d308770391510e0c23ad0", + "ContainerConfig": { + "Hostname": "", + "User": "", + "AttachStdin": false, + "AttachStdout": false, + "AttachStderr": false, + "Tty": true, + "OpenStdin": true, + "StdinOnce": false, + "Env": null, + "Cmd": ["/bin/bash"], + "Dns": null, + "Image": "ubuntu", + "Labels": { + "com.example.vendor": "Acme", + "com.example.license": "GPL", + "com.example.version": "1.0" + }, + "Volumes": null, + "VolumesFrom": "", + "WorkingDir": "" + }, + "Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", + "Parent": "27cf784147099545", + "Size": 6824592 + } + +**Status codes**: + +- **200** – no error +- **404** – no such image +- **500** – server error + +#### Get the history of an image + +`GET /images/(name)/history` + +Return the history of the image `name` + +**Example request**: + + GET /v1.19/images/ubuntu/history HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Id": "3db9c44f45209632d6050b35958829c3a2aa256d81b9a7be45b362ff85c54710", + "Created": 1398108230, + "CreatedBy": "/bin/sh -c #(nop) ADD file:eb15dbd63394e063b805a3c32ca7bf0266ef64676d5a6fab4801f2e81e2a5148 in /", + "Tags": [ + "ubuntu:lucid", + "ubuntu:10.04" + ], + "Size": 182964289, + "Comment": "" + }, + { + "Id": "6cfa4d1f33fb861d4d114f43b25abd0ac737509268065cdfd69d544a59c85ab8", + "Created": 1398108222, + "CreatedBy": "/bin/sh -c #(nop) MAINTAINER Tianon Gravi - mkimage-debootstrap.sh -i iproute,iputils-ping,ubuntu-minimal -t lucid.tar.xz lucid http://archive.ubuntu.com/ubuntu/", + "Tags": null, + "Size": 0, + "Comment": "" + }, + { + "Id": "511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158", + "Created": 1371157430, + "CreatedBy": "", + "Tags": [ + "scratch12:latest", + "scratch:latest" + ], + "Size": 0, + "Comment": "Imported from -" + } + ] + +**Status codes**: + +- **200** – no error +- **404** – no such image +- **500** – server error + +#### Push an image on the registry + +`POST /images/(name)/push` + +Push the image `name` on the registry + +**Example request**: + + POST /v1.19/images/test/push HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status": "Pushing..."} + {"status": "Pushing", "progress": "1/? (n/a)", "progressDetail": {"current": 1}}} + {"error": "Invalid..."} + ... 
+ +If you wish to push an image on to a private registry, that image must already have a tag +into a repository which references that registry `hostname` and `port`. This repository name should +then be used in the URL. This duplicates the command line's flow. + +**Example request**: + + POST /v1.19/images/registry.acme.com:5000/test/push HTTP/1.1 + + +**Query parameters**: + +- **tag** – The tag to associate with the image on the registry. This is optional. + +**Request Headers**: + +- **X-Registry-Auth** – base64-encoded AuthConfig object. + +**Status codes**: + +- **200** – no error +- **404** – no such image +- **500** – server error + +#### Tag an image into a repository + +`POST /images/(name)/tag` + +Tag the image `name` into a repository + +**Example request**: + + POST /v1.19/images/test/tag?repo=myrepo&force=0&tag=v42 HTTP/1.1 + +**Example response**: + + HTTP/1.1 201 Created + +**Query parameters**: + +- **repo** – The repository to tag in +- **force** – 1/True/true or 0/False/false, default false +- **tag** - The new tag name + +**Status codes**: + +- **201** – no error +- **400** – bad parameter +- **404** – no such image +- **409** – conflict +- **500** – server error + +#### Remove an image + +`DELETE /images/(name)` + +Remove the image `name` from the filesystem + +**Example request**: + + DELETE /v1.19/images/test HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-type: application/json + + [ + {"Untagged": "3e2f21a89f"}, + {"Deleted": "3e2f21a89f"}, + {"Deleted": "53b4f83ac9"} + ] + +**Query parameters**: + +- **force** – 1/True/true or 0/False/false, default false +- **noprune** – 1/True/true or 0/False/false, default false + +**Status codes**: + +- **200** – no error +- **404** – no such image +- **409** – conflict +- **500** – server error + +#### Search images + +`GET /images/search` + +Search for an image on [Docker Hub](https://hub.docker.com). This API +returns both `is_trusted` and `is_automated` images. Currently, they +are considered identical. In the future, the `is_trusted` property will +be deprecated and replaced by the `is_automated` property. + +> **Note**: +> The response keys have changed from API v1.6 to reflect the JSON +> sent by the registry server to the docker daemon's request. + +**Example request**: + + GET /v1.19/images/search?term=sshd HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "star_count": 12, + "is_official": false, + "name": "wma55/u1210sshd", + "is_trusted": false, + "is_automated": false, + "description": "" + }, + { + "star_count": 10, + "is_official": false, + "name": "jdswinbank/sshd", + "is_trusted": false, + "is_automated": false, + "description": "" + }, + { + "star_count": 18, + "is_official": false, + "name": "vgauthier/sshd", + "is_trusted": false, + "is_automated": false, + "description": "" + } + ... 
+ ] + +**Query parameters**: + +- **term** – term to search + +**Status codes**: + +- **200** – no error +- **500** – server error + +### 2.3 Misc + +#### Check auth configuration + +`POST /auth` + +Get the default username and email + +**Example request**: + + POST /v1.19/auth HTTP/1.1 + Content-Type: application/json + + { + "username": "hannibal", + "password": "xxxx", + "email": "hannibal@a-team.com", + "serveraddress": "https://index.docker.io/v1/" + } + +**Example response**: + + HTTP/1.1 200 OK + +**Status codes**: + +- **200** – no error +- **204** – no error +- **500** – server error + +#### Display system-wide information + +`GET /info` + +Display system-wide information + +**Example request**: + + GET /v1.19/info HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Containers": 11, + "CpuCfsPeriod": true, + "CpuCfsQuota": true, + "Debug": false, + "DockerRootDir": "/var/lib/docker", + "Driver": "btrfs", + "DriverStatus": [[""]], + "ExecutionDriver": "native-0.1", + "ExperimentalBuild": false, + "HttpProxy": "http://test:test@localhost:8080", + "HttpsProxy": "https://test:test@localhost:8080", + "ID": "7TRN:IPZB:QYBB:VPBQ:UMPP:KARE:6ZNR:XE6T:7EWV:PKF4:ZOJD:TPYS", + "IPv4Forwarding": true, + "Images": 16, + "IndexServerAddress": "https://index.docker.io/v1/", + "InitPath": "/usr/bin/docker", + "InitSha1": "", + "KernelVersion": "3.12.0-1-amd64", + "Labels": [ + "storage=ssd" + ], + "MemTotal": 2099236864, + "MemoryLimit": true, + "NCPU": 1, + "NEventsListener": 0, + "NFd": 11, + "NGoroutines": 21, + "Name": "prod-server-42", + "NoProxy": "9.81.1.160", + "OomKillDisable": true, + "OperatingSystem": "Boot2Docker", + "RegistryConfig": { + "IndexConfigs": { + "docker.io": { + "Mirrors": null, + "Name": "docker.io", + "Official": true, + "Secure": true + } + }, + "InsecureRegistryCIDRs": [ + "127.0.0.0/8" + ] + }, + "SwapLimit": false, + "SystemTime": "2015-03-10T11:11:23.730591467-07:00" + } + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Show the docker version information + +`GET /version` + +Show the docker version information + +**Example request**: + + GET /v1.19/version HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Version": "1.5.0", + "Os": "linux", + "KernelVersion": "3.18.5-tinycore64", + "GoVersion": "go1.4.1", + "GitCommit": "a8a31ef", + "Arch": "amd64", + "ApiVersion": "1.19" + } + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Ping the docker server + +`GET /_ping` + +Ping the docker server + +**Example request**: + + GET /v1.19/_ping HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: text/plain + + OK + +**Status codes**: + +- **200** - no error +- **500** - server error + +#### Create a new image from a container's changes + +`POST /commit` + +Create a new image from a container's changes + +**Example request**: + + POST /v1.19/commit?container=44c004db4b17&comment=message&repo=myrepo HTTP/1.1 + Content-Type: application/json + + { + "Hostname": "", + "Domainname": "", + "User": "", + "AttachStdin": false, + "AttachStdout": true, + "AttachStderr": true, + "PortSpecs": null, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": null, + "Cmd": [ + "date" + ], + "Volumes": { + "/tmp": {} + }, + "Labels": { + "key1": "value1", + "key2": "value2" + }, + "WorkingDir": "", + "NetworkDisabled": false, + "ExposedPorts": { + "22/tcp": {} + } + } + +**Example response**: + + HTTP/1.1 201 Created 
+ Content-Type: application/json + + {"Id": "596069db4bf5"} + +**JSON parameters**: + +- **config** - the container's configuration + +**Query parameters**: + +- **container** – source container +- **repo** – repository +- **tag** – tag +- **comment** – commit message +- **author** – author (e.g., "John Hannibal Smith + <[hannibal@a-team.com](mailto:hannibal%40a-team.com)>") + +**Status codes**: + +- **201** – no error +- **404** – no such container +- **500** – server error + +#### Monitor Docker's events + +`GET /events` + +Get container events from docker, in real time via streaming. + +Docker containers report the following events: + + attach, commit, copy, create, destroy, die, exec_create, exec_start, export, kill, oom, pause, rename, resize, restart, start, stop, top, unpause + +Docker images report the following events: + + untag, delete + +**Example request**: + + GET /v1.19/events?since=1374067924 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status": "create", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067924} + {"status": "start", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067924} + {"status": "stop", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067966} + {"status": "destroy", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067970} + +**Query parameters**: + +- **since** – Timestamp. Show all events created since timestamp and then stream +- **until** – Timestamp. Show events created until given timestamp and stop streaming +- **filters** – A json encoded value of the filters (a map[string][]string) to process on the event list. Available filters: + - `container=`; -- container to filter + - `event=`; -- event to filter + - `image=`; -- image to filter + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Get a tarball containing all images in a repository + +`GET /images/(name)/get` + +Get a tarball containing all images and metadata for the repository specified +by `name`. + +If `name` is a specific name and tag (e.g. ubuntu:latest), then only that image +(and its parents) are returned. If `name` is an image ID, similarly only that +image (and its parents) are returned, but with the exclusion of the +'repositories' file in the tarball, as there were no image names referenced. + +See the [image tarball format](#image-tarball-format) for more details. + +**Example request** + + GET /v1.19/images/ubuntu/get + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + Binary data stream + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Get a tarball containing all images + +`GET /images/get` + +Get a tarball containing all images and metadata for one or more repositories. + +For each value of the `names` parameter: if it is a specific name and tag (e.g. +`ubuntu:latest`), then only that image (and its parents) are returned; if it is +an image ID, similarly only that image (and its parents) are returned and there +would be no names referenced in the 'repositories' file for this image ID. + +See the [image tarball format](#image-tarball-format) for more details. 
+ +**Example request** + + GET /v1.19/images/get?names=myname%2Fmyapp%3Alatest&names=busybox + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + Binary data stream + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Load a tarball with a set of images and tags into docker + +`POST /images/load` + +Load a set of images and tags into a Docker repository. +See the [image tarball format](#image-tarball-format) for more details. + +**Example request** + + POST /v1.19/images/load + Content-Type: application/x-tar + + Tarball in body + +**Example response**: + + HTTP/1.1 200 OK + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Image tarball format + +An image tarball contains one directory per image layer (named using its long ID), +each containing these files: + +- `VERSION`: currently `1.0` - the file format version +- `json`: detailed layer information, similar to `docker inspect layer_id` +- `layer.tar`: A tarfile containing the filesystem changes in this layer + +The `layer.tar` file contains `aufs` style `.wh..wh.aufs` files and directories +for storing attribute changes and deletions. + +If the tarball defines a repository, the tarball should also include a `repositories` file at +the root that contains a list of repository and tag names mapped to layer IDs. + +``` +{"hello-world": + {"latest": "565a9d68a73f6706862bfe8409a7f659776d4d60a8d096eb4a3cbce6999cc2a1"} +} +``` + +#### Exec Create + +`POST /containers/(id or name)/exec` + +Sets up an exec instance in a running container `id` + +**Example request**: + + POST /v1.19/containers/e90e34656806/exec HTTP/1.1 + Content-Type: application/json + + { + "AttachStdin": true, + "AttachStdout": true, + "AttachStderr": true, + "Cmd": ["sh"], + "Tty": true, + "User": "123:456" + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + { + "Id": "f90e34656806", + "Warnings":[] + } + +**JSON parameters**: + +- **AttachStdin** - Boolean value, attaches to `stdin` of the `exec` command. +- **AttachStdout** - Boolean value, attaches to `stdout` of the `exec` command. +- **AttachStderr** - Boolean value, attaches to `stderr` of the `exec` command. +- **Tty** - Boolean value to allocate a pseudo-TTY. +- **Cmd** - Command to run specified as a string or an array of strings. +- **User** - A string value specifying the user, and optionally, group to run + the exec process inside the container. Format is one of: `"user"`, + `"user:group"`, `"uid"`, or `"uid:gid"`. + +**Status codes**: + +- **201** – no error +- **404** – no such container + +#### Exec Start + +`POST /exec/(id)/start` + +Starts a previously set up `exec` instance `id`. If `detach` is true, this API +returns after starting the `exec` command. Otherwise, this API sets up an +interactive session with the `exec` command. + +**Example request**: + + POST /v1.19/exec/e90e34656806/start HTTP/1.1 + Content-Type: application/json + + { + "Detach": false, + "Tty": false + } + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/vnd.docker.raw-stream + + {% raw %} + {{ STREAM }} + {% endraw %} + +**JSON parameters**: + +- **Detach** - Detach from the `exec` command. +- **Tty** - Boolean value to allocate a pseudo-TTY. 
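+
+When `Tty` is false, the resulting stream is multiplexed with the same
+eight-byte frame headers as `POST /containers/(id or name)/attach` (see the
+stream details below and in the attach section). As a rough sketch of the
+client side (the function name is invented for this example and is not part
+of the API), the frames can be demultiplexed like this:
+
+```
+package main
+
+import (
+    "encoding/binary"
+    "errors"
+    "io"
+    "os"
+)
+
+// demux reads eight-byte frame headers from r and copies each payload
+// to stdout or stderr according to the STREAM_TYPE byte, following the
+// framing described in the attach stream details.
+func demux(r io.Reader, stdout, stderr io.Writer) error {
+    hdr := make([]byte, 8)
+    for {
+        if _, err := io.ReadFull(r, hdr); err != nil {
+            if errors.Is(err, io.EOF) {
+                return nil
+            }
+            return err
+        }
+        // The last four header bytes hold the frame size, big endian.
+        size := int64(binary.BigEndian.Uint32(hdr[4:8]))
+        w := stdout // types 0 (stdin written on stdout) and 1 (stdout)
+        if hdr[0] == 2 {
+            w = stderr
+        }
+        if _, err := io.CopyN(w, r, size); err != nil {
+            return err
+        }
+    }
+}
+
+func main() {
+    // In practice r would be the hijacked connection returned by
+    // POST /exec/(id)/start; stdin is used here only as a stand-in.
+    if err := demux(os.Stdin, os.Stdout, os.Stderr); err != nil {
+        panic(err)
+    }
+}
+```
+
+The same loop applies to the attach endpoint whenever the container was
+created without a TTY.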
+ +**Status codes**: + +- **200** – no error +- **404** – no such exec instance + +**Stream details**: + +Similar to the stream behavior of `POST /containers/(id or name)/attach` API + +#### Exec Resize + +`POST /exec/(id)/resize` + +Resizes the `tty` session used by the `exec` command `id`. The unit is number of characters. +This API is valid only if `tty` was specified as part of creating and starting the `exec` command. + +**Example request**: + + POST /v1.19/exec/e90e34656806/resize?h=40&w=80 HTTP/1.1 + Content-Type: text/plain + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: text/plain + +**Query parameters**: + +- **h** – height of `tty` session +- **w** – width + +**Status codes**: + +- **201** – no error +- **404** – no such exec instance + +#### Exec Inspect + +`GET /exec/(id)/json` + +Return low-level information about the `exec` command `id`. + +**Example request**: + + GET /v1.19/exec/11fb006128e8ceb3942e7c58d77750f24210e35f879dd204ac975c184b820b39/json HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: plain/text + + { + "ID" : "11fb006128e8ceb3942e7c58d77750f24210e35f879dd204ac975c184b820b39", + "Running" : false, + "ExitCode" : 2, + "ProcessConfig" : { + "privileged" : false, + "user" : "", + "tty" : false, + "entrypoint" : "sh", + "arguments" : [ + "-c", + "exit 2" + ] + }, + "OpenStdin" : false, + "OpenStderr" : false, + "OpenStdout" : false, + "Container" : { + "State" : { + "Running" : true, + "Paused" : false, + "Restarting" : false, + "OOMKilled" : false, + "Pid" : 3650, + "ExitCode" : 0, + "Error" : "", + "StartedAt" : "2014-11-17T22:26:03.717657531Z", + "FinishedAt" : "0001-01-01T00:00:00Z" + }, + "ID" : "8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c", + "Created" : "2014-11-17T22:26:03.626304998Z", + "Path" : "date", + "Args" : [], + "Config" : { + "Hostname" : "8f177a186b97", + "Domainname" : "", + "User" : "", + "AttachStdin" : false, + "AttachStdout" : false, + "AttachStderr" : false, + "PortSpecs": null, + "ExposedPorts" : null, + "Tty" : false, + "OpenStdin" : false, + "StdinOnce" : false, + "Env" : [ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" ], + "Cmd" : [ + "date" + ], + "Image" : "ubuntu", + "Volumes" : null, + "WorkingDir" : "", + "Entrypoint" : null, + "NetworkDisabled" : false, + "MacAddress" : "", + "OnBuild" : null, + "SecurityOpt" : null + }, + "Image" : "5506de2b643be1e6febbf3b8a240760c6843244c41e12aa2f60ccbb7153d17f5", + "NetworkSettings" : { + "IPAddress" : "172.17.0.2", + "IPPrefixLen" : 16, + "MacAddress" : "02:42:ac:11:00:02", + "Gateway" : "172.17.42.1", + "Bridge" : "docker0", + "PortMapping" : null, + "Ports" : {} + }, + "ResolvConfPath" : "/var/lib/docker/containers/8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c/resolv.conf", + "HostnamePath" : "/var/lib/docker/containers/8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c/hostname", + "HostsPath" : "/var/lib/docker/containers/8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c/hosts", + "LogPath": "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b-json.log", + "Name" : "/test", + "Driver" : "aufs", + "ExecDriver" : "native-0.2", + "MountLabel" : "", + "ProcessLabel" : "", + "AppArmorProfile" : "", + "RestartCount" : 0, + "Volumes" : {}, + "VolumesRW" : {} + } + } + +**Status codes**: + +- **200** – no error +- **404** – no such exec instance +- **500** - server 
error

## 3. Going further

### 3.1 Inside `docker run`

As an example, the `docker run` command line makes the following API calls:

- Create the container

- If the status code is 404, it means the image doesn't exist:
    - Try to pull it.
    - Then, retry to create the container.

- Start the container.

- If you are not in detached mode:
    - Attach to the container, using `logs=1` (to have `stdout` and
      `stderr` from the container's start) and `stream=1`

- If in detached mode or only `stdin` is attached, display the container's id.

### 3.2 Hijacking

In this version of the API, `/attach` uses hijacking to transport `stdin`,
`stdout`, and `stderr` on the same socket.

To hint potential proxies about connection hijacking, the Docker client sends
connection upgrade headers, similar to WebSocket:

    Upgrade: tcp
    Connection: Upgrade

When the Docker daemon detects the `Upgrade` header, it switches its status code
from **200 OK** to **101 UPGRADED** and resends the same headers.


### 3.3 CORS Requests

To enable cross-origin requests to the Engine API, set a value for
`--api-cors-header` when running Docker in daemon mode. Setting `*` (asterisk)
allows all origins; the default (blank) leaves CORS disabled:

    $ docker -d -H="192.168.1.9:2375" --api-cors-header="http://foo.bar"

diff --git a/components/engine/docs/api/v1.20.md b/components/engine/docs/api/v1.20.md
new file mode 100644
index 0000000000..a7fc999aee
--- /dev/null
+++ b/components/engine/docs/api/v1.20.md
@@ -0,0 +1,2394 @@
---
title: "Engine API v1.20"
description: "API Documentation for Docker"
keywords: "API, Docker, rcli, REST, documentation"
redirect_from:
- /engine/reference/api/docker_remote_api_v1.20/
- /reference/api/docker_remote_api_v1.20/
---

## 1. Brief introduction

 - The daemon listens on `unix:///var/run/docker.sock` but you can
   [Bind Docker to another host/port or a Unix socket](../reference/commandline/dockerd.md#bind-docker-to-another-host-port-or-a-unix-socket).
 - The API tends to be REST. However, for some complex commands, like `attach`
   or `pull`, the HTTP connection is hijacked to transport `stdout`,
   `stdin`, and `stderr`.

## 2.
Endpoints + +### 2.1 Containers + +#### List containers + +`GET /containers/json` + +List containers + +**Example request**: + + GET /v1.20/containers/json?all=1&before=8dfafdbc3a40&size=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Id": "8dfafdbc3a40", + "Names":["/boring_feynman"], + "Image": "ubuntu:latest", + "Command": "echo 1", + "Created": 1367854155, + "Status": "Exit 0", + "Ports": [{"PrivatePort": 2222, "PublicPort": 3333, "Type": "tcp"}], + "Labels": { + "com.example.vendor": "Acme", + "com.example.license": "GPL", + "com.example.version": "1.0" + }, + "SizeRw": 12288, + "SizeRootFs": 0 + }, + { + "Id": "9cd87474be90", + "Names":["/coolName"], + "Image": "ubuntu:latest", + "Command": "echo 222222", + "Created": 1367854155, + "Status": "Exit 0", + "Ports": [], + "Labels": {}, + "SizeRw": 12288, + "SizeRootFs": 0 + }, + { + "Id": "3176a2479c92", + "Names":["/sleepy_dog"], + "Image": "ubuntu:latest", + "Command": "echo 3333333333333333", + "Created": 1367854154, + "Status": "Exit 0", + "Ports":[], + "Labels": {}, + "SizeRw":12288, + "SizeRootFs":0 + }, + { + "Id": "4cb07b47f9fb", + "Names":["/running_cat"], + "Image": "ubuntu:latest", + "Command": "echo 444444444444444444444444444444444", + "Created": 1367854152, + "Status": "Exit 0", + "Ports": [], + "Labels": {}, + "SizeRw": 12288, + "SizeRootFs": 0 + } + ] + +**Query parameters**: + +- **all** – 1/True/true or 0/False/false, Show all containers. + Only running containers are shown by default (i.e., this defaults to false) +- **limit** – Show `limit` last created + containers, include non-running ones. +- **since** – Show only containers created since Id, include + non-running ones. +- **before** – Show only containers created before Id, include + non-running ones. +- **size** – 1/True/true or 0/False/false, Show the containers + sizes +- **filters** - a JSON encoded value of the filters (a `map[string][]string`) to process on the containers list. 
Available filters: + - `exited=`; -- containers with exit code of `` ; + - `status=`(`created`|`restarting`|`running`|`paused`|`exited`) + - `label=key` or `label="key=value"` of a container label + +**Status codes**: + +- **200** – no error +- **400** – bad parameter +- **500** – server error + +#### Create a container + +`POST /containers/create` + +Create a container + +**Example request**: + + POST /v1.20/containers/create HTTP/1.1 + Content-Type: application/json + + { + "Hostname": "", + "Domainname": "", + "User": "", + "AttachStdin": false, + "AttachStdout": true, + "AttachStderr": true, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": [ + "FOO=bar", + "BAZ=quux" + ], + "Cmd": [ + "date" + ], + "Entrypoint": null, + "Image": "ubuntu", + "Labels": { + "com.example.vendor": "Acme", + "com.example.license": "GPL", + "com.example.version": "1.0" + }, + "Volumes": { + "/volumes/data": {} + }, + "WorkingDir": "", + "NetworkDisabled": false, + "MacAddress": "12:34:56:78:9a:bc", + "ExposedPorts": { + "22/tcp": {} + }, + "HostConfig": { + "Binds": ["/tmp:/tmp"], + "Links": ["redis3:redis"], + "LxcConf": {"lxc.utsname":"docker"}, + "Memory": 0, + "MemorySwap": 0, + "CpuShares": 512, + "CpuPeriod": 100000, + "CpuQuota": 50000, + "CpusetCpus": "0,1", + "CpusetMems": "0,1", + "BlkioWeight": 300, + "MemorySwappiness": 60, + "OomKillDisable": false, + "PidMode": "", + "PortBindings": { "22/tcp": [{ "HostPort": "11022" }] }, + "PublishAllPorts": false, + "Privileged": false, + "ReadonlyRootfs": false, + "Dns": ["8.8.8.8"], + "DnsSearch": [""], + "ExtraHosts": null, + "VolumesFrom": ["parent", "other:ro"], + "CapAdd": ["NET_ADMIN"], + "CapDrop": ["MKNOD"], + "GroupAdd": ["newgroup"], + "RestartPolicy": { "Name": "", "MaximumRetryCount": 0 }, + "NetworkMode": "bridge", + "Devices": [], + "Ulimits": [{}], + "LogConfig": { "Type": "json-file", "Config": {} }, + "SecurityOpt": [], + "CgroupParent": "" + } + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + { + "Id":"e90e34656806", + "Warnings":[] + } + +**JSON parameters**: + +- **Hostname** - A string value containing the hostname to use for the + container. +- **Domainname** - A string value containing the domain name to use + for the container. +- **User** - A string value specifying the user inside the container. +- **AttachStdin** - Boolean value, attaches to `stdin`. +- **AttachStdout** - Boolean value, attaches to `stdout`. +- **AttachStderr** - Boolean value, attaches to `stderr`. +- **Tty** - Boolean value, Attach standard streams to a `tty`, including `stdin` if it is not closed. +- **OpenStdin** - Boolean value, opens `stdin`, +- **StdinOnce** - Boolean value, close `stdin` after the 1 attached client disconnects. +- **Env** - A list of environment variables in the form of `["VAR=value", ...]` +- **Labels** - Adds a map of labels to a container. To specify a map: `{"key":"value", ... }` +- **Cmd** - Command to run specified as a string or an array of strings. +- **Entrypoint** - Set the entry point for the container as a string or an array + of strings. +- **Image** - A string specifying the image name to use for the container. +- **Volumes** - An object mapping mount point paths (strings) inside the + container to empty objects. +- **WorkingDir** - A string specifying the working directory for commands to + run in. 
- **NetworkDisabled** - Boolean value, when true disables networking for the
  container
- **ExposedPorts** - An object mapping ports to an empty object in the form of:
  `"ExposedPorts": { "<port>/<tcp|udp>": {} }`
- **HostConfig**
    - **Binds** – A list of bind-mounts for this container. Each item is a string in one of these forms:
       + `host-src:container-dest` to bind-mount a host path into the
         container. Both `host-src` and `container-dest` must be an
         _absolute_ path.
       + `host-src:container-dest:ro` to make the bind-mount read-only
         inside the container. Both `host-src` and `container-dest` must be
         an _absolute_ path.
    - **Links** - A list of links for the container. Each link entry should be
      in the form of `container_name:alias`.
    - **LxcConf** - LXC specific configurations. These configurations only
      work when using the `lxc` execution driver.
    - **Memory** - Memory limit in bytes.
    - **MemorySwap** - Total memory limit (memory + swap); set `-1` to enable unlimited swap.
      You must use this with `memory` and make the swap value larger than `memory`.
    - **CpuShares** - An integer value containing the container's CPU Shares
      (i.e., the relative weight vs. other containers).
    - **CpuPeriod** - The length of a CPU period in microseconds.
    - **CpuQuota** - Microseconds of CPU time that the container can get in a CPU period.
    - **CpusetCpus** - String value containing the `cgroups CpusetCpus` to use.
    - **CpusetMems** - Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems.
    - **BlkioWeight** - Block IO weight (relative weight) accepts a weight value between 10 and 1000.
    - **MemorySwappiness** - Tune a container's memory swappiness behavior. Accepts an integer between 0 and 100.
    - **OomKillDisable** - Boolean value, whether to disable OOM Killer for the container or not.
    - **PidMode** - Set the PID (Process) Namespace mode for the container;
      `"container:<name|id>"`: joins another container's PID namespace
      `"host"`: use the host's PID namespace inside the container
    - **PortBindings** - A map of exposed container ports and the host port they
      should map to. A JSON object in the form
      `{ <port>/<protocol>: [{ "HostPort": "<port>" }] }`
      Take note that `port` is specified as a string and not an integer value.
    - **PublishAllPorts** - Allocates a random host port for all of a container's
      exposed ports. Specified as a boolean value.
    - **Privileged** - Gives the container full access to the host. Specified as
      a boolean value.
    - **ReadonlyRootfs** - Mount the container's root filesystem as read only.
      Specified as a boolean value.
    - **Dns** - A list of DNS servers for the container to use.
    - **DnsSearch** - A list of DNS search domains
    - **ExtraHosts** - A list of hostnames/IP mappings to add to the
      container's `/etc/hosts` file. Specified in the form `["hostname:IP"]`.
    - **VolumesFrom** - A list of volumes to inherit from another container.
      Specified in the form `<container name>[:<ro|rw>]`
    - **CapAdd** - A list of kernel capabilities to add to the container.
    - **CapDrop** - A list of kernel capabilities to drop from the container.
    - **GroupAdd** - A list of additional groups that the container process will run as
    - **RestartPolicy** – The behavior to apply when the container exits. The
      value is an object with a `Name` property of either `"always"` to
      always restart or `"on-failure"` to restart only when the container
      exit code is non-zero. If `on-failure` is used, `MaximumRetryCount`
      controls the number of times to retry before giving up.
      The default is not to restart. (optional)
      An ever-increasing delay (double the previous delay, starting at 100 ms)
      is added before each restart to prevent flooding the server.
    - **NetworkMode** - Sets the networking mode for the container. Supported
      values are: `bridge`, `host`, `none`, and `container:<name|id>`
    - **Devices** - A list of devices to add to the container specified as a JSON object in the
      form
      `{ "PathOnHost": "/dev/deviceName", "PathInContainer": "/dev/deviceName", "CgroupPermissions": "mrw"}`
    - **Ulimits** - A list of ulimits to set in the container, specified as
      `{ "Name": <name>, "Soft": <soft limit>, "Hard": <hard limit> }`, for example:
      `Ulimits: { "Name": "nofile", "Soft": 1024, "Hard": 2048 }`
    - **SecurityOpt**: A list of string values to customize labels for MLS
      systems, such as SELinux.
    - **LogConfig** - Log configuration for the container, specified as a JSON object in the form
      `{ "Type": "<driver_name>", "Config": {"key1": "val1"} }`.
      Available types: `json-file`, `syslog`, `journald`, `gelf`, `none`.
      The default is the `json-file` logging driver.
    - **CgroupParent** - Path to `cgroups` under which the container's `cgroup` is created. If the path is not absolute, the path is considered to be relative to the `cgroups` path of the init process. Cgroups are created if they do not already exist.

**Query parameters**:

- **name** – Assign the specified name to the container. Must
  match `/?[a-zA-Z0-9_-]+`.

**Status codes**:

- **201** – no error
- **400** – bad parameter
- **404** – no such container
- **406** – impossible to attach (container not running)
- **409** – conflict
- **500** – server error

#### Inspect a container

`GET /containers/(id or name)/json`

Return low-level information on the container `id`

**Example request**:

    GET /v1.20/containers/4fa6e0f0c678/json HTTP/1.1

**Example response**:

    HTTP/1.1 200 OK
    Content-Type: application/json

    {
        "AppArmorProfile": "",
        "Args": [
            "-c",
            "exit 9"
        ],
        "Config": {
            "AttachStderr": true,
            "AttachStdin": false,
            "AttachStdout": true,
            "Cmd": [
                "/bin/sh",
                "-c",
                "exit 9"
            ],
            "Domainname": "",
            "Entrypoint": null,
            "Env": [
                "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
            ],
            "ExposedPorts": null,
            "Hostname": "ba033ac44011",
            "Image": "ubuntu",
            "Labels": {
                "com.example.vendor": "Acme",
                "com.example.license": "GPL",
                "com.example.version": "1.0"
            },
            "MacAddress": "",
            "NetworkDisabled": false,
            "OnBuild": null,
            "OpenStdin": false,
            "StdinOnce": false,
            "Tty": false,
            "User": "",
            "Volumes": null,
            "WorkingDir": ""
        },
        "Created": "2015-01-06T15:47:31.485331387Z",
        "Driver": "devicemapper",
        "ExecDriver": "native-0.2",
        "ExecIDs": null,
        "HostConfig": {
            "Binds": null,
            "BlkioWeight": 0,
            "CapAdd": null,
            "CapDrop": null,
            "ContainerIDFile": "",
            "CpusetCpus": "",
            "CpusetMems": "",
            "CpuShares": 0,
            "CpuPeriod": 100000,
            "Devices": [],
            "Dns": null,
            "DnsSearch": null,
            "ExtraHosts": null,
            "IpcMode": "",
            "Links": null,
            "LxcConf": [],
            "Memory": 0,
            "MemorySwap": 0,
            "OomKillDisable": false,
            "NetworkMode": "bridge",
            "PidMode": "",
            "PortBindings": {},
            "Privileged": false,
            "ReadonlyRootfs": false,
            "PublishAllPorts": false,
            "RestartPolicy": {
                "MaximumRetryCount": 2,
                "Name": "on-failure"
            },
            "LogConfig": {
                "Config": null,
                "Type": "json-file"
            },
            "SecurityOpt": null,
            "VolumesFrom": null,
            "Ulimits": [{}]
        },
        "HostnamePath":
"/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hostname", + "HostsPath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hosts", + "LogPath": "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b-json.log", + "Id": "ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39", + "Image": "04c5d3b7b0656168630d3ba35d8889bd0e9caafcaeb3004d2bfbc47e7c5d35d2", + "MountLabel": "", + "Name": "/boring_euclid", + "NetworkSettings": { + "Bridge": "", + "Gateway": "", + "IPAddress": "", + "IPPrefixLen": 0, + "MacAddress": "", + "PortMapping": null, + "Ports": null + }, + "Path": "/bin/sh", + "ProcessLabel": "", + "ResolvConfPath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/resolv.conf", + "RestartCount": 1, + "State": { + "Error": "", + "ExitCode": 9, + "FinishedAt": "2015-01-06T15:47:32.080254511Z", + "OOMKilled": false, + "Paused": false, + "Pid": 0, + "Restarting": false, + "Running": true, + "StartedAt": "2015-01-06T15:47:32.072697474Z" + }, + "Mounts": [ + { + "Source": "/data", + "Destination": "/data", + "Mode": "ro,Z", + "RW": false + } + ] + } + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### List processes running inside a container + +`GET /containers/(id or name)/top` + +List processes running inside the container `id`. On Unix systems this +is done by running the `ps` command. This endpoint is not +supported on Windows. + +**Example request**: + + GET /v1.20/containers/4fa6e0f0c678/top HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Titles" : [ + "UID", "PID", "PPID", "C", "STIME", "TTY", "TIME", "CMD" + ], + "Processes" : [ + [ + "root", "13642", "882", "0", "17:03", "pts/0", "00:00:00", "/bin/bash" + ], + [ + "root", "13735", "13642", "0", "17:06", "pts/0", "00:00:00", "sleep 10" + ] + ] + } + +**Example request**: + + GET /v1.20/containers/4fa6e0f0c678/top?ps_args=aux HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Titles" : [ + "USER","PID","%CPU","%MEM","VSZ","RSS","TTY","STAT","START","TIME","COMMAND" + ] + "Processes" : [ + [ + "root","13642","0.0","0.1","18172","3184","pts/0","Ss","17:03","0:00","/bin/bash" + ], + [ + "root","13895","0.0","0.0","4348","692","pts/0","S+","17:15","0:00","sleep 10" + ] + ], + } + +**Query parameters**: + +- **ps_args** – `ps` arguments to use (e.g., `aux`), defaults to `-ef` + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Get container logs + +`GET /containers/(id or name)/logs` + +Get `stdout` and `stderr` logs from the container ``id`` + +> **Note**: +> This endpoint works only for containers with the `json-file` or `journald` logging drivers. + +**Example request**: + + GET /v1.20/containers/4fa6e0f0c678/logs?stderr=1&stdout=1×tamps=1&follow=1&tail=10&since=1428990821 HTTP/1.1 + +**Example response**: + + HTTP/1.1 101 UPGRADED + Content-Type: application/vnd.docker.raw-stream + Connection: Upgrade + Upgrade: tcp + + {% raw %} + {{ STREAM }} + {% endraw %} + +**Query parameters**: + +- **follow** – 1/True/true or 0/False/false, return stream. Default `false`. +- **stdout** – 1/True/true or 0/False/false, show `stdout` log. Default `false`. 
+- **stderr** – 1/True/true or 0/False/false, show `stderr` log. Default `false`. +- **since** – UNIX timestamp (integer) to filter logs. Specifying a timestamp + will only output log-entries since that timestamp. Default: 0 (unfiltered) +- **timestamps** – 1/True/true or 0/False/false, print timestamps for + every log line. Default `false`. +- **tail** – Output specified number of lines at the end of logs: `all` or ``. Default all. + +**Status codes**: + +- **101** – no error, hints proxy about hijacking +- **200** – no error, no upgrade header found +- **404** – no such container +- **500** – server error + +#### Inspect changes on a container's filesystem + +`GET /containers/(id or name)/changes` + +Inspect changes on container `id`'s filesystem + +**Example request**: + + GET /v1.20/containers/4fa6e0f0c678/changes HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Path": "/dev", + "Kind": 0 + }, + { + "Path": "/dev/kmsg", + "Kind": 1 + }, + { + "Path": "/test", + "Kind": 1 + } + ] + +Values for `Kind`: + +- `0`: Modify +- `1`: Add +- `2`: Delete + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Export a container + +`GET /containers/(id or name)/export` + +Export the contents of container `id` + +**Example request**: + + GET /v1.20/containers/4fa6e0f0c678/export HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/octet-stream + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Get container stats based on resource usage + +`GET /containers/(id or name)/stats` + +This endpoint returns a live stream of a container's resource usage statistics. 
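As a consumer-side sketch, not the official client: the Go program below assumes the daemon on its default unix socket and a running container named `redis1` (matching the example that follows). It decodes the JSON samples one by one and derives a CPU percentage from the deltas between `cpu_stats` and `precpu_stats`; that delta formula mirrors what `docker stats` displays, but it is an assumption of this sketch, not part of the API contract.

```go
package main

import (
	"context"
	"encoding/json"
	"fmt"
	"io"
	"log"
	"net"
	"net/http"
)

// Just the fields needed for a CPU gauge; a full sample carries many more.
type statsSample struct {
	Read     string `json:"read"`
	CPUStats struct {
		CPUUsage struct {
			TotalUsage  uint64   `json:"total_usage"`
			PercpuUsage []uint64 `json:"percpu_usage"`
		} `json:"cpu_usage"`
		SystemCPUUsage uint64 `json:"system_cpu_usage"`
	} `json:"cpu_stats"`
	PreCPUStats struct {
		CPUUsage struct {
			TotalUsage uint64 `json:"total_usage"`
		} `json:"cpu_usage"`
		SystemCPUUsage uint64 `json:"system_cpu_usage"`
	} `json:"precpu_stats"`
}

func main() {
	client := &http.Client{Transport: &http.Transport{
		DialContext: func(ctx context.Context, network, addr string) (net.Conn, error) {
			return (&net.Dialer{}).DialContext(ctx, "unix", "/var/run/docker.sock")
		},
	}}
	resp, err := client.Get("http://docker/v1.20/containers/redis1/stats")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	dec := json.NewDecoder(resp.Body) // one JSON object per sample
	for {
		var s statsSample
		if err := dec.Decode(&s); err != nil {
			if err == io.EOF {
				return
			}
			log.Fatal(err)
		}
		// CPU% = (container delta / system delta) * number of CPUs * 100,
		// where the previous sample's counters come from precpu_stats.
		cpuDelta := float64(s.CPUStats.CPUUsage.TotalUsage - s.PreCPUStats.CPUUsage.TotalUsage)
		sysDelta := float64(s.CPUStats.SystemCPUUsage - s.PreCPUStats.SystemCPUUsage)
		if sysDelta > 0 {
			fmt.Printf("%s cpu=%.2f%%\n", s.Read,
				cpuDelta/sysDelta*float64(len(s.CPUStats.CPUUsage.PercpuUsage))*100)
		}
	}
}
```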
+ +**Example request**: + + GET /v1.20/containers/redis1/stats HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "read" : "2015-01-08T22:57:31.547920715Z", + "network" : { + "rx_dropped" : 0, + "rx_bytes" : 648, + "rx_errors" : 0, + "tx_packets" : 8, + "tx_dropped" : 0, + "rx_packets" : 8, + "tx_errors" : 0, + "tx_bytes" : 648 + }, + "memory_stats" : { + "stats" : { + "total_pgmajfault" : 0, + "cache" : 0, + "mapped_file" : 0, + "total_inactive_file" : 0, + "pgpgout" : 414, + "rss" : 6537216, + "total_mapped_file" : 0, + "writeback" : 0, + "unevictable" : 0, + "pgpgin" : 477, + "total_unevictable" : 0, + "pgmajfault" : 0, + "total_rss" : 6537216, + "total_rss_huge" : 6291456, + "total_writeback" : 0, + "total_inactive_anon" : 0, + "rss_huge" : 6291456, + "hierarchical_memory_limit" : 67108864, + "total_pgfault" : 964, + "total_active_file" : 0, + "active_anon" : 6537216, + "total_active_anon" : 6537216, + "total_pgpgout" : 414, + "total_cache" : 0, + "inactive_anon" : 0, + "active_file" : 0, + "pgfault" : 964, + "inactive_file" : 0, + "total_pgpgin" : 477 + }, + "max_usage" : 6651904, + "usage" : 6537216, + "failcnt" : 0, + "limit" : 67108864 + }, + "blkio_stats" : {}, + "cpu_stats" : { + "cpu_usage" : { + "percpu_usage" : [ + 8646879, + 24472255, + 36438778, + 30657443 + ], + "usage_in_usermode" : 50000000, + "total_usage" : 100215355, + "usage_in_kernelmode" : 30000000 + }, + "system_cpu_usage" : 739306590000000, + "throttling_data" : {"periods":0,"throttled_periods":0,"throttled_time":0} + }, + "precpu_stats" : { + "cpu_usage" : { + "percpu_usage" : [ + 8646879, + 24350896, + 36438778, + 30657443 + ], + "usage_in_usermode" : 50000000, + "total_usage" : 100093996, + "usage_in_kernelmode" : 30000000 + }, + "system_cpu_usage" : 9492140000000, + "throttling_data" : {"periods":0,"throttled_periods":0,"throttled_time":0} + } + } + +The `precpu_stats` is the cpu statistic of last read, which is used for calculating the cpu usage percent. It is not the exact copy of the `cpu_stats` field. + +**Query parameters**: + +- **stream** – 1/True/true or 0/False/false, pull stats once then disconnect. Default `true`. + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Resize a container TTY + +`POST /containers/(id or name)/resize?h=&w=` + +Resize the TTY for container with `id`. You must restart the container for the resize to take effect. + +**Example request**: + + POST /v1.20/containers/4fa6e0f0c678/resize?h=40&w=80 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Length: 0 + Content-Type: text/plain; charset=utf-8 + +**Query parameters**: + +- **h** – height of `tty` session +- **w** – width + +**Status codes**: + +- **200** – no error +- **404** – No such container +- **500** – Cannot resize container + +#### Start a container + +`POST /containers/(id or name)/start` + +Start the container `id` + +> **Note**: +> For backwards compatibility, this endpoint accepts a `HostConfig` as JSON-encoded request body. +> See [create a container](#create-a-container) for details. 
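In other words, the non-deprecated flow is to supply `HostConfig` at create time and start with an empty body. A minimal sketch of that sequence (not the official client; the default unix-socket path and the locally present `ubuntu` image are assumptions):

```go
package main

import (
	"bytes"
	"context"
	"encoding/json"
	"fmt"
	"log"
	"net"
	"net/http"
)

func main() {
	client := &http.Client{Transport: &http.Transport{
		DialContext: func(ctx context.Context, network, addr string) (net.Conn, error) {
			return (&net.Dialer{}).DialContext(ctx, "unix", "/var/run/docker.sock")
		},
	}}

	// Create the container with HostConfig inline rather than at start time.
	body, err := json.Marshal(map[string]interface{}{
		"Image":      "ubuntu",
		"Cmd":        []string{"date"},
		"HostConfig": map[string]interface{}{"NetworkMode": "bridge"},
	})
	if err != nil {
		log.Fatal(err)
	}
	resp, err := client.Post("http://docker/v1.20/containers/create",
		"application/json", bytes.NewReader(body))
	if err != nil {
		log.Fatal(err)
	}
	var created struct{ Id string }
	if err := json.NewDecoder(resp.Body).Decode(&created); err != nil {
		log.Fatal(err)
	}
	resp.Body.Close()

	// Start needs no body at all; a 204 means the container is running.
	resp, err = client.Post("http://docker/v1.20/containers/"+created.Id+"/start",
		"application/json", nil)
	if err != nil {
		log.Fatal(err)
	}
	resp.Body.Close()
	fmt.Println(created.Id, resp.Status)
}
```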
+ +**Example request**: + + POST /v1.20/containers/e90e34656806/start HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Status codes**: + +- **204** – no error +- **304** – container already started +- **404** – no such container +- **500** – server error + +#### Stop a container + +`POST /containers/(id or name)/stop` + +Stop the container `id` + +**Example request**: + + POST /v1.20/containers/e90e34656806/stop?t=5 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **t** – number of seconds to wait before killing the container + +**Status codes**: + +- **204** – no error +- **304** – container already stopped +- **404** – no such container +- **500** – server error + +#### Restart a container + +`POST /containers/(id or name)/restart` + +Restart the container `id` + +**Example request**: + + POST /v1.20/containers/e90e34656806/restart?t=5 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **t** – number of seconds to wait before killing the container + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **500** – server error + +#### Kill a container + +`POST /containers/(id or name)/kill` + +Kill the container `id` + +**Example request**: + + POST /v1.20/containers/e90e34656806/kill HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **signal** - Signal to send to the container: integer or string like `SIGINT`. + When not set, `SIGKILL` is assumed and the call waits for the container to exit. + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **500** – server error + +#### Rename a container + +`POST /containers/(id or name)/rename` + +Rename the container `id` to a `new_name` + +**Example request**: + + POST /v1.20/containers/e90e34656806/rename?name=new_name HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **name** – new name for the container + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **409** - conflict name already assigned +- **500** – server error + +#### Pause a container + +`POST /containers/(id or name)/pause` + +Pause the container `id` + +**Example request**: + + POST /v1.20/containers/e90e34656806/pause HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **500** – server error + +#### Unpause a container + +`POST /containers/(id or name)/unpause` + +Unpause the container `id` + +**Example request**: + + POST /v1.20/containers/e90e34656806/unpause HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **500** – server error + +#### Attach to a container + +`POST /containers/(id or name)/attach` + +Attach to the container `id` + +**Example request**: + + POST /v1.20/containers/16253994b7c4/attach?logs=1&stream=0&stdout=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 101 UPGRADED + Content-Type: application/vnd.docker.raw-stream + Connection: Upgrade + Upgrade: tcp + + {% raw %} + {{ STREAM }} + {% endraw %} + +**Query parameters**: + +- **logs** – 1/True/true or 0/False/false, return logs. Default `false`. +- **stream** – 1/True/true or 0/False/false, return stream. + Default `false`. +- **stdin** – 1/True/true or 0/False/false, if `stream=true`, attach + to `stdin`. Default `false`. 
- **stdout** – 1/True/true or 0/False/false, if `logs=true`, return
  `stdout` log, if `stream=true`, attach to `stdout`. Default `false`.
- **stderr** – 1/True/true or 0/False/false, if `logs=true`, return
  `stderr` log, if `stream=true`, attach to `stderr`. Default `false`.

**Status codes**:

- **101** – no error, hints proxy about hijacking
- **200** – no error, no upgrade header found
- **400** – bad parameter
- **404** – no such container
- **500** – server error

**Stream details**:

When the TTY setting is enabled in
[`POST /containers/create`
](#create-a-container),
the stream is the raw data from the process PTY and the client's `stdin`.
When the TTY is disabled, the stream is multiplexed to separate
`stdout` and `stderr`.

The format is a **Header** and a **Payload** (frame).

**HEADER**

The header identifies which stream the frame belongs to (`stdout` or
`stderr`). It also contains the size of the associated frame, encoded in the
last four bytes (`uint32`).

It is encoded in the first eight bytes like this:

    header := [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4}

`STREAM_TYPE` can be:

- 0: `stdin` (is written on `stdout`)
- 1: `stdout`
- 2: `stderr`

`SIZE1, SIZE2, SIZE3, SIZE4` are the four bytes of
the `uint32` size encoded as big endian.

**PAYLOAD**

The payload is the raw stream.

**IMPLEMENTATION**

The simplest way to implement the Attach protocol is the following:

 1. Read eight bytes.
 2. Choose `stdout` or `stderr` depending on the first byte.
 3. Extract the frame size from the last four bytes.
 4. Read that many payload bytes and write them to the chosen output.
 5. Goto 1.

#### Attach to a container (websocket)

`GET /containers/(id or name)/attach/ws`

Attach to the container `id` via websocket

Implements websocket protocol handshake according to [RFC 6455](http://tools.ietf.org/html/rfc6455)

**Example request**

    GET /v1.20/containers/e90e34656806/attach/ws?logs=0&stream=1&stdin=1&stdout=1&stderr=1 HTTP/1.1

**Example response**

    {% raw %}
    {{ STREAM }}
    {% endraw %}

**Query parameters**:

- **logs** – 1/True/true or 0/False/false, return logs. Default `false`.
- **stream** – 1/True/true or 0/False/false, return stream.
  Default `false`.
- **stdin** – 1/True/true or 0/False/false, if `stream=true`, attach
  to `stdin`. Default `false`.
- **stdout** – 1/True/true or 0/False/false, if `logs=true`, return
  `stdout` log, if `stream=true`, attach to `stdout`. Default `false`.
- **stderr** – 1/True/true or 0/False/false, if `logs=true`, return
  `stderr` log, if `stream=true`, attach to `stderr`. Default `false`.
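A runnable Go sketch of the demultiplexing loop described under **Stream details** above (the two fabricated frames stand in for a real attach response body; `demux` is this document's algorithm, not a function from any official library):

```go
package main

import (
	"encoding/binary"
	"fmt"
	"io"
	"os"
	"strings"
)

// demux reads 8-byte frame headers and copies each payload to stdout or
// stderr according to STREAM_TYPE (0 and 1 both go to stdout, per the list above).
func demux(r io.Reader, stdout, stderr io.Writer) error {
	var header [8]byte
	for {
		// 1. Read eight bytes.
		if _, err := io.ReadFull(r, header[:]); err != nil {
			if err == io.EOF {
				return nil
			}
			return err
		}
		// 2. Choose the output from the first byte.
		w := stdout
		if header[0] == 2 {
			w = stderr
		}
		// 3. Extract the frame size from the last four bytes (big endian).
		size := binary.BigEndian.Uint32(header[4:])
		// 4. Copy exactly that many payload bytes to the chosen output.
		if _, err := io.CopyN(w, r, int64(size)); err != nil {
			return err
		}
		// 5. Goto 1.
	}
}

func main() {
	// Fabricated two-frame stream standing in for an attach response body.
	frame := func(t byte, s string) string {
		var h [8]byte
		h[0] = t
		binary.BigEndian.PutUint32(h[4:], uint32(len(s)))
		return string(h[:]) + s
	}
	stream := frame(1, "hello from stdout\n") + frame(2, "hello from stderr\n")
	if err := demux(strings.NewReader(stream), os.Stdout, os.Stderr); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}
```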
+ +**Status codes**: + +- **200** – no error +- **400** – bad parameter +- **404** – no such container +- **500** – server error + +#### Wait a container + +`POST /containers/(id or name)/wait` + +Block until container `id` stops, then returns the exit code + +**Example request**: + + POST /v1.20/containers/16253994b7c4/wait HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"StatusCode": 0} + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Remove a container + +`DELETE /containers/(id or name)` + +Remove the container `id` from the filesystem + +**Example request**: + + DELETE /v1.20/containers/16253994b7c4?v=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **v** – 1/True/true or 0/False/false, Remove the volumes + associated to the container. Default `false`. +- **force** - 1/True/true or 0/False/false, Kill then remove the container. + Default `false`. +- **link** - 1/True/true or 0/False/false, Remove the specified + link associated to the container. Default `false`. + +**Status codes**: + +- **204** – no error +- **400** – bad parameter +- **404** – no such container +- **409** – conflict +- **500** – server error + +#### Copy files or folders from a container + +`POST /containers/(id or name)/copy` + +Copy files or folders of container `id` + +**Deprecated** in favor of the `archive` endpoint below. + +**Example request**: + + POST /v1.20/containers/4fa6e0f0c678/copy HTTP/1.1 + Content-Type: application/json + + { + "Resource": "test.txt" + } + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Retrieving information about files and folders in a container + +`HEAD /containers/(id or name)/archive` + +See the description of the `X-Docker-Container-Path-Stat` header in the +following section. + +#### Get an archive of a filesystem resource in a container + +`GET /containers/(id or name)/archive` + +Get a tar archive of a resource in the filesystem of container `id`. + +**Query parameters**: + +- **path** - resource in the container's filesystem to archive. Required. + + If not an absolute path, it is relative to the container's root directory. + The resource specified by **path** must exist. To assert that the resource + is expected to be a directory, **path** should end in `/` or `/.` + (assuming a path separator of `/`). If **path** ends in `/.` then this + indicates that only the contents of the **path** directory should be + copied. A symlink is always resolved to its target. + + > **Note**: It is not possible to copy certain system files such as resources + > under `/proc`, `/sys`, `/dev`, and mounts created by the user in the + > container. + +**Example request**: + + GET /v1.20/containers/8cce319429b2/archive?path=/root HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + X-Docker-Container-Path-Stat: eyJuYW1lIjoicm9vdCIsInNpemUiOjQwOTYsIm1vZGUiOjIxNDc0ODQwOTYsIm10aW1lIjoiMjAxNC0wMi0yN1QyMDo1MToyM1oiLCJsaW5rVGFyZ2V0IjoiIn0= + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +On success, a response header `X-Docker-Container-Path-Stat` will be set to a +base64-encoded JSON object containing some filesystem header information about +the archived resource. 
The above example value would decode to the following +JSON object (whitespace added for readability): + +```json +{ + "name": "root", + "size": 4096, + "mode": 2147484096, + "mtime": "2014-02-27T20:51:23Z", + "linkTarget": "" +} +``` + +A `HEAD` request can also be made to this endpoint if only this information is +desired. + +**Status codes**: + +- **200** - success, returns archive of copied resource +- **400** - client error, bad parameter, details in JSON response body, one of: + - must specify path parameter (**path** cannot be empty) + - not a directory (**path** was asserted to be a directory but exists as a + file) +- **404** - client error, resource not found, one of: + – no such container (container `id` does not exist) + - no such file or directory (**path** does not exist) +- **500** - server error + +#### Extract an archive of files or folders to a directory in a container + +`PUT /containers/(id or name)/archive` + +Upload a tar archive to be extracted to a path in the filesystem of container +`id`. + +**Query parameters**: + +- **path** - path to a directory in the container + to extract the archive's contents into. Required. + + If not an absolute path, it is relative to the container's root directory. + The **path** resource must exist. +- **noOverwriteDirNonDir** - If "1", "true", or "True" then it will be an error + if unpacking the given content would cause an existing directory to be + replaced with a non-directory and vice versa. + +**Example request**: + + PUT /v1.20/containers/8cce319429b2/archive?path=/vol1 HTTP/1.1 + Content-Type: application/x-tar + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +**Example response**: + + HTTP/1.1 200 OK + +**Status codes**: + +- **200** – the content was extracted successfully +- **400** - client error, bad parameter, details in JSON response body, one of: + - must specify path parameter (**path** cannot be empty) + - not a directory (**path** should be a directory but exists as a file) + - unable to overwrite existing directory with non-directory + (if **noOverwriteDirNonDir**) + - unable to overwrite existing non-directory with directory + (if **noOverwriteDirNonDir**) +- **403** - client error, permission denied, the volume + or container rootfs is marked as read-only. 
+- **404** - client error, resource not found, one of: + – no such container (container `id` does not exist) + - no such file or directory (**path** resource does not exist) +- **500** – server error + +### 2.2 Images + +#### List Images + +`GET /images/json` + +**Example request**: + + GET /v1.20/images/json?all=0 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "RepoTags": [ + "ubuntu:12.04", + "ubuntu:precise", + "ubuntu:latest" + ], + "Id": "8dbd9e392a964056420e5d58ca5cc376ef18e2de93b5cc90e868a1bbc8318c1c", + "Created": 1365714795, + "Size": 131506275, + "VirtualSize": 131506275, + "Labels": {} + }, + { + "RepoTags": [ + "ubuntu:12.10", + "ubuntu:quantal" + ], + "ParentId": "27cf784147099545", + "Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", + "Created": 1364102658, + "Size": 24653, + "VirtualSize": 180116135, + "Labels": { + "com.example.version": "v1" + } + } + ] + +**Example request, with digest information**: + + GET /v1.20/images/json?digests=1 HTTP/1.1 + +**Example response, with digest information**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Created": 1420064636, + "Id": "4986bf8c15363d1c5d15512d5266f8777bfba4974ac56e3270e7760f6f0a8125", + "ParentId": "ea13149945cb6b1e746bf28032f02e9b5a793523481a0a18645fc77ad53c4ea2", + "RepoDigests": [ + "localhost:5000/test/busybox@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf" + ], + "RepoTags": [ + "localhost:5000/test/busybox:latest", + "playdate:latest" + ], + "Size": 0, + "VirtualSize": 2429728, + "Labels": {} + } + ] + +The response shows a single image `Id` associated with two repositories +(`RepoTags`): `localhost:5000/test/busybox`: and `playdate`. A caller can use +either of the `RepoTags` values `localhost:5000/test/busybox:latest` or +`playdate:latest` to reference the image. + +You can also use `RepoDigests` values to reference an image. In this response, +the array has only one reference and that is to the +`localhost:5000/test/busybox` repository; the `playdate` repository has no +digest. You can reference this digest using the value: +`localhost:5000/test/busybox@sha256:cbbf2f9a99b47fc460d...` + +See the `docker run` and `docker build` commands for examples of digest and tag +references on the command line. + +**Query parameters**: + +- **all** – 1/True/true or 0/False/false, default false +- **filters** – a JSON encoded value of the filters (a map[string][]string) to process on the images list. Available filters: + - `dangling=true` + - `label=key` or `label="key=value"` of an image label +- **filter** - only return images with the specified name + +#### Build image from a Dockerfile + +`POST /build` + +Build an image from a Dockerfile + +**Example request**: + + POST /v1.20/build HTTP/1.1 + Content-Type: application/x-tar + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"stream": "Step 1/5..."} + {"stream": "..."} + {"error": "Error...", "errorDetail": {"code": 123, "message": "Error..."}} + +The input stream must be a `tar` archive compressed with one of the +following algorithms: `identity` (no compression), `gzip`, `bzip2`, `xz`. + +The archive must include a build instructions file, typically called +`Dockerfile` at the archive's root. The `dockerfile` parameter may be +used to specify a different build instructions file. To do this, its value must be +the path to the alternate build instructions file to use. 
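A rough sketch of driving this endpoint from Go (not the official client; the daemon socket path, the `example:latest` tag, and the Dockerfile content are assumptions): it assembles an in-memory tar context holding just a `Dockerfile` and relays the daemon's JSON progress messages to the terminal.

```go
package main

import (
	"archive/tar"
	"bytes"
	"context"
	"io"
	"log"
	"net"
	"net/http"
	"os"
)

func main() {
	// Build context: an in-memory tar archive with one Dockerfile at its root.
	var buf bytes.Buffer
	tw := tar.NewWriter(&buf)
	dockerfile := []byte("FROM busybox\nCMD [\"echo\", \"hi\"]\n")
	if err := tw.WriteHeader(&tar.Header{
		Name: "Dockerfile", Mode: 0644, Size: int64(len(dockerfile)),
	}); err != nil {
		log.Fatal(err)
	}
	if _, err := tw.Write(dockerfile); err != nil {
		log.Fatal(err)
	}
	if err := tw.Close(); err != nil {
		log.Fatal(err)
	}

	client := &http.Client{Transport: &http.Transport{
		DialContext: func(ctx context.Context, network, addr string) (net.Conn, error) {
			return (&net.Dialer{}).DialContext(ctx, "unix", "/var/run/docker.sock")
		},
	}}
	resp, err := client.Post("http://docker/v1.20/build?t=example:latest",
		"application/x-tar", &buf)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	// Progress arrives as a stream of JSON messages; print them as they come.
	if _, err := io.Copy(os.Stdout, resp.Body); err != nil {
		log.Fatal(err)
	}
}
```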
+ +The archive may include any number of other files, +which are accessible in the build context (See the [*ADD build +command*](../reference/builder.md#add)). + +The Docker daemon performs a preliminary validation of the `Dockerfile` before +starting the build, and returns an error if the syntax is incorrect. After that, +each instruction is run one-by-one until the ID of the new image is output. + +The build is canceled if the client drops the connection by quitting +or being killed. + +**Query parameters**: + +- **dockerfile** - Path within the build context to the `Dockerfile`. This is + ignored if `remote` is specified and points to an external `Dockerfile`. +- **t** – A name and optional tag to apply to the image in the `name:tag` format. + If you omit the `tag` the default `latest` value is assumed. +- **remote** – A Git repository URI or HTTP/HTTPS context URI. If the + URI points to a single text file, the file's contents are placed into + a file called `Dockerfile` and the image is built from that file. If + the URI points to a tarball, the file is downloaded by the daemon and + the contents therein used as the context for the build. If the URI + points to a tarball and the `dockerfile` parameter is also specified, + there must be a file with the corresponding path inside the tarball. +- **q** – Suppress verbose build output. +- **nocache** – Do not use the cache when building the image. +- **pull** - Attempt to pull the image even if an older image exists locally. +- **rm** - Remove intermediate containers after a successful build (default behavior). +- **forcerm** - Always remove intermediate containers (includes `rm`). +- **memory** - Set memory limit for build. +- **memswap** - Total memory (memory + swap), `-1` to enable unlimited swap. +- **cpushares** - CPU shares (relative weight). +- **cpusetcpus** - CPUs in which to allow execution (e.g., `0-3`, `0,1`). +- **cpuperiod** - The length of a CPU period in microseconds. +- **cpuquota** - Microseconds of CPU time that the container can get in a CPU period. + +**Request Headers**: + +- **Content-type** – Set to `"application/x-tar"`. +- **X-Registry-Config** – A base64-url-safe-encoded Registry Auth Config JSON + object with the following structure: + + { + "docker.example.com": { + "username": "janedoe", + "password": "hunter2" + }, + "https://index.docker.io/v1/": { + "username": "mobydock", + "password": "conta1n3rize14" + } + } + + This object maps the hostname of a registry to an object containing the + "username" and "password" for that registry. Multiple registries may + be specified as the build may be based on an image requiring + authentication to pull from any arbitrary registry. Only the registry + domain name (and port if not the default "443") are required. However + (for legacy reasons) the "official" Docker, Inc. hosted registry must + be specified with both a "https://" prefix and a "/v1/" suffix even + though Docker will prefer to use the v2 registry API. + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Create an image + +`POST /images/create` + +Create an image either by pulling it from the registry or by importing it + +**Example request**: + + POST /v1.20/images/create?fromImage=busybox&tag=latest HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status": "Pulling..."} + {"status": "Pulling", "progress": "1 B/ 100 B", "progressDetail": {"current": 1, "total": 100}} + {"error": "Invalid..."} + ... 
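As the example response shows, the HTTP status is 200 even when the pull fails mid-stream, so a client has to watch for `error` keys in the stream itself. A decoding sketch (not the official client; the socket path and image name are assumptions):

```go
package main

import (
	"context"
	"encoding/json"
	"fmt"
	"io"
	"log"
	"net"
	"net/http"
)

func main() {
	client := &http.Client{Transport: &http.Transport{
		DialContext: func(ctx context.Context, network, addr string) (net.Conn, error) {
			return (&net.Dialer{}).DialContext(ctx, "unix", "/var/run/docker.sock")
		},
	}}
	resp, err := client.Post(
		"http://docker/v1.20/images/create?fromImage=busybox&tag=latest",
		"text/plain", nil)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	// Decode the concatenated JSON messages one at a time until EOF.
	dec := json.NewDecoder(resp.Body)
	for {
		var msg struct {
			Status string `json:"status"`
			Error  string `json:"error"`
		}
		if err := dec.Decode(&msg); err != nil {
			if err == io.EOF {
				return
			}
			log.Fatal(err)
		}
		if msg.Error != "" {
			log.Fatalf("pull failed: %s", msg.Error)
		}
		fmt.Println(msg.Status)
	}
}
```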
+ +When using this endpoint to pull an image from the registry, the +`X-Registry-Auth` header can be used to include +a base64-encoded AuthConfig object. + +**Query parameters**: + +- **fromImage** – Name of the image to pull. +- **fromSrc** – Source to import. The value may be a URL from which the image + can be retrieved or `-` to read the image from the request body. +- **repo** – Repository name. +- **tag** – Tag. If empty when pulling an image, this causes all tags + for the given image to be pulled. + +**Request Headers**: + +- **X-Registry-Auth** – base64-encoded AuthConfig object + +**Status codes**: + +- **200** – no error +- **404** - repository does not exist or no read access +- **500** – server error + + + +#### Inspect an image + +`GET /images/(name)/json` + +Return low-level information on the image `name` + +**Example request**: + + GET /v1.20/images/ubuntu/json HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Created": "2013-03-23T22:24:18.818426-07:00", + "Container": "3d67245a8d72ecf13f33dffac9f79dcdf70f75acb84d308770391510e0c23ad0", + "ContainerConfig": { + "Hostname": "", + "User": "", + "AttachStdin": false, + "AttachStdout": false, + "AttachStderr": false, + "Tty": true, + "OpenStdin": true, + "StdinOnce": false, + "Env": null, + "Cmd": ["/bin/bash"], + "Dns": null, + "Image": "ubuntu", + "Labels": { + "com.example.vendor": "Acme", + "com.example.license": "GPL", + "com.example.version": "1.0" + }, + "Volumes": null, + "VolumesFrom": "", + "WorkingDir": "" + }, + "Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", + "Parent": "27cf784147099545", + "Size": 6824592 + } + +**Status codes**: + +- **200** – no error +- **404** – no such image +- **500** – server error + +#### Get the history of an image + +`GET /images/(name)/history` + +Return the history of the image `name` + +**Example request**: + + GET /v1.20/images/ubuntu/history HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Id": "3db9c44f45209632d6050b35958829c3a2aa256d81b9a7be45b362ff85c54710", + "Created": 1398108230, + "CreatedBy": "/bin/sh -c #(nop) ADD file:eb15dbd63394e063b805a3c32ca7bf0266ef64676d5a6fab4801f2e81e2a5148 in /", + "Tags": [ + "ubuntu:lucid", + "ubuntu:10.04" + ], + "Size": 182964289, + "Comment": "" + }, + { + "Id": "6cfa4d1f33fb861d4d114f43b25abd0ac737509268065cdfd69d544a59c85ab8", + "Created": 1398108222, + "CreatedBy": "/bin/sh -c #(nop) MAINTAINER Tianon Gravi - mkimage-debootstrap.sh -i iproute,iputils-ping,ubuntu-minimal -t lucid.tar.xz lucid http://archive.ubuntu.com/ubuntu/", + "Tags": null, + "Size": 0, + "Comment": "" + }, + { + "Id": "511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158", + "Created": 1371157430, + "CreatedBy": "", + "Tags": [ + "scratch12:latest", + "scratch:latest" + ], + "Size": 0, + "Comment": "Imported from -" + } + ] + +**Status codes**: + +- **200** – no error +- **404** – no such image +- **500** – server error + +#### Push an image on the registry + +`POST /images/(name)/push` + +Push the image `name` on the registry + +**Example request**: + + POST /v1.20/images/test/push HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status": "Pushing..."} + {"status": "Pushing", "progress": "1/? (n/a)", "progressDetail": {"current": 1}}} + {"error": "Invalid..."} + ... 
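Pushes are normally authenticated via the `X-Registry-Auth` request header (see **Request Headers** below). A sketch of assembling it, using placeholder credentials and the AuthConfig field names shown in the `/auth` example later in this document; the URL-safe base64 alphabet here is an assumption modelled on the `X-Registry-Config` description above:

```go
package main

import (
	"context"
	"encoding/base64"
	"encoding/json"
	"fmt"
	"io"
	"log"
	"net"
	"net/http"
	"os"
)

func main() {
	// AuthConfig with placeholder credentials, serialized and base64-encoded
	// (URL-safe alphabet, an assumption) for the X-Registry-Auth header.
	auth, err := json.Marshal(map[string]string{
		"username":      "janedoe",
		"password":      "hunter2",
		"email":         "janedoe@example.com",
		"serveraddress": "https://index.docker.io/v1/",
	})
	if err != nil {
		log.Fatal(err)
	}

	client := &http.Client{Transport: &http.Transport{
		DialContext: func(ctx context.Context, network, addr string) (net.Conn, error) {
			return (&net.Dialer{}).DialContext(ctx, "unix", "/var/run/docker.sock")
		},
	}}
	req, err := http.NewRequest("POST",
		"http://docker/v1.20/images/test/push?tag=latest", nil)
	if err != nil {
		log.Fatal(err)
	}
	req.Header.Set("X-Registry-Auth", base64.URLEncoding.EncodeToString(auth))

	resp, err := client.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	io.Copy(os.Stdout, resp.Body) // progress / error messages, as above
	fmt.Println(resp.Status)
}
```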
+ +If you wish to push an image on to a private registry, that image must already have a tag +into a repository which references that registry `hostname` and `port`. This repository name should +then be used in the URL. This duplicates the command line's flow. + +**Example request**: + + POST /v1.20/images/registry.acme.com:5000/test/push HTTP/1.1 + + +**Query parameters**: + +- **tag** – The tag to associate with the image on the registry. This is optional. + +**Request Headers**: + +- **X-Registry-Auth** – base64-encoded AuthConfig object. + +**Status codes**: + +- **200** – no error +- **404** – no such image +- **500** – server error + +#### Tag an image into a repository + +`POST /images/(name)/tag` + +Tag the image `name` into a repository + +**Example request**: + + POST /v1.20/images/test/tag?repo=myrepo&force=0&tag=v42 HTTP/1.1 + +**Example response**: + + HTTP/1.1 201 Created + +**Query parameters**: + +- **repo** – The repository to tag in +- **force** – 1/True/true or 0/False/false, default false +- **tag** - The new tag name + +**Status codes**: + +- **201** – no error +- **400** – bad parameter +- **404** – no such image +- **409** – conflict +- **500** – server error + +#### Remove an image + +`DELETE /images/(name)` + +Remove the image `name` from the filesystem + +**Example request**: + + DELETE /v1.20/images/test HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-type: application/json + + [ + {"Untagged": "3e2f21a89f"}, + {"Deleted": "3e2f21a89f"}, + {"Deleted": "53b4f83ac9"} + ] + +**Query parameters**: + +- **force** – 1/True/true or 0/False/false, default false +- **noprune** – 1/True/true or 0/False/false, default false + +**Status codes**: + +- **200** – no error +- **404** – no such image +- **409** – conflict +- **500** – server error + +#### Search images + +`GET /images/search` + +Search for an image on [Docker Hub](https://hub.docker.com). + +> **Note**: +> The response keys have changed from API v1.6 to reflect the JSON +> sent by the registry server to the docker daemon's request. + +**Example request**: + + GET /v1.20/images/search?term=sshd HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "description": "", + "is_official": false, + "is_automated": false, + "name": "wma55/u1210sshd", + "star_count": 0 + }, + { + "description": "", + "is_official": false, + "is_automated": false, + "name": "jdswinbank/sshd", + "star_count": 0 + }, + { + "description": "", + "is_official": false, + "is_automated": false, + "name": "vgauthier/sshd", + "star_count": 0 + } + ... 
+ ] + +**Query parameters**: + +- **term** – term to search + +**Status codes**: + +- **200** – no error +- **500** – server error + +### 2.3 Misc + +#### Check auth configuration + +`POST /auth` + +Get the default username and email + +**Example request**: + + POST /v1.20/auth HTTP/1.1 + Content-Type: application/json + + { + "username": "hannibal", + "password": "xxxx", + "email": "hannibal@a-team.com", + "serveraddress": "https://index.docker.io/v1/" + } + +**Example response**: + + HTTP/1.1 200 OK + +**Status codes**: + +- **200** – no error +- **204** – no error +- **500** – server error + +#### Display system-wide information + +`GET /info` + +Display system-wide information + +**Example request**: + + GET /v1.20/info HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Containers": 11, + "CpuCfsPeriod": true, + "CpuCfsQuota": true, + "Debug": false, + "DockerRootDir": "/var/lib/docker", + "Driver": "btrfs", + "DriverStatus": [[""]], + "ExecutionDriver": "native-0.1", + "ExperimentalBuild": false, + "HttpProxy": "http://test:test@localhost:8080", + "HttpsProxy": "https://test:test@localhost:8080", + "ID": "7TRN:IPZB:QYBB:VPBQ:UMPP:KARE:6ZNR:XE6T:7EWV:PKF4:ZOJD:TPYS", + "IPv4Forwarding": true, + "Images": 16, + "IndexServerAddress": "https://index.docker.io/v1/", + "InitPath": "/usr/bin/docker", + "InitSha1": "", + "KernelVersion": "3.12.0-1-amd64", + "Labels": [ + "storage=ssd" + ], + "MemTotal": 2099236864, + "MemoryLimit": true, + "NCPU": 1, + "NEventsListener": 0, + "NFd": 11, + "NGoroutines": 21, + "Name": "prod-server-42", + "NoProxy": "9.81.1.160", + "OomKillDisable": true, + "OperatingSystem": "Boot2Docker", + "RegistryConfig": { + "IndexConfigs": { + "docker.io": { + "Mirrors": null, + "Name": "docker.io", + "Official": true, + "Secure": true + } + }, + "InsecureRegistryCIDRs": [ + "127.0.0.0/8" + ] + }, + "SwapLimit": false, + "SystemTime": "2015-03-10T11:11:23.730591467-07:00" + } + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Show the docker version information + +`GET /version` + +Show the docker version information + +**Example request**: + + GET /v1.20/version HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Version": "1.5.0", + "Os": "linux", + "KernelVersion": "3.18.5-tinycore64", + "GoVersion": "go1.4.1", + "GitCommit": "a8a31ef", + "Arch": "amd64", + "ApiVersion": "1.20", + "Experimental": false + } + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Ping the docker server + +`GET /_ping` + +Ping the docker server + +**Example request**: + + GET /v1.20/_ping HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: text/plain + + OK + +**Status codes**: + +- **200** - no error +- **500** - server error + +#### Create a new image from a container's changes + +`POST /commit` + +Create a new image from a container's changes + +**Example request**: + + POST /v1.20/commit?container=44c004db4b17&comment=message&repo=myrepo HTTP/1.1 + Content-Type: application/json + + { + "Hostname": "", + "Domainname": "", + "User": "", + "AttachStdin": false, + "AttachStdout": true, + "AttachStderr": true, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": null, + "Cmd": [ + "date" + ], + "Mounts": [ + { + "Source": "/data", + "Destination": "/data", + "Mode": "ro,Z", + "RW": false + } + ], + "Labels": { + "key1": "value1", + "key2": "value2" + }, + "WorkingDir": "", + "NetworkDisabled": false, + 
"ExposedPorts": { + "22/tcp": {} + } + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + {"Id": "596069db4bf5"} + +**JSON parameters**: + +- **config** - the container's configuration + +**Query parameters**: + +- **container** – source container +- **repo** – repository +- **tag** – tag +- **comment** – commit message +- **author** – author (e.g., "John Hannibal Smith + <[hannibal@a-team.com](mailto:hannibal%40a-team.com)>") +- **pause** – 1/True/true or 0/False/false, whether to pause the container before committing +- **changes** – Dockerfile instructions to apply while committing + +**Status codes**: + +- **201** – no error +- **404** – no such container +- **500** – server error + +#### Monitor Docker's events + +`GET /events` + +Get container events from docker, in real time via streaming. + +Docker containers report the following events: + + attach, commit, copy, create, destroy, die, exec_create, exec_start, export, kill, oom, pause, rename, resize, restart, start, stop, top, unpause + +Docker images report the following events: + + delete, import, pull, push, tag, untag + +**Example request**: + + GET /v1.20/events?since=1374067924 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status": "create", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067924} + {"status": "start", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067924} + {"status": "stop", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067966} + {"status": "destroy", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067970} + +**Query parameters**: + +- **since** – Timestamp. Show all events created since timestamp and then stream +- **until** – Timestamp. Show events created until given timestamp and stop streaming +- **filters** – A json encoded value of the filters (a map[string][]string) to process on the event list. Available filters: + - `container=`; -- container to filter + - `event=`; -- event to filter + - `image=`; -- image to filter + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Get a tarball containing all images in a repository + +`GET /images/(name)/get` + +Get a tarball containing all images and metadata for the repository specified +by `name`. + +If `name` is a specific name and tag (e.g. ubuntu:latest), then only that image +(and its parents) are returned. If `name` is an image ID, similarly only that +image (and its parents) are returned, but with the exclusion of the +'repositories' file in the tarball, as there were no image names referenced. + +See the [image tarball format](#image-tarball-format) for more details. + +**Example request** + + GET /v1.20/images/ubuntu/get + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + Binary data stream + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Get a tarball containing all images + +`GET /images/get` + +Get a tarball containing all images and metadata for one or more repositories. + +For each value of the `names` parameter: if it is a specific name and tag (e.g. +`ubuntu:latest`), then only that image (and its parents) are returned; if it is +an image ID, similarly only that image (and its parents) are returned and there +would be no names referenced in the 'repositories' file for this image ID. + +See the [image tarball format](#image-tarball-format) for more details. 
+ +**Example request** + + GET /v1.20/images/get?names=myname%2Fmyapp%3Alatest&names=busybox + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + Binary data stream + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Load a tarball with a set of images and tags into docker + +`POST /images/load` + +Load a set of images and tags into a Docker repository. +See the [image tarball format](#image-tarball-format) for more details. + +**Example request** + + POST /v1.20/images/load + Content-Type: application/x-tar + + Tarball in body + +**Example response**: + + HTTP/1.1 200 OK + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Image tarball format + +An image tarball contains one directory per image layer (named using its long ID), +each containing these files: + +- `VERSION`: currently `1.0` - the file format version +- `json`: detailed layer information, similar to `docker inspect layer_id` +- `layer.tar`: A tarfile containing the filesystem changes in this layer + +The `layer.tar` file contains `aufs` style `.wh..wh.aufs` files and directories +for storing attribute changes and deletions. + +If the tarball defines a repository, the tarball should also include a `repositories` file at +the root that contains a list of repository and tag names mapped to layer IDs. + +``` +{"hello-world": + {"latest": "565a9d68a73f6706862bfe8409a7f659776d4d60a8d096eb4a3cbce6999cc2a1"} +} +``` + +#### Exec Create + +`POST /containers/(id or name)/exec` + +Sets up an exec instance in a running container `id` + +**Example request**: + + POST /v1.20/containers/e90e34656806/exec HTTP/1.1 + Content-Type: application/json + + { + "AttachStdin": true, + "AttachStdout": true, + "AttachStderr": true, + "Cmd": ["sh"], + "Tty": true, + "User": "123:456" + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + { + "Id": "f90e34656806", + "Warnings":[] + } + +**JSON parameters**: + +- **AttachStdin** - Boolean value, attaches to `stdin` of the `exec` command. +- **AttachStdout** - Boolean value, attaches to `stdout` of the `exec` command. +- **AttachStderr** - Boolean value, attaches to `stderr` of the `exec` command. +- **Tty** - Boolean value to allocate a pseudo-TTY. +- **Cmd** - Command to run specified as a string or an array of strings. +- **User** - A string value specifying the user, and optionally, group to run + the exec process inside the container. Format is one of: `"user"`, + `"user:group"`, `"uid"`, or `"uid:gid"`. + +**Status codes**: + +- **201** – no error +- **404** – no such container + +#### Exec Start + +`POST /exec/(id)/start` + +Starts a previously set up `exec` instance `id`. If `detach` is true, this API +returns after starting the `exec` command. Otherwise, this API sets up an +interactive session with the `exec` command. + +**Example request**: + + POST /v1.20/exec/e90e34656806/start HTTP/1.1 + Content-Type: application/json + + { + "Detach": false, + "Tty": false + } + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/vnd.docker.raw-stream + + {% raw %} + {{ STREAM }} + {% endraw %} + +**JSON parameters**: + +- **Detach** - Detach from the `exec` command. +- **Tty** - Boolean value to allocate a pseudo-TTY. 
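+
+When `Tty` is false, the response body carries the same 8-byte-header framing
+as the attach endpoint (see the stream details note below). A minimal
+demultiplexer sketch, assuming `r` is the response body of a non-detached
+exec start:
+
+```go
+package example
+
+import (
+	"encoding/binary"
+	"io"
+	"os"
+)
+
+// demux copies a multiplexed raw stream onto stdout and stderr.
+func demux(r io.Reader) error {
+	hdr := make([]byte, 8)
+	for {
+		if _, err := io.ReadFull(r, hdr); err != nil {
+			if err == io.EOF {
+				return nil // clean end of stream
+			}
+			return err
+		}
+		// Byte 0 selects the stream; bytes 4-7 hold the big-endian frame size.
+		dst := os.Stdout
+		if hdr[0] == 2 {
+			dst = os.Stderr
+		}
+		size := int64(binary.BigEndian.Uint32(hdr[4:8]))
+		if _, err := io.CopyN(dst, r, size); err != nil {
+			return err
+		}
+	}
+}
+```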
+
+**Status codes**:
+
+- **200** – no error
+- **404** – no such exec instance
+
+**Stream details**:
+
+Similar to the stream behavior of the `POST /containers/(id or name)/attach` API.
+
+#### Exec Resize
+
+`POST /exec/(id)/resize`
+
+Resizes the `tty` session used by the `exec` command `id`. The unit is number of characters.
+This API is valid only if `tty` was specified as part of creating and starting the `exec` command.
+
+**Example request**:
+
+    POST /v1.20/exec/e90e34656806/resize?h=40&w=80 HTTP/1.1
+    Content-Type: text/plain
+
+**Example response**:
+
+    HTTP/1.1 201 Created
+    Content-Type: text/plain
+
+**Query parameters**:
+
+- **h** – height of `tty` session
+- **w** – width
+
+**Status codes**:
+
+- **201** – no error
+- **404** – no such exec instance
+
+#### Exec Inspect
+
+`GET /exec/(id)/json`
+
+Return low-level information about the `exec` command `id`.
+
+**Example request**:
+
+    GET /v1.20/exec/11fb006128e8ceb3942e7c58d77750f24210e35f879dd204ac975c184b820b39/json HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: text/plain
+
+    {
+      "ID" : "11fb006128e8ceb3942e7c58d77750f24210e35f879dd204ac975c184b820b39",
+      "Running" : false,
+      "ExitCode" : 2,
+      "ProcessConfig" : {
+        "privileged" : false,
+        "user" : "",
+        "tty" : false,
+        "entrypoint" : "sh",
+        "arguments" : [
+          "-c",
+          "exit 2"
+        ]
+      },
+      "OpenStdin" : false,
+      "OpenStderr" : false,
+      "OpenStdout" : false,
+      "Container" : {
+        "State" : {
+          "Running" : true,
+          "Paused" : false,
+          "Restarting" : false,
+          "OOMKilled" : false,
+          "Pid" : 3650,
+          "ExitCode" : 0,
+          "Error" : "",
+          "StartedAt" : "2014-11-17T22:26:03.717657531Z",
+          "FinishedAt" : "0001-01-01T00:00:00Z"
+        },
+        "ID" : "8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c",
+        "Created" : "2014-11-17T22:26:03.626304998Z",
+        "Path" : "date",
+        "Args" : [],
+        "Config" : {
+          "Hostname" : "8f177a186b97",
+          "Domainname" : "",
+          "User" : "",
+          "AttachStdin" : false,
+          "AttachStdout" : false,
+          "AttachStderr" : false,
+          "ExposedPorts" : null,
+          "Tty" : false,
+          "OpenStdin" : false,
+          "StdinOnce" : false,
+          "Env" : [ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" ],
+          "Cmd" : [
+            "date"
+          ],
+          "Image" : "ubuntu",
+          "Volumes" : null,
+          "WorkingDir" : "",
+          "Entrypoint" : null,
+          "NetworkDisabled" : false,
+          "MacAddress" : "",
+          "OnBuild" : null,
+          "SecurityOpt" : null
+        },
+        "Image" : "5506de2b643be1e6febbf3b8a240760c6843244c41e12aa2f60ccbb7153d17f5",
+        "NetworkSettings" : {
+          "IPAddress" : "172.17.0.2",
+          "IPPrefixLen" : 16,
+          "MacAddress" : "02:42:ac:11:00:02",
+          "Gateway" : "172.17.42.1",
+          "Bridge" : "docker0",
+          "PortMapping" : null,
+          "Ports" : {}
+        },
+        "ResolvConfPath" : "/var/lib/docker/containers/8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c/resolv.conf",
+        "HostnamePath" : "/var/lib/docker/containers/8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c/hostname",
+        "HostsPath" : "/var/lib/docker/containers/8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c/hosts",
+        "LogPath": "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b-json.log",
+        "Name" : "/test",
+        "Driver" : "aufs",
+        "ExecDriver" : "native-0.2",
+        "MountLabel" : "",
+        "ProcessLabel" : "",
+        "AppArmorProfile" : "",
+        "RestartCount" : 0,
+        "Mounts" : []
+      }
+    }
+
+**Status codes**:
+
+- **200** – no error
+- **404** – no such exec instance
+- **500** - server error
+
+## 3. Going further
+
+### 3.1 Inside `docker run`
+
+As an example, the `docker run` command line makes the following API calls:
+
+- Create the container.
+
+- If the status code is 404, it means the image doesn't exist:
+    - Try to pull it.
+    - Then, retry to create the container.
+
+- Start the container.
+
+- If you are not in detached mode, attach to the container, using `logs=1`
+  (to get `stdout` and `stderr` from the container's start) and `stream=1`.
+
+- If in detached mode or only `stdin` is attached, display the container's id.
+
+### 3.2 Hijacking
+
+In this version of the API, `/attach` uses hijacking to transport `stdin`,
+`stdout`, and `stderr` on the same socket.
+
+To hint potential proxies about connection hijacking, the Docker client sends
+connection upgrade headers, similar to WebSocket:
+
+    Upgrade: tcp
+    Connection: Upgrade
+
+When the Docker daemon detects the `Upgrade` header, it switches its status code
+from **200 OK** to **101 UPGRADED** and resends the same headers.
+
+
+### 3.3 CORS Requests
+
+To allow cross-origin requests to the Engine API, pass a value for the
+`--api-cors-header` flag when running Docker in daemon mode. Setting `*`
+(asterisk) allows all origins; the default (blank) leaves CORS disabled.
+
+    $ dockerd -H="192.168.1.9:2375" --api-cors-header="http://foo.bar"
diff --git a/components/engine/docs/api/v1.21.md b/components/engine/docs/api/v1.21.md
new file mode 100644
index 0000000000..1d42fd0ecc
--- /dev/null
+++ b/components/engine/docs/api/v1.21.md
@@ -0,0 +1,2981 @@
+---
+title: "Engine API v1.21"
+description: "API Documentation for Docker"
+keywords: "API, Docker, rcli, REST, documentation"
+redirect_from:
+- /engine/reference/api/docker_remote_api_v1.21/
+- /reference/api/docker_remote_api_v1.21/
+---
+
+
+
+## 1. Brief introduction
+
+ - The daemon listens on `unix:///var/run/docker.sock` but you can
+   [Bind Docker to another host/port or a Unix socket](../reference/commandline/dockerd.md#bind-docker-to-another-host-port-or-a-unix-socket).
+ - The API tends to be REST. However, for some complex commands, like `attach`
+   or `pull`, the HTTP connection is hijacked to transport `stdout`,
+   `stdin`, and `stderr`.
+ - When the client API version is newer than the daemon's, these calls return an HTTP
+   `400 Bad Request` error message.
+
+## 2.
Endpoints + +### 2.1 Containers + +#### List containers + +`GET /containers/json` + +List containers + +**Example request**: + + GET /v1.21/containers/json?all=1&before=8dfafdbc3a40&size=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Id": "8dfafdbc3a40", + "Names":["/boring_feynman"], + "Image": "ubuntu:latest", + "ImageID": "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82", + "Command": "echo 1", + "Created": 1367854155, + "Status": "Exit 0", + "Ports": [{"PrivatePort": 2222, "PublicPort": 3333, "Type": "tcp"}], + "Labels": { + "com.example.vendor": "Acme", + "com.example.license": "GPL", + "com.example.version": "1.0" + }, + "SizeRw": 12288, + "SizeRootFs": 0 + }, + { + "Id": "9cd87474be90", + "Names":["/coolName"], + "Image": "ubuntu:latest", + "ImageID": "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82", + "Command": "echo 222222", + "Created": 1367854155, + "Status": "Exit 0", + "Ports": [], + "Labels": {}, + "SizeRw": 12288, + "SizeRootFs": 0 + }, + { + "Id": "3176a2479c92", + "Names":["/sleepy_dog"], + "Image": "ubuntu:latest", + "ImageID": "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82", + "Command": "echo 3333333333333333", + "Created": 1367854154, + "Status": "Exit 0", + "Ports":[], + "Labels": {}, + "SizeRw":12288, + "SizeRootFs":0 + }, + { + "Id": "4cb07b47f9fb", + "Names":["/running_cat"], + "Image": "ubuntu:latest", + "ImageID": "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82", + "Command": "echo 444444444444444444444444444444444", + "Created": 1367854152, + "Status": "Exit 0", + "Ports": [], + "Labels": {}, + "SizeRw": 12288, + "SizeRootFs": 0 + } + ] + +**Query parameters**: + +- **all** – 1/True/true or 0/False/false, Show all containers. + Only running containers are shown by default (i.e., this defaults to false) +- **limit** – Show `limit` last created + containers, include non-running ones. +- **since** – Show only containers created since Id, include + non-running ones. +- **before** – Show only containers created before Id, include + non-running ones. +- **size** – 1/True/true or 0/False/false, Show the containers + sizes +- **filters** - a JSON encoded value of the filters (a `map[string][]string`) to process on the containers list. 
Available filters:
+  - `exited=<int>`; -- containers with exit code of `<int>`;
+  - `status=`(`created`|`restarting`|`running`|`paused`|`exited`)
+  - `label=key` or `label="key=value"` of a container label
+
+**Status codes**:
+
+- **200** – no error
+- **400** – bad parameter
+- **500** – server error
+
+#### Create a container
+
+`POST /containers/create`
+
+Create a container
+
+**Example request**:
+
+    POST /v1.21/containers/create HTTP/1.1
+    Content-Type: application/json
+
+    {
+       "Hostname": "",
+       "Domainname": "",
+       "User": "",
+       "AttachStdin": false,
+       "AttachStdout": true,
+       "AttachStderr": true,
+       "Tty": false,
+       "OpenStdin": false,
+       "StdinOnce": false,
+       "Env": [
+               "FOO=bar",
+               "BAZ=quux"
+       ],
+       "Cmd": [
+               "date"
+       ],
+       "Entrypoint": null,
+       "Image": "ubuntu",
+       "Labels": {
+               "com.example.vendor": "Acme",
+               "com.example.license": "GPL",
+               "com.example.version": "1.0"
+       },
+       "Volumes": {
+         "/volumes/data": {}
+       },
+       "WorkingDir": "",
+       "NetworkDisabled": false,
+       "MacAddress": "12:34:56:78:9a:bc",
+       "ExposedPorts": {
+               "22/tcp": {}
+       },
+       "StopSignal": "SIGTERM",
+       "HostConfig": {
+         "Binds": ["/tmp:/tmp"],
+         "Links": ["redis3:redis"],
+         "LxcConf": {"lxc.utsname":"docker"},
+         "Memory": 0,
+         "MemorySwap": 0,
+         "MemoryReservation": 0,
+         "KernelMemory": 0,
+         "CpuShares": 512,
+         "CpuPeriod": 100000,
+         "CpuQuota": 50000,
+         "CpusetCpus": "0,1",
+         "CpusetMems": "0,1",
+         "BlkioWeight": 300,
+         "MemorySwappiness": 60,
+         "OomKillDisable": false,
+         "PidMode": "",
+         "PortBindings": { "22/tcp": [{ "HostPort": "11022" }] },
+         "PublishAllPorts": false,
+         "Privileged": false,
+         "ReadonlyRootfs": false,
+         "Dns": ["8.8.8.8"],
+         "DnsOptions": [""],
+         "DnsSearch": [""],
+         "ExtraHosts": null,
+         "VolumesFrom": ["parent", "other:ro"],
+         "CapAdd": ["NET_ADMIN"],
+         "CapDrop": ["MKNOD"],
+         "GroupAdd": ["newgroup"],
+         "RestartPolicy": { "Name": "", "MaximumRetryCount": 0 },
+         "NetworkMode": "bridge",
+         "Devices": [],
+         "Ulimits": [{}],
+         "LogConfig": { "Type": "json-file", "Config": {} },
+         "SecurityOpt": [],
+         "CgroupParent": "",
+         "VolumeDriver": ""
+      }
+    }
+
+**Example response**:
+
+    HTTP/1.1 201 Created
+    Content-Type: application/json
+
+    {
+         "Id":"e90e34656806",
+         "Warnings":[]
+    }
+
+**JSON parameters**:
+
+- **Hostname** - A string value containing the hostname to use for the
+  container.
+- **Domainname** - A string value containing the domain name to use
+  for the container.
+- **User** - A string value specifying the user inside the container.
+- **AttachStdin** - Boolean value, attaches to `stdin`.
+- **AttachStdout** - Boolean value, attaches to `stdout`.
+- **AttachStderr** - Boolean value, attaches to `stderr`.
+- **Tty** - Boolean value, Attach standard streams to a `tty`, including `stdin` if it is not closed.
+- **OpenStdin** - Boolean value, opens `stdin`.
+- **StdinOnce** - Boolean value, close `stdin` after the first attached client disconnects.
+- **Env** - A list of environment variables in the form of `["VAR=value", ...]`
+- **Labels** - Adds a map of labels to a container. To specify a map: `{"key":"value", ... }`
+- **Cmd** - Command to run specified as a string or an array of strings.
+- **Entrypoint** - Set the entry point for the container as a string or an array
+  of strings.
+- **Image** - A string specifying the image name to use for the container.
+- **Volumes** - An object mapping mount point paths (strings) inside the
+  container to empty objects.
+- **WorkingDir** - A string specifying the working directory for commands to
+  run in.
+- **NetworkDisabled** - Boolean value, when true disables networking for the
+  container
+- **ExposedPorts** - An object mapping ports to an empty object in the form of:
+  `"ExposedPorts": { "<port>/<tcp|udp>": {} }`
+- **StopSignal** - Signal to stop a container as a string or unsigned integer. `SIGTERM` by default.
+- **HostConfig**
+  - **Binds** – A list of volume bindings for this container. Each volume binding is a string in one of these forms:
+       + `host-src:container-dest` to bind-mount a host path into the
+         container. Both `host-src`, and `container-dest` must be an
+         _absolute_ path.
+       + `host-src:container-dest:ro` to make the bind-mount read-only
+         inside the container. Both `host-src`, and `container-dest` must be
+         an _absolute_ path.
+       + `volume-name:container-dest` to bind-mount a volume managed by a
+         volume driver into the container. `container-dest` must be an
+         _absolute_ path.
+       + `volume-name:container-dest:ro` to mount the volume read-only
+         inside the container. `container-dest` must be an _absolute_ path.
+  - **Links** - A list of links for the container. Each link entry should be
+    in the form of `container_name:alias`.
+  - **LxcConf** - LXC specific configurations. These configurations only
+    work when using the `lxc` execution driver.
+  - **Memory** - Memory limit in bytes.
+  - **MemorySwap** - Total memory limit (memory + swap); set `-1` to enable unlimited swap.
+    You must use this with `memory` and make the swap value larger than `memory`.
+  - **MemoryReservation** - Memory soft limit in bytes.
+  - **KernelMemory** - Kernel memory limit in bytes.
+  - **CpuShares** - An integer value containing the container's CPU Shares
+    (i.e., the relative weight vs. other containers).
+  - **CpuPeriod** - The length of a CPU period in microseconds.
+  - **CpuQuota** - Microseconds of CPU time that the container can get in a CPU period.
+  - **CpusetCpus** - String value containing the `cgroups CpusetCpus` to use.
+  - **CpusetMems** - Memory nodes (MEMs) in which to allow execution (e.g., `0-3`, `0,1`). Only effective on NUMA systems.
+  - **BlkioWeight** - Block IO weight (relative weight) accepts a weight value between 10 and 1000.
+  - **MemorySwappiness** - Tune a container's memory swappiness behavior. Accepts an integer between 0 and 100.
+  - **OomKillDisable** - Boolean value, whether to disable OOM Killer for the container or not.
+  - **PidMode** - Set the PID (Process) Namespace mode for the container;
+    `"container:<name|id>"`: joins another container's PID namespace;
+    `"host"`: use the host's PID namespace inside the container
+  - **PortBindings** - A map of exposed container ports and the host port they
+    should map to. A JSON object in the form
+    `{ "<port>/<protocol>": [{ "HostPort": "<port>" }] }`
+    Take note that `port` is specified as a string and not an integer value.
+  - **PublishAllPorts** - Allocates a random host port for all of a container's
+    exposed ports. Specified as a boolean value.
+  - **Privileged** - Gives the container full access to the host. Specified as
+    a boolean value.
+  - **ReadonlyRootfs** - Mount the container's root filesystem as read only.
+    Specified as a boolean value.
+  - **Dns** - A list of DNS servers for the container to use.
+  - **DnsOptions** - A list of DNS options
+  - **DnsSearch** - A list of DNS search domains
+  - **ExtraHosts** - A list of hostnames/IP mappings to add to the
+    container's `/etc/hosts` file. Specified in the form `["hostname:IP"]`.
+  - **VolumesFrom** - A list of volumes to inherit from another container.
+    Specified in the form `<container name>[:<ro|rw>]`
+  - **CapAdd** - A list of kernel capabilities to add to the container.
+  - **CapDrop** - A list of kernel capabilities to drop from the container.
+  - **GroupAdd** - A list of additional groups that the container process will run as
+  - **RestartPolicy** – The behavior to apply when the container exits. The
+    value is an object with a `Name` property of either `"always"` to
+    always restart, `"unless-stopped"` to always restart except when the
+    user has manually stopped the container, or `"on-failure"` to restart only when the container
+    exit code is non-zero. If `on-failure` is used, `MaximumRetryCount`
+    controls the number of times to retry before giving up.
+    The default is not to restart. (optional)
+    An ever-increasing delay (double the previous delay, starting at 100 ms)
+    is added before each restart to prevent flooding the server.
+  - **NetworkMode** - Sets the networking mode for the container. Supported
+    standard values are: `bridge`, `host`, `none`, and `container:<name|id>`. Any other value is taken
+    as the name of a custom network to which this container should connect.
+  - **Devices** - A list of devices to add to the container specified as a JSON object in the
+    form
+    `{ "PathOnHost": "/dev/deviceName", "PathInContainer": "/dev/deviceName", "CgroupPermissions": "mrw"}`
+  - **Ulimits** - A list of ulimits to set in the container, specified as
+    `{ "Name": <name>, "Soft": <soft limit>, "Hard": <hard limit> }`, for example:
+    `Ulimits: { "Name": "nofile", "Soft": 1024, "Hard": 2048 }`
+  - **SecurityOpt**: A list of string values to customize labels for MLS
+    systems, such as SELinux.
+  - **LogConfig** - Log configuration for the container, specified as a JSON object in the form
+    `{ "Type": "<driver_name>", "Config": {"key1": "val1"} }`.
+    Available types: `json-file`, `syslog`, `journald`, `gelf`, `awslogs`, `none`.
+    The default is the `json-file` logging driver.
+  - **CgroupParent** - Path to `cgroups` under which the container's `cgroup` is created. If the path is not absolute, the path is considered to be relative to the `cgroups` path of the init process. Cgroups are created if they do not already exist.
+  - **VolumeDriver** - Driver that this container uses to mount volumes.
+
+**Query parameters**:
+
+- **name** – Assign the specified name to the container. Must
+  match `/?[a-zA-Z0-9_-]+`.
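+
+As a rough sketch (container name and image chosen arbitrarily for
+illustration), a client could create a minimal container over the default
+Unix socket like this; the `http://docker` host is a placeholder that the
+custom dialer ignores:
+
+```go
+package main
+
+import (
+	"bytes"
+	"context"
+	"fmt"
+	"io"
+	"net"
+	"net/http"
+)
+
+func main() {
+	client := &http.Client{Transport: &http.Transport{
+		DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
+			return (&net.Dialer{}).DialContext(ctx, "unix", "/var/run/docker.sock")
+		},
+	}}
+
+	// A minimal configuration: image plus command; everything else defaulted.
+	body := []byte(`{"Image": "ubuntu", "Cmd": ["date"]}`)
+	resp, err := client.Post("http://docker/v1.21/containers/create?name=example",
+		"application/json", bytes.NewReader(body))
+	if err != nil {
+		panic(err)
+	}
+	defer resp.Body.Close()
+
+	reply, _ := io.ReadAll(resp.Body)
+	fmt.Println(resp.Status, string(reply)) // expect 201 and {"Id": ..., "Warnings": ...}
+}
+```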
+ +**Status codes**: + +- **201** – no error +- **400** – bad parameter +- **404** – no such container +- **406** – impossible to attach (container not running) +- **409** – conflict +- **500** – server error + +#### Inspect a container + +`GET /containers/(id or name)/json` + +Return low-level information on the container `id` + +**Example request**: + + GET /v1.21/containers/4fa6e0f0c678/json HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "AppArmorProfile": "", + "Args": [ + "-c", + "exit 9" + ], + "Config": { + "AttachStderr": true, + "AttachStdin": false, + "AttachStdout": true, + "Cmd": [ + "/bin/sh", + "-c", + "exit 9" + ], + "Domainname": "", + "Entrypoint": null, + "Env": [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + ], + "ExposedPorts": null, + "Hostname": "ba033ac44011", + "Image": "ubuntu", + "Labels": { + "com.example.vendor": "Acme", + "com.example.license": "GPL", + "com.example.version": "1.0" + }, + "MacAddress": "", + "NetworkDisabled": false, + "OnBuild": null, + "OpenStdin": false, + "StdinOnce": false, + "Tty": false, + "User": "", + "Volumes": null, + "WorkingDir": "", + "StopSignal": "SIGTERM" + }, + "Created": "2015-01-06T15:47:31.485331387Z", + "Driver": "devicemapper", + "ExecDriver": "native-0.2", + "ExecIDs": null, + "HostConfig": { + "Binds": null, + "BlkioWeight": 0, + "CapAdd": null, + "CapDrop": null, + "ContainerIDFile": "", + "CpusetCpus": "", + "CpusetMems": "", + "CpuShares": 0, + "CpuPeriod": 100000, + "Devices": [], + "Dns": null, + "DnsOptions": null, + "DnsSearch": null, + "ExtraHosts": null, + "IpcMode": "", + "Links": null, + "LxcConf": [], + "Memory": 0, + "MemorySwap": 0, + "MemoryReservation": 0, + "KernelMemory": 0, + "OomKillDisable": false, + "NetworkMode": "bridge", + "PidMode": "", + "PortBindings": {}, + "Privileged": false, + "ReadonlyRootfs": false, + "PublishAllPorts": false, + "RestartPolicy": { + "MaximumRetryCount": 2, + "Name": "on-failure" + }, + "LogConfig": { + "Config": null, + "Type": "json-file" + }, + "SecurityOpt": null, + "VolumesFrom": null, + "Ulimits": [{}], + "VolumeDriver": "" + }, + "HostnamePath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hostname", + "HostsPath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hosts", + "LogPath": "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b-json.log", + "Id": "ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39", + "Image": "04c5d3b7b0656168630d3ba35d8889bd0e9caafcaeb3004d2bfbc47e7c5d35d2", + "MountLabel": "", + "Name": "/boring_euclid", + "NetworkSettings": { + "Bridge": "", + "SandboxID": "", + "HairpinMode": false, + "LinkLocalIPv6Address": "", + "LinkLocalIPv6PrefixLen": 0, + "Ports": null, + "SandboxKey": "", + "SecondaryIPAddresses": null, + "SecondaryIPv6Addresses": null, + "EndpointID": "", + "Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "IPAddress": "", + "IPPrefixLen": 0, + "IPv6Gateway": "", + "MacAddress": "", + "Networks": { + "bridge": { + "EndpointID": "7587b82f0dada3656fda26588aee72630c6fab1536d36e394b2bfbcf898c971d", + "Gateway": "172.17.0.1", + "IPAddress": "172.17.0.2", + "IPPrefixLen": 16, + "IPv6Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "MacAddress": "02:42:ac:12:00:02" + } + } + }, + "Path": "/bin/sh", + 
"ProcessLabel": "", + "ResolvConfPath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/resolv.conf", + "RestartCount": 1, + "State": { + "Error": "", + "ExitCode": 9, + "FinishedAt": "2015-01-06T15:47:32.080254511Z", + "OOMKilled": false, + "Paused": false, + "Pid": 0, + "Restarting": false, + "Running": true, + "StartedAt": "2015-01-06T15:47:32.072697474Z", + "Status": "running" + }, + "Mounts": [ + { + "Source": "/data", + "Destination": "/data", + "Mode": "ro,Z", + "RW": false + } + ] + } + +**Example request, with size information**: + + GET /v1.21/containers/4fa6e0f0c678/json?size=1 HTTP/1.1 + +**Example response, with size information**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + .... + "SizeRw": 0, + "SizeRootFs": 972, + .... + } + +**Query parameters**: + +- **size** – 1/True/true or 0/False/false, return container size information. Default is `false`. + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### List processes running inside a container + +`GET /containers/(id or name)/top` + +List processes running inside the container `id`. On Unix systems this +is done by running the `ps` command. This endpoint is not +supported on Windows. + +**Example request**: + + GET /v1.21/containers/4fa6e0f0c678/top HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Titles" : [ + "UID", "PID", "PPID", "C", "STIME", "TTY", "TIME", "CMD" + ], + "Processes" : [ + [ + "root", "13642", "882", "0", "17:03", "pts/0", "00:00:00", "/bin/bash" + ], + [ + "root", "13735", "13642", "0", "17:06", "pts/0", "00:00:00", "sleep 10" + ] + ] + } + +**Example request**: + + GET /v1.21/containers/4fa6e0f0c678/top?ps_args=aux HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Titles" : [ + "USER","PID","%CPU","%MEM","VSZ","RSS","TTY","STAT","START","TIME","COMMAND" + ] + "Processes" : [ + [ + "root","13642","0.0","0.1","18172","3184","pts/0","Ss","17:03","0:00","/bin/bash" + ], + [ + "root","13895","0.0","0.0","4348","692","pts/0","S+","17:15","0:00","sleep 10" + ] + ], + } + +**Query parameters**: + +- **ps_args** – `ps` arguments to use (e.g., `aux`), defaults to `-ef` + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Get container logs + +`GET /containers/(id or name)/logs` + +Get `stdout` and `stderr` logs from the container ``id`` + +> **Note**: +> This endpoint works only for containers with the `json-file` or `journald` logging drivers. + +**Example request**: + + GET /v1.21/containers/4fa6e0f0c678/logs?stderr=1&stdout=1×tamps=1&follow=1&tail=10&since=1428990821 HTTP/1.1 + +**Example response**: + + HTTP/1.1 101 UPGRADED + Content-Type: application/vnd.docker.raw-stream + Connection: Upgrade + Upgrade: tcp + + {% raw %} + {{ STREAM }} + {% endraw %} + +**Query parameters**: + +- **follow** – 1/True/true or 0/False/false, return stream. Default `false`. +- **stdout** – 1/True/true or 0/False/false, show `stdout` log. Default `false`. +- **stderr** – 1/True/true or 0/False/false, show `stderr` log. Default `false`. +- **since** – UNIX timestamp (integer) to filter logs. Specifying a timestamp + will only output log-entries since that timestamp. Default: 0 (unfiltered) +- **timestamps** – 1/True/true or 0/False/false, print timestamps for + every log line. Default `false`. 
+- **tail** – Output specified number of lines at the end of logs: `all` or ``. Default all. + +**Status codes**: + +- **101** – no error, hints proxy about hijacking +- **200** – no error, no upgrade header found +- **404** – no such container +- **500** – server error + +#### Inspect changes on a container's filesystem + +`GET /containers/(id or name)/changes` + +Inspect changes on container `id`'s filesystem + +**Example request**: + + GET /v1.21/containers/4fa6e0f0c678/changes HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Path": "/dev", + "Kind": 0 + }, + { + "Path": "/dev/kmsg", + "Kind": 1 + }, + { + "Path": "/test", + "Kind": 1 + } + ] + +Values for `Kind`: + +- `0`: Modify +- `1`: Add +- `2`: Delete + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Export a container + +`GET /containers/(id or name)/export` + +Export the contents of container `id` + +**Example request**: + + GET /v1.21/containers/4fa6e0f0c678/export HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/octet-stream + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Get container stats based on resource usage + +`GET /containers/(id or name)/stats` + +This endpoint returns a live stream of a container's resource usage statistics. + +**Example request**: + + GET /v1.21/containers/redis1/stats HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "read" : "2015-01-08T22:57:31.547920715Z", + "networks": { + "eth0": { + "rx_bytes": 5338, + "rx_dropped": 0, + "rx_errors": 0, + "rx_packets": 36, + "tx_bytes": 648, + "tx_dropped": 0, + "tx_errors": 0, + "tx_packets": 8 + }, + "eth5": { + "rx_bytes": 4641, + "rx_dropped": 0, + "rx_errors": 0, + "rx_packets": 26, + "tx_bytes": 690, + "tx_dropped": 0, + "tx_errors": 0, + "tx_packets": 9 + } + }, + "memory_stats" : { + "stats" : { + "total_pgmajfault" : 0, + "cache" : 0, + "mapped_file" : 0, + "total_inactive_file" : 0, + "pgpgout" : 414, + "rss" : 6537216, + "total_mapped_file" : 0, + "writeback" : 0, + "unevictable" : 0, + "pgpgin" : 477, + "total_unevictable" : 0, + "pgmajfault" : 0, + "total_rss" : 6537216, + "total_rss_huge" : 6291456, + "total_writeback" : 0, + "total_inactive_anon" : 0, + "rss_huge" : 6291456, + "hierarchical_memory_limit" : 67108864, + "total_pgfault" : 964, + "total_active_file" : 0, + "active_anon" : 6537216, + "total_active_anon" : 6537216, + "total_pgpgout" : 414, + "total_cache" : 0, + "inactive_anon" : 0, + "active_file" : 0, + "pgfault" : 964, + "inactive_file" : 0, + "total_pgpgin" : 477 + }, + "max_usage" : 6651904, + "usage" : 6537216, + "failcnt" : 0, + "limit" : 67108864 + }, + "blkio_stats" : {}, + "cpu_stats" : { + "cpu_usage" : { + "percpu_usage" : [ + 8646879, + 24472255, + 36438778, + 30657443 + ], + "usage_in_usermode" : 50000000, + "total_usage" : 100215355, + "usage_in_kernelmode" : 30000000 + }, + "system_cpu_usage" : 739306590000000, + "throttling_data" : {"periods":0,"throttled_periods":0,"throttled_time":0} + }, + "precpu_stats" : { + "cpu_usage" : { + "percpu_usage" : [ + 8646879, + 24350896, + 36438778, + 30657443 + ], + "usage_in_usermode" : 50000000, + "total_usage" : 100093996, + "usage_in_kernelmode" : 30000000 + }, + "system_cpu_usage" : 9492140000000, + "throttling_data" : {"periods":0,"throttled_periods":0,"throttled_time":0} + } + } + 
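+The `cpu_stats` and `precpu_stats` samples above can be combined into a CPU
+usage percentage, as the note below explains. A sketch of the usual
+calculation (field names follow the example response):
+
+```go
+package example
+
+// cpuPercent derives a usage percentage from two consecutive samples,
+// mirroring the calculation the docker CLI performs with cpu_stats
+// (current) and precpu_stats (previous).
+func cpuPercent(totalUsage, preTotalUsage, systemUsage, preSystemUsage uint64, ncpu int) float64 {
+	cpuDelta := float64(totalUsage) - float64(preTotalUsage)   // cpu_usage.total_usage delta
+	sysDelta := float64(systemUsage) - float64(preSystemUsage) // system_cpu_usage delta
+	if sysDelta > 0 && cpuDelta > 0 {
+		return (cpuDelta / sysDelta) * float64(ncpu) * 100.0 // ncpu = len(percpu_usage)
+	}
+	return 0.0
+}
+```
+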
+The `precpu_stats` is the cpu statistic of last read, which is used for calculating the cpu usage percent. It is not the exact copy of the `cpu_stats` field. + +**Query parameters**: + +- **stream** – 1/True/true or 0/False/false, pull stats once then disconnect. Default `true`. + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Resize a container TTY + +`POST /containers/(id or name)/resize` + +Resize the TTY for container with `id`. The unit is number of characters. You must restart the container for the resize to take effect. + +**Example request**: + + POST /v1.21/containers/4fa6e0f0c678/resize?h=40&w=80 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Length: 0 + Content-Type: text/plain; charset=utf-8 + +**Query parameters**: + +- **h** – height of `tty` session +- **w** – width + +**Status codes**: + +- **200** – no error +- **404** – No such container +- **500** – Cannot resize container + +#### Start a container + +`POST /containers/(id or name)/start` + +Start the container `id` + +> **Note**: +> For backwards compatibility, this endpoint accepts a `HostConfig` as JSON-encoded request body. +> See [create a container](#create-a-container) for details. + +**Example request**: + + POST /v1.21/containers/e90e34656806/start HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Status codes**: + +- **204** – no error +- **304** – container already started +- **404** – no such container +- **500** – server error + +#### Stop a container + +`POST /containers/(id or name)/stop` + +Stop the container `id` + +**Example request**: + + POST /v1.21/containers/e90e34656806/stop?t=5 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **t** – number of seconds to wait before killing the container + +**Status codes**: + +- **204** – no error +- **304** – container already stopped +- **404** – no such container +- **500** – server error + +#### Restart a container + +`POST /containers/(id or name)/restart` + +Restart the container `id` + +**Example request**: + + POST /v1.21/containers/e90e34656806/restart?t=5 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **t** – number of seconds to wait before killing the container + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **500** – server error + +#### Kill a container + +`POST /containers/(id or name)/kill` + +Kill the container `id` + +**Example request**: + + POST /v1.21/containers/e90e34656806/kill HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **signal** - Signal to send to the container: integer or string like `SIGINT`. + When not set, `SIGKILL` is assumed and the call waits for the container to exit. 
+ +**Status codes**: + +- **204** – no error +- **404** – no such container +- **500** – server error + +#### Rename a container + +`POST /containers/(id or name)/rename` + +Rename the container `id` to a `new_name` + +**Example request**: + + POST /v1.21/containers/e90e34656806/rename?name=new_name HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **name** – new name for the container + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **409** - conflict name already assigned +- **500** – server error + +#### Pause a container + +`POST /containers/(id or name)/pause` + +Pause the container `id` + +**Example request**: + + POST /v1.21/containers/e90e34656806/pause HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **500** – server error + +#### Unpause a container + +`POST /containers/(id or name)/unpause` + +Unpause the container `id` + +**Example request**: + + POST /v1.21/containers/e90e34656806/unpause HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **500** – server error + +#### Attach to a container + +`POST /containers/(id or name)/attach` + +Attach to the container `id` + +**Example request**: + + POST /v1.21/containers/16253994b7c4/attach?logs=1&stream=0&stdout=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 101 UPGRADED + Content-Type: application/vnd.docker.raw-stream + Connection: Upgrade + Upgrade: tcp + + {% raw %} + {{ STREAM }} + {% endraw %} + +**Query parameters**: + +- **logs** – 1/True/true or 0/False/false, return logs. Default `false`. +- **stream** – 1/True/true or 0/False/false, return stream. + Default `false`. +- **stdin** – 1/True/true or 0/False/false, if `stream=true`, attach + to `stdin`. Default `false`. +- **stdout** – 1/True/true or 0/False/false, if `logs=true`, return + `stdout` log, if `stream=true`, attach to `stdout`. Default `false`. +- **stderr** – 1/True/true or 0/False/false, if `logs=true`, return + `stderr` log, if `stream=true`, attach to `stderr`. Default `false`. + +**Status codes**: + +- **101** – no error, hints proxy about hijacking +- **200** – no error, no upgrade header found +- **400** – bad parameter +- **404** – no such container +- **500** – server error + +**Stream details**: + +When using the TTY setting is enabled in +[`POST /containers/create` +](#create-a-container), +the stream is the raw data from the process PTY and client's `stdin`. +When the TTY is disabled, then the stream is multiplexed to separate +`stdout` and `stderr`. + +The format is a **Header** and a **Payload** (frame). + +**HEADER** + +The header contains the information which the stream writes (`stdout` or +`stderr`). It also contains the size of the associated frame encoded in the +last four bytes (`uint32`). + +It is encoded on the first eight bytes like this: + + header := [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4} + +`STREAM_TYPE` can be: + +- 0: `stdin` (is written on `stdout`) +- 1: `stdout` +- 2: `stderr` + +`SIZE1, SIZE2, SIZE3, SIZE4` are the four bytes of +the `uint32` size encoded as big endian. + +**PAYLOAD** + +The payload is the raw stream. + +**IMPLEMENTATION** + +The simplest way to implement the Attach protocol is the following: + + 1. Read eight bytes. + 2. Choose `stdout` or `stderr` depending on the first byte. + 3. Extract the frame size from the last four bytes. + 4. 
Read the extracted size and output it on the correct output. + 5. Goto 1. + +#### Attach to a container (websocket) + +`GET /containers/(id or name)/attach/ws` + +Attach to the container `id` via websocket + +Implements websocket protocol handshake according to [RFC 6455](http://tools.ietf.org/html/rfc6455) + +**Example request** + + GET /v1.21/containers/e90e34656806/attach/ws?logs=0&stream=1&stdin=1&stdout=1&stderr=1 HTTP/1.1 + +**Example response** + + {% raw %} + {{ STREAM }} + {% endraw %} + +**Query parameters**: + +- **logs** – 1/True/true or 0/False/false, return logs. Default `false`. +- **stream** – 1/True/true or 0/False/false, return stream. + Default `false`. +- **stdin** – 1/True/true or 0/False/false, if `stream=true`, attach + to `stdin`. Default `false`. +- **stdout** – 1/True/true or 0/False/false, if `logs=true`, return + `stdout` log, if `stream=true`, attach to `stdout`. Default `false`. +- **stderr** – 1/True/true or 0/False/false, if `logs=true`, return + `stderr` log, if `stream=true`, attach to `stderr`. Default `false`. + +**Status codes**: + +- **200** – no error +- **400** – bad parameter +- **404** – no such container +- **500** – server error + +#### Wait a container + +`POST /containers/(id or name)/wait` + +Block until container `id` stops, then returns the exit code + +**Example request**: + + POST /v1.21/containers/16253994b7c4/wait HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"StatusCode": 0} + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Remove a container + +`DELETE /containers/(id or name)` + +Remove the container `id` from the filesystem + +**Example request**: + + DELETE /v1.21/containers/16253994b7c4?v=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **v** – 1/True/true or 0/False/false, Remove the volumes + associated to the container. Default `false`. +- **force** - 1/True/true or 0/False/false, Kill then remove the container. + Default `false`. +- **link** - 1/True/true or 0/False/false, Remove the specified + link associated to the container. Default `false`. + +**Status codes**: + +- **204** – no error +- **400** – bad parameter +- **404** – no such container +- **409** – conflict +- **500** – server error + +#### Copy files or folders from a container + +`POST /containers/(id or name)/copy` + +Copy files or folders of container `id` + +**Deprecated** in favor of the `archive` endpoint below. + +**Example request**: + + POST /v1.21/containers/4fa6e0f0c678/copy HTTP/1.1 + Content-Type: application/json + + { + "Resource": "test.txt" + } + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Retrieving information about files and folders in a container + +`HEAD /containers/(id or name)/archive` + +See the description of the `X-Docker-Container-Path-Stat` header in the +following section. + +#### Get an archive of a filesystem resource in a container + +`GET /containers/(id or name)/archive` + +Get a tar archive of a resource in the filesystem of container `id`. + +**Query parameters**: + +- **path** - resource in the container's filesystem to archive. Required. + + If not an absolute path, it is relative to the container's root directory. + The resource specified by **path** must exist. 
To assert that the resource + is expected to be a directory, **path** should end in `/` or `/.` + (assuming a path separator of `/`). If **path** ends in `/.` then this + indicates that only the contents of the **path** directory should be + copied. A symlink is always resolved to its target. + + > **Note**: It is not possible to copy certain system files such as resources + > under `/proc`, `/sys`, `/dev`, and mounts created by the user in the + > container. + +**Example request**: + + GET /v1.21/containers/8cce319429b2/archive?path=/root HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + X-Docker-Container-Path-Stat: eyJuYW1lIjoicm9vdCIsInNpemUiOjQwOTYsIm1vZGUiOjIxNDc0ODQwOTYsIm10aW1lIjoiMjAxNC0wMi0yN1QyMDo1MToyM1oiLCJsaW5rVGFyZ2V0IjoiIn0= + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +On success, a response header `X-Docker-Container-Path-Stat` will be set to a +base64-encoded JSON object containing some filesystem header information about +the archived resource. The above example value would decode to the following +JSON object (whitespace added for readability): + +```json +{ + "name": "root", + "size": 4096, + "mode": 2147484096, + "mtime": "2014-02-27T20:51:23Z", + "linkTarget": "" +} +``` + +A `HEAD` request can also be made to this endpoint if only this information is +desired. + +**Status codes**: + +- **200** - success, returns archive of copied resource +- **400** - client error, bad parameter, details in JSON response body, one of: + - must specify path parameter (**path** cannot be empty) + - not a directory (**path** was asserted to be a directory but exists as a + file) +- **404** - client error, resource not found, one of: + – no such container (container `id` does not exist) + - no such file or directory (**path** does not exist) +- **500** - server error + +#### Extract an archive of files or folders to a directory in a container + +`PUT /containers/(id or name)/archive` + +Upload a tar archive to be extracted to a path in the filesystem of container +`id`. + +**Query parameters**: + +- **path** - path to a directory in the container + to extract the archive's contents into. Required. + + If not an absolute path, it is relative to the container's root directory. + The **path** resource must exist. +- **noOverwriteDirNonDir** - If "1", "true", or "True" then it will be an error + if unpacking the given content would cause an existing directory to be + replaced with a non-directory and vice versa. + +**Example request**: + + PUT /v1.21/containers/8cce319429b2/archive?path=/vol1 HTTP/1.1 + Content-Type: application/x-tar + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +**Example response**: + + HTTP/1.1 200 OK + +**Status codes**: + +- **200** – the content was extracted successfully +- **400** - client error, bad parameter, details in JSON response body, one of: + - must specify path parameter (**path** cannot be empty) + - not a directory (**path** should be a directory but exists as a file) + - unable to overwrite existing directory with non-directory + (if **noOverwriteDirNonDir**) + - unable to overwrite existing non-directory with directory + (if **noOverwriteDirNonDir**) +- **403** - client error, permission denied, the volume + or container rootfs is marked as read-only. 
+- **404** - client error, resource not found, one of: + – no such container (container `id` does not exist) + - no such file or directory (**path** resource does not exist) +- **500** – server error + +### 2.2 Images + +#### List Images + +`GET /images/json` + +**Example request**: + + GET /v1.21/images/json?all=0 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "RepoTags": [ + "ubuntu:12.04", + "ubuntu:precise", + "ubuntu:latest" + ], + "Id": "8dbd9e392a964056420e5d58ca5cc376ef18e2de93b5cc90e868a1bbc8318c1c", + "Created": 1365714795, + "Size": 131506275, + "VirtualSize": 131506275, + "Labels": {} + }, + { + "RepoTags": [ + "ubuntu:12.10", + "ubuntu:quantal" + ], + "ParentId": "27cf784147099545", + "Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", + "Created": 1364102658, + "Size": 24653, + "VirtualSize": 180116135, + "Labels": { + "com.example.version": "v1" + } + } + ] + +**Example request, with digest information**: + + GET /v1.21/images/json?digests=1 HTTP/1.1 + +**Example response, with digest information**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Created": 1420064636, + "Id": "4986bf8c15363d1c5d15512d5266f8777bfba4974ac56e3270e7760f6f0a8125", + "ParentId": "ea13149945cb6b1e746bf28032f02e9b5a793523481a0a18645fc77ad53c4ea2", + "RepoDigests": [ + "localhost:5000/test/busybox@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf" + ], + "RepoTags": [ + "localhost:5000/test/busybox:latest", + "playdate:latest" + ], + "Size": 0, + "VirtualSize": 2429728, + "Labels": {} + } + ] + +The response shows a single image `Id` associated with two repositories +(`RepoTags`): `localhost:5000/test/busybox`: and `playdate`. A caller can use +either of the `RepoTags` values `localhost:5000/test/busybox:latest` or +`playdate:latest` to reference the image. + +You can also use `RepoDigests` values to reference an image. In this response, +the array has only one reference and that is to the +`localhost:5000/test/busybox` repository; the `playdate` repository has no +digest. You can reference this digest using the value: +`localhost:5000/test/busybox@sha256:cbbf2f9a99b47fc460d...` + +See the `docker run` and `docker build` commands for examples of digest and tag +references on the command line. + +**Query parameters**: + +- **all** – 1/True/true or 0/False/false, default false +- **filters** – a JSON encoded value of the filters (a map[string][]string) to process on the images list. Available filters: + - `dangling=true` + - `label=key` or `label="key=value"` of an image label +- **filter** - only return images with the specified name + +#### Build image from a Dockerfile + +`POST /build` + +Build an image from a Dockerfile + +**Example request**: + + POST /v1.21/build HTTP/1.1 + Content-Type: application/x-tar + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"stream": "Step 1/5..."} + {"stream": "..."} + {"error": "Error...", "errorDetail": {"code": 123, "message": "Error..."}} + +The input stream must be a `tar` archive compressed with one of the +following algorithms: `identity` (no compression), `gzip`, `bzip2`, `xz`. + +The archive must include a build instructions file, typically called +`Dockerfile` at the archive's root. The `dockerfile` parameter may be +used to specify a different build instructions file. To do this, its value must be +the path to the alternate build instructions file to use. 
+ +The archive may include any number of other files, +which are accessible in the build context (See the [*ADD build +command*](../reference/builder.md#add)). + +The Docker daemon performs a preliminary validation of the `Dockerfile` before +starting the build, and returns an error if the syntax is incorrect. After that, +each instruction is run one-by-one until the ID of the new image is output. + +The build is canceled if the client drops the connection by quitting +or being killed. + +**Query parameters**: + +- **dockerfile** - Path within the build context to the `Dockerfile`. This is + ignored if `remote` is specified and points to an external `Dockerfile`. +- **t** – A name and optional tag to apply to the image in the `name:tag` format. + If you omit the `tag` the default `latest` value is assumed. + You can provide one or more `t` parameters. +- **remote** – A Git repository URI or HTTP/HTTPS context URI. If the + URI points to a single text file, the file's contents are placed into + a file called `Dockerfile` and the image is built from that file. If + the URI points to a tarball, the file is downloaded by the daemon and + the contents therein used as the context for the build. If the URI + points to a tarball and the `dockerfile` parameter is also specified, + there must be a file with the corresponding path inside the tarball. +- **q** – Suppress verbose build output. +- **nocache** – Do not use the cache when building the image. +- **pull** - Attempt to pull the image even if an older image exists locally. +- **rm** - Remove intermediate containers after a successful build (default behavior). +- **forcerm** - Always remove intermediate containers (includes `rm`). +- **memory** - Set memory limit for build. +- **memswap** - Total memory (memory + swap), `-1` to enable unlimited swap. +- **cpushares** - CPU shares (relative weight). +- **cpusetcpus** - CPUs in which to allow execution (e.g., `0-3`, `0,1`). +- **cpuperiod** - The length of a CPU period in microseconds. +- **cpuquota** - Microseconds of CPU time that the container can get in a CPU period. +- **buildargs** – JSON map of string pairs for build-time variables. Users pass + these values at build-time. Docker uses the `buildargs` as the environment + context for command(s) run via the Dockerfile's `RUN` instruction or for + variable expansion in other Dockerfile instructions. This is not meant for + passing secret values. [Read more about the buildargs instruction](../reference/builder.md#arg) + +**Request Headers**: + +- **Content-type** – Set to `"application/x-tar"`. +- **X-Registry-Config** – A base64-url-safe-encoded Registry Auth Config JSON + object with the following structure: + + { + "docker.example.com": { + "username": "janedoe", + "password": "hunter2" + }, + "https://index.docker.io/v1/": { + "username": "mobydock", + "password": "conta1n3rize14" + } + } + + This object maps the hostname of a registry to an object containing the + "username" and "password" for that registry. Multiple registries may + be specified as the build may be based on an image requiring + authentication to pull from any arbitrary registry. Only the registry + domain name (and port if not the default "443") are required. However + (for legacy reasons) the "official" Docker, Inc. hosted registry must + be specified with both a "https://" prefix and a "/v1/" suffix even + though Docker will prefer to use the v2 registry API. 
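+
+A sketch of producing the header value: marshal the map shown above and apply
+URL-safe base64 (the credentials here are placeholders):
+
+```go
+package main
+
+import (
+	"encoding/base64"
+	"encoding/json"
+	"fmt"
+)
+
+func main() {
+	// Registry hostname -> credentials, as described above.
+	cfg := map[string]map[string]string{
+		"docker.example.com": {"username": "janedoe", "password": "hunter2"},
+	}
+	raw, err := json.Marshal(cfg)
+	if err != nil {
+		panic(err)
+	}
+	// The header expects URL-safe base64 encoding of the JSON object.
+	fmt.Println("X-Registry-Config:", base64.URLEncoding.EncodeToString(raw))
+}
+```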
+ +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Create an image + +`POST /images/create` + +Create an image either by pulling it from the registry or by importing it + +**Example request**: + + POST /v1.21/images/create?fromImage=busybox&tag=latest HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status": "Pulling..."} + {"status": "Pulling", "progress": "1 B/ 100 B", "progressDetail": {"current": 1, "total": 100}} + {"error": "Invalid..."} + ... + +When using this endpoint to pull an image from the registry, the +`X-Registry-Auth` header can be used to include +a base64-encoded AuthConfig object. + +**Query parameters**: + +- **fromImage** – Name of the image to pull. The name may include a tag or + digest. This parameter may only be used when pulling an image. +- **fromSrc** – Source to import. The value may be a URL from which the image + can be retrieved or `-` to read the image from the request body. + This parameter may only be used when importing an image. +- **repo** – Repository name given to an image when it is imported. + The repo may include a tag. This parameter may only be used when importing + an image. +- **tag** – Tag or digest. If empty when pulling an image, this causes all tags + for the given image to be pulled. + +**Request Headers**: + +- **X-Registry-Auth** – base64-encoded AuthConfig object + +**Status codes**: + +- **200** – no error +- **404** - repository does not exist or no read access +- **500** – server error + + + +#### Inspect an image + +`GET /images/(name)/json` + +Return low-level information on the image `name` + +**Example request**: + + GET /v1.21/images/example/json HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Id" : "85f05633ddc1c50679be2b16a0479ab6f7637f8884e0cfe0f4d20e1ebb3d6e7c", + "Container" : "cb91e48a60d01f1e27028b4fc6819f4f290b3cf12496c8176ec714d0d390984a", + "Comment" : "", + "Os" : "linux", + "Architecture" : "amd64", + "Parent" : "91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c", + "ContainerConfig" : { + "Tty" : false, + "Hostname" : "e611e15f9c9d", + "Volumes" : null, + "Domainname" : "", + "AttachStdout" : false, + "PublishService" : "", + "AttachStdin" : false, + "OpenStdin" : false, + "StdinOnce" : false, + "NetworkDisabled" : false, + "OnBuild" : [], + "Image" : "91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c", + "User" : "", + "WorkingDir" : "", + "Entrypoint" : null, + "MacAddress" : "", + "AttachStderr" : false, + "Labels" : { + "com.example.license" : "GPL", + "com.example.version" : "1.0", + "com.example.vendor" : "Acme" + }, + "Env" : [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + ], + "ExposedPorts" : null, + "Cmd" : [ + "/bin/sh", + "-c", + "#(nop) LABEL com.example.vendor=Acme com.example.license=GPL com.example.version=1.0" + ] + }, + "DockerVersion" : "1.9.0-dev", + "VirtualSize" : 188359297, + "Size" : 0, + "Author" : "", + "Created" : "2015-09-10T08:30:53.26995814Z", + "GraphDriver" : { + "Name" : "aufs", + "Data" : null + }, + "RepoDigests" : [ + "localhost:5000/test/busybox/example@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf" + ], + "RepoTags" : [ + "example:1.0", + "example:latest", + "example:stable" + ], + "Config" : { + "Image" : "91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c", + "NetworkDisabled" : false, + "OnBuild" : [], + "StdinOnce" : false, + "PublishService" : "", + 
"AttachStdin" : false, + "OpenStdin" : false, + "Domainname" : "", + "AttachStdout" : false, + "Tty" : false, + "Hostname" : "e611e15f9c9d", + "Volumes" : null, + "Cmd" : [ + "/bin/bash" + ], + "ExposedPorts" : null, + "Env" : [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + ], + "Labels" : { + "com.example.vendor" : "Acme", + "com.example.version" : "1.0", + "com.example.license" : "GPL" + }, + "Entrypoint" : null, + "MacAddress" : "", + "AttachStderr" : false, + "WorkingDir" : "", + "User" : "" + } + } + +**Status codes**: + +- **200** – no error +- **404** – no such image +- **500** – server error + +#### Get the history of an image + +`GET /images/(name)/history` + +Return the history of the image `name` + +**Example request**: + + GET /v1.21/images/ubuntu/history HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Id": "3db9c44f45209632d6050b35958829c3a2aa256d81b9a7be45b362ff85c54710", + "Created": 1398108230, + "CreatedBy": "/bin/sh -c #(nop) ADD file:eb15dbd63394e063b805a3c32ca7bf0266ef64676d5a6fab4801f2e81e2a5148 in /", + "Tags": [ + "ubuntu:lucid", + "ubuntu:10.04" + ], + "Size": 182964289, + "Comment": "" + }, + { + "Id": "6cfa4d1f33fb861d4d114f43b25abd0ac737509268065cdfd69d544a59c85ab8", + "Created": 1398108222, + "CreatedBy": "/bin/sh -c #(nop) MAINTAINER Tianon Gravi - mkimage-debootstrap.sh -i iproute,iputils-ping,ubuntu-minimal -t lucid.tar.xz lucid http://archive.ubuntu.com/ubuntu/", + "Tags": null, + "Size": 0, + "Comment": "" + }, + { + "Id": "511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158", + "Created": 1371157430, + "CreatedBy": "", + "Tags": [ + "scratch12:latest", + "scratch:latest" + ], + "Size": 0, + "Comment": "Imported from -" + } + ] + +**Status codes**: + +- **200** – no error +- **404** – no such image +- **500** – server error + +#### Push an image on the registry + +`POST /images/(name)/push` + +Push the image `name` on the registry + +**Example request**: + + POST /v1.21/images/test/push HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status": "Pushing..."} + {"status": "Pushing", "progress": "1/? (n/a)", "progressDetail": {"current": 1}}} + {"error": "Invalid..."} + ... + +If you wish to push an image on to a private registry, that image must already have a tag +into a repository which references that registry `hostname` and `port`. This repository name should +then be used in the URL. This duplicates the command line's flow. + +**Example request**: + + POST /v1.21/images/registry.acme.com:5000/test/push HTTP/1.1 + + +**Query parameters**: + +- **tag** – The tag to associate with the image on the registry. This is optional. + +**Request Headers**: + +- **X-Registry-Auth** – base64-encoded AuthConfig object. 
+ +**Status codes**: + +- **200** – no error +- **404** – no such image +- **500** – server error + +#### Tag an image into a repository + +`POST /images/(name)/tag` + +Tag the image `name` into a repository + +**Example request**: + + POST /v1.21/images/test/tag?repo=myrepo&force=0&tag=v42 HTTP/1.1 + +**Example response**: + + HTTP/1.1 201 Created + +**Query parameters**: + +- **repo** – The repository to tag in +- **force** – 1/True/true or 0/False/false, default false +- **tag** - The new tag name + +**Status codes**: + +- **201** – no error +- **400** – bad parameter +- **404** – no such image +- **409** – conflict +- **500** – server error + +#### Remove an image + +`DELETE /images/(name)` + +Remove the image `name` from the filesystem + +**Example request**: + + DELETE /v1.21/images/test HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-type: application/json + + [ + {"Untagged": "3e2f21a89f"}, + {"Deleted": "3e2f21a89f"}, + {"Deleted": "53b4f83ac9"} + ] + +**Query parameters**: + +- **force** – 1/True/true or 0/False/false, default false +- **noprune** – 1/True/true or 0/False/false, default false + +**Status codes**: + +- **200** – no error +- **404** – no such image +- **409** – conflict +- **500** – server error + +#### Search images + +`GET /images/search` + +Search for an image on [Docker Hub](https://hub.docker.com). + +> **Note**: +> The response keys have changed from API v1.6 to reflect the JSON +> sent by the registry server to the docker daemon's request. + +**Example request**: + + GET /v1.21/images/search?term=sshd HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "description": "", + "is_official": false, + "is_automated": false, + "name": "wma55/u1210sshd", + "star_count": 0 + }, + { + "description": "", + "is_official": false, + "is_automated": false, + "name": "jdswinbank/sshd", + "star_count": 0 + }, + { + "description": "", + "is_official": false, + "is_automated": false, + "name": "vgauthier/sshd", + "star_count": 0 + } + ... 
+ ] + +**Query parameters**: + +- **term** – term to search + +**Status codes**: + +- **200** – no error +- **500** – server error + +### 2.3 Misc + +#### Check auth configuration + +`POST /auth` + +Get the default username and email + +**Example request**: + + POST /v1.21/auth HTTP/1.1 + Content-Type: application/json + + { + "username": "hannibal", + "password": "xxxx", + "email": "hannibal@a-team.com", + "serveraddress": "https://index.docker.io/v1/" + } + +**Example response**: + + HTTP/1.1 200 OK + +**Status codes**: + +- **200** – no error +- **204** – no error +- **500** – server error + +#### Display system-wide information + +`GET /info` + +Display system-wide information + +**Example request**: + + GET /v1.21/info HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "ClusterStore": "etcd://localhost:2379", + "Containers": 11, + "CpuCfsPeriod": true, + "CpuCfsQuota": true, + "Debug": false, + "DockerRootDir": "/var/lib/docker", + "Driver": "btrfs", + "DriverStatus": [[""]], + "ExecutionDriver": "native-0.1", + "ExperimentalBuild": false, + "HttpProxy": "http://test:test@localhost:8080", + "HttpsProxy": "https://test:test@localhost:8080", + "ID": "7TRN:IPZB:QYBB:VPBQ:UMPP:KARE:6ZNR:XE6T:7EWV:PKF4:ZOJD:TPYS", + "IPv4Forwarding": true, + "Images": 16, + "IndexServerAddress": "https://index.docker.io/v1/", + "InitPath": "/usr/bin/docker", + "InitSha1": "", + "KernelVersion": "3.12.0-1-amd64", + "Labels": [ + "storage=ssd" + ], + "MemTotal": 2099236864, + "MemoryLimit": true, + "NCPU": 1, + "NEventsListener": 0, + "NFd": 11, + "NGoroutines": 21, + "Name": "prod-server-42", + "NoProxy": "9.81.1.160", + "OomKillDisable": true, + "OperatingSystem": "Boot2Docker", + "RegistryConfig": { + "IndexConfigs": { + "docker.io": { + "Mirrors": null, + "Name": "docker.io", + "Official": true, + "Secure": true + } + }, + "InsecureRegistryCIDRs": [ + "127.0.0.0/8" + ] + }, + "ServerVersion": "1.9.0", + "SwapLimit": false, + "SystemTime": "2015-03-10T11:11:23.730591467-07:00" + } + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Show the docker version information + +`GET /version` + +Show the docker version information + +**Example request**: + + GET /v1.21/version HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Version": "1.5.0", + "Os": "linux", + "KernelVersion": "3.18.5-tinycore64", + "GoVersion": "go1.4.1", + "GitCommit": "a8a31ef", + "Arch": "amd64", + "ApiVersion": "1.20", + "Experimental": false + } + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Ping the docker server + +`GET /_ping` + +Ping the docker server + +**Example request**: + + GET /v1.21/_ping HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: text/plain + + OK + +**Status codes**: + +- **200** - no error +- **500** - server error + +#### Create a new image from a container's changes + +`POST /commit` + +Create a new image from a container's changes + +**Example request**: + + POST /v1.21/commit?container=44c004db4b17&comment=message&repo=myrepo HTTP/1.1 + Content-Type: application/json + + { + "Hostname": "", + "Domainname": "", + "User": "", + "AttachStdin": false, + "AttachStdout": true, + "AttachStderr": true, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": null, + "Cmd": [ + "date" + ], + "Mounts": [ + { + "Source": "/data", + "Destination": "/data", + "Mode": "ro,Z", + "RW": false + } + ], + "Labels": { + "key1": "value1", + "key2": 
"value2" + }, + "WorkingDir": "", + "NetworkDisabled": false, + "ExposedPorts": { + "22/tcp": {} + } + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + {"Id": "596069db4bf5"} + +**JSON parameters**: + +- **config** - the container's configuration + +**Query parameters**: + +- **container** – source container +- **repo** – repository +- **tag** – tag +- **comment** – commit message +- **author** – author (e.g., "John Hannibal Smith + <[hannibal@a-team.com](mailto:hannibal%40a-team.com)>") +- **pause** – 1/True/true or 0/False/false, whether to pause the container before committing +- **changes** – Dockerfile instructions to apply while committing + +**Status codes**: + +- **201** – no error +- **404** – no such container +- **500** – server error + +#### Monitor Docker's events + +`GET /events` + +Get container events from docker, in real time via streaming. + +Docker containers report the following events: + + attach, commit, copy, create, destroy, die, exec_create, exec_start, export, kill, oom, pause, rename, resize, restart, start, stop, top, unpause + +Docker images report the following events: + + delete, import, pull, push, tag, untag + +**Example request**: + + GET /v1.21/events?since=1374067924 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status":"pull","id":"busybox:latest","time":1442421700,"timeNano":1442421700598988358} + {"status":"create","id":"5745704abe9caa5","from":"busybox","time":1442421716,"timeNano":1442421716853979870} + {"status":"attach","id":"5745704abe9caa5","from":"busybox","time":1442421716,"timeNano":1442421716894759198} + {"status":"start","id":"5745704abe9caa5","from":"busybox","time":1442421716,"timeNano":1442421716983607193} + +**Query parameters**: + +- **since** – Timestamp. Show all events created since timestamp and then stream +- **until** – Timestamp. Show events created until given timestamp and stop streaming +- **filters** – A json encoded value of the filters (a map[string][]string) to process on the event list. Available filters: + - `container=`; -- container to filter + - `event=`; -- event to filter + - `image=`; -- image to filter + - `label=`; -- image and container label to filter + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Get a tarball containing all images in a repository + +`GET /images/(name)/get` + +Get a tarball containing all images and metadata for the repository specified +by `name`. + +If `name` is a specific name and tag (e.g. ubuntu:latest), then only that image +(and its parents) are returned. If `name` is an image ID, similarly only that +image (and its parents) are returned, but with the exclusion of the +'repositories' file in the tarball, as there were no image names referenced. + +See the [image tarball format](#image-tarball-format) for more details. + +**Example request** + + GET /v1.21/images/ubuntu/get + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + Binary data stream + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Get a tarball containing all images + +`GET /images/get` + +Get a tarball containing all images and metadata for one or more repositories. + +For each value of the `names` parameter: if it is a specific name and tag (e.g. 
+`ubuntu:latest`), then only that image (and its parents) are returned; if it is +an image ID, similarly only that image (and its parents) are returned and there +would be no names referenced in the 'repositories' file for this image ID. + +See the [image tarball format](#image-tarball-format) for more details. + +**Example request** + + GET /v1.21/images/get?names=myname%2Fmyapp%3Alatest&names=busybox + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + Binary data stream + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Load a tarball with a set of images and tags into docker + +`POST /images/load` + +Load a set of images and tags into a Docker repository. +See the [image tarball format](#image-tarball-format) for more details. + +**Example request** + + POST /v1.21/images/load + Content-Type: application/x-tar + + Tarball in body + +**Example response**: + + HTTP/1.1 200 OK + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Image tarball format + +An image tarball contains one directory per image layer (named using its long ID), +each containing these files: + +- `VERSION`: currently `1.0` - the file format version +- `json`: detailed layer information, similar to `docker inspect layer_id` +- `layer.tar`: A tarfile containing the filesystem changes in this layer + +The `layer.tar` file contains `aufs` style `.wh..wh.aufs` files and directories +for storing attribute changes and deletions. + +If the tarball defines a repository, the tarball should also include a `repositories` file at +the root that contains a list of repository and tag names mapped to layer IDs. + +``` +{"hello-world": + {"latest": "565a9d68a73f6706862bfe8409a7f659776d4d60a8d096eb4a3cbce6999cc2a1"} +} +``` + +#### Exec Create + +`POST /containers/(id or name)/exec` + +Sets up an exec instance in a running container `id` + +**Example request**: + + POST /v1.21/containers/e90e34656806/exec HTTP/1.1 + Content-Type: application/json + + { + "AttachStdin": true, + "AttachStdout": true, + "AttachStderr": true, + "Cmd": ["sh"], + "Privileged": true, + "Tty": true, + "User": "123:456" + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + { + "Id": "f90e34656806", + "Warnings":[] + } + +**JSON parameters**: + +- **AttachStdin** - Boolean value, attaches to `stdin` of the `exec` command. +- **AttachStdout** - Boolean value, attaches to `stdout` of the `exec` command. +- **AttachStderr** - Boolean value, attaches to `stderr` of the `exec` command. +- **Tty** - Boolean value to allocate a pseudo-TTY. +- **Cmd** - Command to run specified as a string or an array of strings. +- **Privileged** - Boolean value, runs the exec process with extended privileges. +- **User** - A string value specifying the user, and optionally, group to run + the exec process inside the container. Format is one of: `"user"`, + `"user:group"`, `"uid"`, or `"uid:gid"`. + +**Status codes**: + +- **201** – no error +- **404** – no such container +- **409** - container is paused +- **500** - server error + +#### Exec Start + +`POST /exec/(id)/start` + +Starts a previously set up `exec` instance `id`. If `detach` is true, this API +returns after starting the `exec` command. Otherwise, this API sets up an +interactive session with the `exec` command. 
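+
+To make the two-step exec flow concrete, here is a minimal Python sketch that
+creates an exec instance and starts it detached. It assumes the third-party
+`requests` library and a daemon on `tcp://localhost:2375` (both assumptions of
+this sketch); the raw HTTP exchange follows below.
+
+```python
+import requests  # assumed third-party HTTP client
+
+DAEMON = "http://localhost:2375"  # assumed TCP endpoint for this sketch
+
+# 1. Set up the exec instance in a running container.
+create = requests.post(
+    DAEMON + "/v1.21/containers/e90e34656806/exec",
+    json={"AttachStdout": True, "AttachStderr": True, "Tty": False,
+          "Cmd": ["date"]},
+)
+exec_id = create.json()["Id"]
+
+# 2. Start it. With "Detach": true the call returns once the command is
+#    started; with false the connection is hijacked and carries the streams.
+requests.post(DAEMON + "/v1.21/exec/" + exec_id + "/start",
+              json={"Detach": True, "Tty": False})
+```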
+ +**Example request**: + + POST /v1.21/exec/e90e34656806/start HTTP/1.1 + Content-Type: application/json + + { + "Detach": false, + "Tty": false + } + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/vnd.docker.raw-stream + + {% raw %} + {{ STREAM }} + {% endraw %} + +**JSON parameters**: + +- **Detach** - Detach from the `exec` command. +- **Tty** - Boolean value to allocate a pseudo-TTY. + +**Status codes**: + +- **200** – no error +- **404** – no such exec instance +- **409** - container is paused + +**Stream details**: + +Similar to the stream behavior of `POST /containers/(id or name)/attach` API + +#### Exec Resize + +`POST /exec/(id)/resize` + +Resizes the `tty` session used by the `exec` command `id`. The unit is number of characters. +This API is valid only if `tty` was specified as part of creating and starting the `exec` command. + +**Example request**: + + POST /v1.21/exec/e90e34656806/resize?h=40&w=80 HTTP/1.1 + Content-Type: text/plain + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: text/plain + +**Query parameters**: + +- **h** – height of `tty` session +- **w** – width + +**Status codes**: + +- **201** – no error +- **404** – no such exec instance + +#### Exec Inspect + +`GET /exec/(id)/json` + +Return low-level information about the `exec` command `id`. + +**Example request**: + + GET /v1.21/exec/11fb006128e8ceb3942e7c58d77750f24210e35f879dd204ac975c184b820b39/json HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: plain/text + + { + "ID" : "11fb006128e8ceb3942e7c58d77750f24210e35f879dd204ac975c184b820b39", + "Running" : false, + "ExitCode" : 2, + "ProcessConfig" : { + "privileged" : false, + "user" : "", + "tty" : false, + "entrypoint" : "sh", + "arguments" : [ + "-c", + "exit 2" + ] + }, + "OpenStdin" : false, + "OpenStderr" : false, + "OpenStdout" : false, + "Container" : { + "State" : { + "Status" : "running", + "Running" : true, + "Paused" : false, + "Restarting" : false, + "OOMKilled" : false, + "Pid" : 3650, + "ExitCode" : 0, + "Error" : "", + "StartedAt" : "2014-11-17T22:26:03.717657531Z", + "FinishedAt" : "0001-01-01T00:00:00Z" + }, + "ID" : "8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c", + "Created" : "2014-11-17T22:26:03.626304998Z", + "Path" : "date", + "Args" : [], + "Config" : { + "Hostname" : "8f177a186b97", + "Domainname" : "", + "User" : "", + "AttachStdin" : false, + "AttachStdout" : false, + "AttachStderr" : false, + "ExposedPorts" : null, + "Tty" : false, + "OpenStdin" : false, + "StdinOnce" : false, + "Env" : [ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" ], + "Cmd" : [ + "date" + ], + "Image" : "ubuntu", + "Volumes" : null, + "WorkingDir" : "", + "Entrypoint" : null, + "NetworkDisabled" : false, + "MacAddress" : "", + "OnBuild" : null, + "SecurityOpt" : null + }, + "Image" : "5506de2b643be1e6febbf3b8a240760c6843244c41e12aa2f60ccbb7153d17f5", + "NetworkSettings" : { + "Bridge": "", + "SandboxID": "", + "HairpinMode": false, + "LinkLocalIPv6Address": "", + "LinkLocalIPv6PrefixLen": 0, + "Ports": null, + "SandboxKey": "", + "SecondaryIPAddresses": null, + "SecondaryIPv6Addresses": null, + "EndpointID": "", + "Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "IPAddress": "", + "IPPrefixLen": 0, + "IPv6Gateway": "", + "MacAddress": "", + "Networks": { + "bridge": { + "EndpointID": "", + "Gateway": "", + "IPAddress": "", + "IPPrefixLen": 0, + "IPv6Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "MacAddress": "" 
+ } + } + }, + "ResolvConfPath" : "/var/lib/docker/containers/8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c/resolv.conf", + "HostnamePath" : "/var/lib/docker/containers/8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c/hostname", + "HostsPath" : "/var/lib/docker/containers/8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c/hosts", + "LogPath": "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b-json.log", + "Name" : "/test", + "Driver" : "aufs", + "ExecDriver" : "native-0.2", + "MountLabel" : "", + "ProcessLabel" : "", + "AppArmorProfile" : "", + "RestartCount" : 0, + "Mounts" : [] + } + } + +**Status codes**: + +- **200** – no error +- **404** – no such exec instance +- **500** - server error + +### 2.4 Volumes + +#### List volumes + +`GET /volumes` + +**Example request**: + + GET /v1.21/volumes HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Volumes": [ + { + "Name": "tardis", + "Driver": "local", + "Mountpoint": "/var/lib/docker/volumes/tardis" + } + ] + } + +**Query parameters**: + +- **filters** - JSON encoded value of the filters (a `map[string][]string`) to process on the volumes list. There is one available filter: `dangling=true` + +**Status codes**: + +- **200** - no error +- **500** - server error + +#### Create a volume + +`POST /volumes/create` + +Create a volume + +**Example request**: + + POST /v1.21/volumes/create HTTP/1.1 + Content-Type: application/json + + { + "Name": "tardis" + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + { + "Name": "tardis", + "Driver": "local", + "Mountpoint": "/var/lib/docker/volumes/tardis" + } + +**Status codes**: + +- **201** - no error +- **500** - server error + +**JSON parameters**: + +- **Name** - The new volume's name. If not specified, Docker generates a name. +- **Driver** - Name of the volume driver to use. Defaults to `local` for the name. +- **DriverOpts** - A mapping of driver options and values. These options are + passed directly to the driver and are driver specific. + +#### Inspect a volume + +`GET /volumes/(name)` + +Return low-level information on the volume `name` + +**Example request**: + + GET /volumes/tardis + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Name": "tardis", + "Driver": "local", + "Mountpoint": "/var/lib/docker/volumes/tardis" + } + +**Status codes**: + +- **200** - no error +- **404** - no such volume +- **500** - server error + +#### Remove a volume + +`DELETE /volumes/(name)` + +Instruct the driver to remove the volume (`name`). 
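+
+Putting the volume endpoints together, a minimal lifecycle sketch in Python
+(assuming the third-party `requests` library and a daemon on
+`tcp://localhost:2375`, both assumptions of this sketch); the raw HTTP example
+for removal follows.
+
+```python
+import requests  # assumed third-party HTTP client
+
+DAEMON = "http://localhost:2375"  # assumed TCP endpoint for this sketch
+
+# Create, inspect, and remove a named volume.
+requests.post(DAEMON + "/v1.21/volumes/create", json={"Name": "tardis"})
+print(requests.get(DAEMON + "/v1.21/volumes/tardis").json()["Mountpoint"])
+resp = requests.delete(DAEMON + "/v1.21/volumes/tardis")
+assert resp.status_code == 204  # "no error", per the status codes below
+```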
+ +**Example request**: + + DELETE /v1.21/volumes/tardis HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Status codes**: + +- **204** - no error +- **404** - no such volume or volume driver +- **409** - volume is in use and cannot be removed +- **500** - server error + +### 2.5 Networks + +#### List networks + +`GET /networks` + +**Example request**: + + GET /v1.21/networks HTTP/1.1 + +**Example response**: + +``` +HTTP/1.1 200 OK +Content-Type: application/json + +[ + { + "Name": "bridge", + "Id": "f2de39df4171b0dc801e8002d1d999b77256983dfc63041c0f34030aa3977566", + "Scope": "local", + "Driver": "bridge", + "IPAM": { + "Driver": "default", + "Config": [ + { + "Subnet": "172.17.0.0/16" + } + ] + }, + "Containers": { + "39b69226f9d79f5634485fb236a23b2fe4e96a0a94128390a7fbbcc167065867": { + "EndpointID": "ed2419a97c1d9954d05b46e462e7002ea552f216e9b136b80a7db8d98b442eda", + "MacAddress": "02:42:ac:11:00:02", + "IPv4Address": "172.17.0.2/16", + "IPv6Address": "" + } + }, + "Options": { + "com.docker.network.bridge.default_bridge": "true", + "com.docker.network.bridge.enable_icc": "true", + "com.docker.network.bridge.enable_ip_masquerade": "true", + "com.docker.network.bridge.host_binding_ipv4": "0.0.0.0", + "com.docker.network.bridge.name": "docker0", + "com.docker.network.driver.mtu": "1500" + } + }, + { + "Name": "none", + "Id": "e086a3893b05ab69242d3c44e49483a3bbbd3a26b46baa8f61ab797c1088d794", + "Scope": "local", + "Driver": "null", + "IPAM": { + "Driver": "default", + "Config": [] + }, + "Containers": {}, + "Options": {} + }, + { + "Name": "host", + "Id": "13e871235c677f196c4e1ecebb9dc733b9b2d2ab589e30c539efeda84a24215e", + "Scope": "local", + "Driver": "host", + "IPAM": { + "Driver": "default", + "Config": [] + }, + "Containers": {}, + "Options": {} + } +] +``` + +**Query parameters**: + +- **filters** - JSON encoded value of the filters (a `map[string][]string`) to process on the networks list. 
Available filters: `name=[network-names]`, `id=[network-ids]`
+
+**Status codes**:
+
+- **200** - no error
+- **500** - server error
+
+#### Inspect network
+
+`GET /networks/(id or name)`
+
+Return low-level information on the network `id`
+
+**Example request**:
+
+    GET /v1.21/networks/f2de39df4171b0dc801e8002d1d999b77256983dfc63041c0f34030aa3977566 HTTP/1.1
+
+**Example response**:
+
+```
+HTTP/1.1 200 OK
+Content-Type: application/json
+
+{
+  "Name": "bridge",
+  "Id": "f2de39df4171b0dc801e8002d1d999b77256983dfc63041c0f34030aa3977566",
+  "Scope": "local",
+  "Driver": "bridge",
+  "IPAM": {
+    "Driver": "default",
+    "Config": [
+      {
+        "Subnet": "172.17.0.0/16"
+      }
+    ]
+  },
+  "Containers": {
+    "39b69226f9d79f5634485fb236a23b2fe4e96a0a94128390a7fbbcc167065867": {
+      "EndpointID": "ed2419a97c1d9954d05b46e462e7002ea552f216e9b136b80a7db8d98b442eda",
+      "MacAddress": "02:42:ac:11:00:02",
+      "IPv4Address": "172.17.0.2/16",
+      "IPv6Address": ""
+    }
+  },
+  "Options": {
+    "com.docker.network.bridge.default_bridge": "true",
+    "com.docker.network.bridge.enable_icc": "true",
+    "com.docker.network.bridge.enable_ip_masquerade": "true",
+    "com.docker.network.bridge.host_binding_ipv4": "0.0.0.0",
+    "com.docker.network.bridge.name": "docker0",
+    "com.docker.network.driver.mtu": "1500"
+  }
+}
+```
+
+**Status codes**:
+
+- **200** - no error
+- **404** - network not found
+- **500** - server error
+
+#### Create a network
+
+`POST /networks/create`
+
+Create a network
+
+**Example request**:
+
+```
+POST /v1.21/networks/create HTTP/1.1
+Content-Type: application/json
+
+{
+  "Name":"isolated_nw",
+  "CheckDuplicate":true,
+  "Driver":"bridge",
+  "IPAM":{
+    "Driver": "default",
+    "Config":[
+      {
+        "Subnet":"172.20.0.0/16",
+        "IPRange":"172.20.10.0/24",
+        "Gateway":"172.20.10.11"
+      }
+    ]
+  }
+}
+```
+
+**Example response**:
+
+```
+HTTP/1.1 201 Created
+Content-Type: application/json
+
+{
+  "Id": "22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30",
+  "Warning": ""
+}
+```
+
+**Status codes**:
+
+- **201** - no error
+- **404** - plugin not found
+- **500** - server error
+
+**JSON parameters**:
+
+- **Name** - The new network's name. This is a mandatory field.
+- **CheckDuplicate** - Requests the daemon to check for networks with the same name. Defaults to `false`.
+  Since a network is primarily keyed on a randomly generated ID rather than on its name,
+  and a network name is strictly a user-friendly alias for a network that is uniquely
+  identified by its ID, there is no guaranteed way to check for duplicates across a
+  cluster of Docker hosts. `CheckDuplicate` provides best-effort detection of networks
+  with the same name, but it is not guaranteed to catch all name collisions.
+- **Driver** - Name of the network driver plugin to use. Defaults to the `bridge` driver.
+- **IPAM** - Optional custom IP scheme for the network
+  - **Driver** - Name of the IPAM driver to use.
Defaults to `default` driver + - **Config** - List of IPAM configuration options, specified as a map: + `{"Subnet": , "IPRange": , "Gateway": , "AuxAddress": }` +- **Options** - Network specific options to be used by the drivers + +#### Connect a container to a network + +`POST /networks/(id or name)/connect` + +Connect a container to a network + +**Example request**: + +``` +POST /v1.21/networks/22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30/connect HTTP/1.1 +Content-Type: application/json + +{ + "Container":"3613f73ba0e4" +} +``` + +**Example response**: + + HTTP/1.1 200 OK + +**Status codes**: + +- **200** - no error +- **404** - network or container is not found +- **500** - Internal Server Error + +**JSON parameters**: + +- **container** - container-id/name to be connected to the network + +#### Disconnect a container from a network + +`POST /networks/(id or name)/disconnect` + +Disconnect a container from a network + +**Example request**: + +``` +POST /v1.21/networks/22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30/disconnect HTTP/1.1 +Content-Type: application/json + +{ + "Container":"3613f73ba0e4" +} +``` + +**Example response**: + + HTTP/1.1 200 OK + +**Status codes**: + +- **200** - no error +- **404** - network or container not found +- **500** - Internal Server Error + +**JSON parameters**: + +- **Container** - container-id/name to be disconnected from a network + +#### Remove a network + +`DELETE /networks/(id or name)` + +Instruct the driver to remove the network (`id`). + +**Example request**: + + DELETE /v1.21/networks/22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + +**Status codes**: + +- **200** - no error +- **403** - operation not supported for pre-defined networks +- **404** - no such network +- **500** - server error + +## 3. Going further + +### 3.1 Inside `docker run` + +As an example, the `docker run` command line makes the following API calls: + +- Create the container + +- If the status code is 404, it means the image doesn't exist: + - Try to pull it. + - Then, retry to create the container. + +- Start the container. + +- If you are not in detached mode: +- Attach to the container, using `logs=1` (to have `stdout` and + `stderr` from the container's start) and `stream=1` + +- If in detached mode or only `stdin` is attached, display the container's id. + +### 3.2 Hijacking + +In this version of the API, `/attach`, uses hijacking to transport `stdin`, +`stdout`, and `stderr` on the same socket. + +To hint potential proxies about connection hijacking, Docker client sends +connection upgrade headers similarly to websocket. + + Upgrade: tcp + Connection: Upgrade + +When Docker daemon detects the `Upgrade` header, it switches its status code +from **200 OK** to **101 UPGRADED** and resends the same headers. + + +### 3.3 CORS Requests + +To set cross origin requests to the Engine API please give values to +`--api-cors-header` when running Docker in daemon mode. 
Set * (asterisk) allows all, +default or blank means CORS disabled + + $ dockerd -H="192.168.1.9:2375" --api-cors-header="http://foo.bar" diff --git a/components/engine/docs/api/v1.22.md b/components/engine/docs/api/v1.22.md new file mode 100644 index 0000000000..9bf64b7e94 --- /dev/null +++ b/components/engine/docs/api/v1.22.md @@ -0,0 +1,3319 @@ +--- +title: "Engine API v1.22" +description: "API Documentation for Docker" +keywords: "API, Docker, rcli, REST, documentation" +redirect_from: +- /engine/reference/api/docker_remote_api_v1.22/ +- /reference/api/docker_remote_api_v1.22/ +--- + + + +## 1. Brief introduction + + - The daemon listens on `unix:///var/run/docker.sock` but you can + [Bind Docker to another host/port or a Unix socket](../reference/commandline/dockerd.md#bind-docker-to-another-host-port-or-a-unix-socket). + - The API tends to be REST. However, for some complex commands, like `attach` + or `pull`, the HTTP connection is hijacked to transport `stdout`, + `stdin` and `stderr`. + +## 2. Endpoints + +### 2.1 Containers + +#### List containers + +`GET /containers/json` + +List containers + +**Example request**: + + GET /v1.22/containers/json?all=1&before=8dfafdbc3a40&size=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Id": "8dfafdbc3a40", + "Names":["/boring_feynman"], + "Image": "ubuntu:latest", + "ImageID": "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82", + "Command": "echo 1", + "Created": 1367854155, + "Status": "Exit 0", + "Ports": [{"PrivatePort": 2222, "PublicPort": 3333, "Type": "tcp"}], + "Labels": { + "com.example.vendor": "Acme", + "com.example.license": "GPL", + "com.example.version": "1.0" + }, + "SizeRw": 12288, + "SizeRootFs": 0, + "HostConfig": { + "NetworkMode": "default" + }, + "NetworkSettings": { + "Networks": { + "bridge": { + "IPAMConfig": null, + "Links": null, + "Aliases": null, + "NetworkID": "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812", + "EndpointID": "2cdc4edb1ded3631c81f57966563e5c8525b81121bb3706a9a9a3ae102711f3f", + "Gateway": "172.17.0.1", + "IPAddress": "172.17.0.2", + "IPPrefixLen": 16, + "IPv6Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "MacAddress": "02:42:ac:11:00:02" + } + } + } + }, + { + "Id": "9cd87474be90", + "Names":["/coolName"], + "Image": "ubuntu:latest", + "ImageID": "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82", + "Command": "echo 222222", + "Created": 1367854155, + "Status": "Exit 0", + "Ports": [], + "Labels": {}, + "SizeRw": 12288, + "SizeRootFs": 0, + "HostConfig": { + "NetworkMode": "default" + }, + "NetworkSettings": { + "Networks": { + "bridge": { + "IPAMConfig": null, + "Links": null, + "Aliases": null, + "NetworkID": "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812", + "EndpointID": "88eaed7b37b38c2a3f0c4bc796494fdf51b270c2d22656412a2ca5d559a64d7a", + "Gateway": "172.17.0.1", + "IPAddress": "172.17.0.8", + "IPPrefixLen": 16, + "IPv6Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "MacAddress": "02:42:ac:11:00:08" + } + } + } + }, + { + "Id": "3176a2479c92", + "Names":["/sleepy_dog"], + "Image": "ubuntu:latest", + "ImageID": "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82", + "Command": "echo 3333333333333333", + "Created": 1367854154, + "Status": "Exit 0", + "Ports":[], + "Labels": {}, + "SizeRw":12288, + "SizeRootFs":0, + "HostConfig": { + "NetworkMode": "default" + }, + "NetworkSettings": { + "Networks": { + "bridge": { + 
"IPAMConfig": null, + "Links": null, + "Aliases": null, + "NetworkID": "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812", + "EndpointID": "8b27c041c30326d59cd6e6f510d4f8d1d570a228466f956edf7815508f78e30d", + "Gateway": "172.17.0.1", + "IPAddress": "172.17.0.6", + "IPPrefixLen": 16, + "IPv6Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "MacAddress": "02:42:ac:11:00:06" + } + } + } + }, + { + "Id": "4cb07b47f9fb", + "Names":["/running_cat"], + "Image": "ubuntu:latest", + "ImageID": "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82", + "Command": "echo 444444444444444444444444444444444", + "Created": 1367854152, + "Status": "Exit 0", + "Ports": [], + "Labels": {}, + "SizeRw": 12288, + "SizeRootFs": 0, + "HostConfig": { + "NetworkMode": "default" + }, + "NetworkSettings": { + "Networks": { + "bridge": { + "IPAMConfig": null, + "Links": null, + "Aliases": null, + "NetworkID": "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812", + "EndpointID": "d91c7b2f0644403d7ef3095985ea0e2370325cd2332ff3a3225c4247328e66e9", + "Gateway": "172.17.0.1", + "IPAddress": "172.17.0.5", + "IPPrefixLen": 16, + "IPv6Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "MacAddress": "02:42:ac:11:00:05" + } + } + } + } + ] + +**Query parameters**: + +- **all** – 1/True/true or 0/False/false, Show all containers. + Only running containers are shown by default (i.e., this defaults to false) +- **limit** – Show `limit` last created + containers, include non-running ones. +- **since** – Show only containers created since Id, include + non-running ones. +- **before** – Show only containers created before Id, include + non-running ones. +- **size** – 1/True/true or 0/False/false, Show the containers + sizes +- **filters** - a JSON encoded value of the filters (a `map[string][]string`) to process on the containers list. 
Available filters: + - `exited=`; -- containers with exit code of `` ; + - `status=`(`created`|`restarting`|`running`|`paused`|`exited`|`dead`) + - `label=key` or `label="key=value"` of a container label + - `isolation=`(`default`|`process`|`hyperv`) (Windows daemon only) + +**Status codes**: + +- **200** – no error +- **400** – bad parameter +- **500** – server error + +#### Create a container + +`POST /containers/create` + +Create a container + +**Example request**: + + POST /v1.22/containers/create HTTP/1.1 + Content-Type: application/json + + { + "Hostname": "", + "Domainname": "", + "User": "", + "AttachStdin": false, + "AttachStdout": true, + "AttachStderr": true, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": [ + "FOO=bar", + "BAZ=quux" + ], + "Cmd": [ + "date" + ], + "Entrypoint": null, + "Image": "ubuntu", + "Labels": { + "com.example.vendor": "Acme", + "com.example.license": "GPL", + "com.example.version": "1.0" + }, + "Volumes": { + "/volumes/data": {} + }, + "WorkingDir": "", + "NetworkDisabled": false, + "MacAddress": "12:34:56:78:9a:bc", + "ExposedPorts": { + "22/tcp": {} + }, + "StopSignal": "SIGTERM", + "HostConfig": { + "Binds": ["/tmp:/tmp"], + "Tmpfs": { "/run": "rw,noexec,nosuid,size=65536k" }, + "Links": ["redis3:redis"], + "Memory": 0, + "MemorySwap": 0, + "MemoryReservation": 0, + "KernelMemory": 0, + "CpuShares": 512, + "CpuPeriod": 100000, + "CpuQuota": 50000, + "CpusetCpus": "0,1", + "CpusetMems": "0,1", + "BlkioWeight": 300, + "BlkioWeightDevice": [{}], + "BlkioDeviceReadBps": [{}], + "BlkioDeviceReadIOps": [{}], + "BlkioDeviceWriteBps": [{}], + "BlkioDeviceWriteIOps": [{}], + "MemorySwappiness": 60, + "OomKillDisable": false, + "OomScoreAdj": 500, + "PidMode": "", + "PortBindings": { "22/tcp": [{ "HostPort": "11022" }] }, + "PublishAllPorts": false, + "Privileged": false, + "ReadonlyRootfs": false, + "Dns": ["8.8.8.8"], + "DnsOptions": [""], + "DnsSearch": [""], + "ExtraHosts": null, + "VolumesFrom": ["parent", "other:ro"], + "CapAdd": ["NET_ADMIN"], + "CapDrop": ["MKNOD"], + "GroupAdd": ["newgroup"], + "RestartPolicy": { "Name": "", "MaximumRetryCount": 0 }, + "NetworkMode": "bridge", + "Devices": [], + "Ulimits": [{}], + "LogConfig": { "Type": "json-file", "Config": {} }, + "SecurityOpt": [], + "CgroupParent": "", + "VolumeDriver": "", + "ShmSize": 67108864 + }, + "NetworkingConfig": { + "EndpointsConfig": { + "isolated_nw" : { + "IPAMConfig": { + "IPv4Address":"172.20.30.33", + "IPv6Address":"2001:db8:abcd::3033" + }, + "Links":["container_1", "container_2"], + "Aliases":["server_x", "server_y"] + } + } + } + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + { + "Id":"e90e34656806", + "Warnings":[] + } + +**JSON parameters**: + +- **Hostname** - A string value containing the hostname to use for the + container. +- **Domainname** - A string value containing the domain name to use + for the container. +- **User** - A string value specifying the user inside the container. +- **AttachStdin** - Boolean value, attaches to `stdin`. +- **AttachStdout** - Boolean value, attaches to `stdout`. +- **AttachStderr** - Boolean value, attaches to `stderr`. +- **Tty** - Boolean value, Attach standard streams to a `tty`, including `stdin` if it is not closed. +- **OpenStdin** - Boolean value, opens `stdin`, +- **StdinOnce** - Boolean value, close `stdin` after the 1 attached client disconnects. 
+
+- **Env** - A list of environment variables in the form of `["VAR=value", ...]`
+- **Labels** - Adds a map of labels to a container. To specify a map: `{"key":"value", ... }`
+- **Cmd** - Command to run specified as a string or an array of strings.
+- **Entrypoint** - Set the entry point for the container as a string or an array
+  of strings.
+- **Image** - A string specifying the image name to use for the container.
+- **Volumes** - An object mapping mount point paths (strings) inside the
+  container to empty objects.
+- **WorkingDir** - A string specifying the working directory for commands to
+  run in.
+- **NetworkDisabled** - Boolean value, when true disables networking for the
+  container.
+- **ExposedPorts** - An object mapping ports to an empty object in the form of:
+  `"ExposedPorts": { "<port>/<tcp|udp>": {} }`
+- **StopSignal** - Signal to stop a container as a string or unsigned integer. `SIGTERM` by default.
+- **HostConfig**
+  - **Binds** – A list of volume bindings for this container. Each volume binding is a string in one of these forms:
+    + `host-src:container-dest` to bind-mount a host path into the
+      container. Both `host-src` and `container-dest` must be
+      _absolute_ paths.
+    + `host-src:container-dest:ro` to make the bind-mount read-only
+      inside the container. Both `host-src` and `container-dest` must be
+      _absolute_ paths.
+    + `volume-name:container-dest` to bind-mount a volume managed by a
+      volume driver into the container. `container-dest` must be an
+      _absolute_ path.
+    + `volume-name:container-dest:ro` to mount the volume read-only
+      inside the container. `container-dest` must be an _absolute_ path.
+  - **Tmpfs** – A map of container directories which should be replaced by tmpfs mounts, and their corresponding
+    mount options. A JSON object in the form `{ "/run": "rw,noexec,nosuid,size=65536k" }`.
+  - **Links** - A list of links for the container. Each link entry should be
+    in the form of `container_name:alias`.
+  - **Memory** - Memory limit in bytes.
+  - **MemorySwap** - Total memory limit (memory + swap); set `-1` to enable unlimited swap.
+    You must use this with `memory` and make the swap value larger than `memory`.
+  - **MemoryReservation** - Memory soft limit in bytes.
+  - **KernelMemory** - Kernel memory limit in bytes.
+  - **CpuShares** - An integer value containing the container's CPU Shares
+    (i.e., the relative weight vs. other containers).
+  - **CpuPeriod** - The length of a CPU period in microseconds.
+  - **CpuQuota** - Microseconds of CPU time that the container can get in a CPU period.
+  - **CpusetCpus** - String value containing the `cgroups CpusetCpus` to use.
+  - **CpusetMems** - Memory nodes (MEMs) in which to allow execution (`0-3`, `0,1`). Only effective on NUMA systems.
+  - **BlkioWeight** - Block IO weight (relative weight); accepts a weight value between 10 and 1000.
+ - **BlkioWeightDevice** - Block IO weight (relative device weight) in the form of: `"BlkioWeightDevice": [{"Path": "device_path", "Weight": weight}]` + - **BlkioDeviceReadBps** - Limit read rate (bytes per second) from a device in the form of: `"BlkioDeviceReadBps": [{"Path": "device_path", "Rate": rate}]`, for example: + `"BlkioDeviceReadBps": [{"Path": "/dev/sda", "Rate": "1024"}]"` + - **BlkioDeviceWriteBps** - Limit write rate (bytes per second) to a device in the form of: `"BlkioDeviceWriteBps": [{"Path": "device_path", "Rate": rate}]`, for example: + `"BlkioDeviceWriteBps": [{"Path": "/dev/sda", "Rate": "1024"}]"` + - **BlkioDeviceReadIOps** - Limit read rate (IO per second) from a device in the form of: `"BlkioDeviceReadIOps": [{"Path": "device_path", "Rate": rate}]`, for example: + `"BlkioDeviceReadIOps": [{"Path": "/dev/sda", "Rate": "1000"}]` + - **BlkioDeviceWriteIOps** - Limit write rate (IO per second) to a device in the form of: `"BlkioDeviceWriteIOps": [{"Path": "device_path", "Rate": rate}]`, for example: + `"BlkioDeviceWriteIOps": [{"Path": "/dev/sda", "Rate": "1000"}]` + - **MemorySwappiness** - Tune a container's memory swappiness behavior. Accepts an integer between 0 and 100. + - **OomKillDisable** - Boolean value, whether to disable OOM Killer for the container or not. + - **OomScoreAdj** - An integer value containing the score given to the container in order to tune OOM killer preferences. + - **PidMode** - Set the PID (Process) Namespace mode for the container; + `"container:"`: joins another container's PID namespace + `"host"`: use the host's PID namespace inside the container + - **PortBindings** - A map of exposed container ports and the host port they + should map to. A JSON object in the form + `{ /: [{ "HostPort": "" }] }` + Take note that `port` is specified as a string and not an integer value. + - **PublishAllPorts** - Allocates a random host port for all of a container's + exposed ports. Specified as a boolean value. + - **Privileged** - Gives the container full access to the host. Specified as + a boolean value. + - **ReadonlyRootfs** - Mount the container's root filesystem as read only. + Specified as a boolean value. + - **Dns** - A list of DNS servers for the container to use. + - **DnsOptions** - A list of DNS options + - **DnsSearch** - A list of DNS search domains + - **ExtraHosts** - A list of hostnames/IP mappings to add to the + container's `/etc/hosts` file. Specified in the form `["hostname:IP"]`. + - **VolumesFrom** - A list of volumes to inherit from another container. + Specified in the form `[:]` + - **CapAdd** - A list of kernel capabilities to add to the container. + - **Capdrop** - A list of kernel capabilities to drop from the container. + - **GroupAdd** - A list of additional groups that the container process will run as + - **RestartPolicy** – The behavior to apply when the container exits. The + value is an object with a `Name` property of either `"always"` to + always restart, `"unless-stopped"` to restart always except when + user has manually stopped the container or `"on-failure"` to restart only when the container + exit code is non-zero. If `on-failure` is used, `MaximumRetryCount` + controls the number of times to retry before giving up. + The default is not to restart. (optional) + An ever increasing delay (double the previous delay, starting at 100mS) + is added before each restart to prevent flooding the server. + - **NetworkMode** - Sets the networking mode for the container. 
Supported + standard values are: `bridge`, `host`, `none`, and `container:`. Any other value is taken + as a custom network's name to which this container should connect to. + - **Devices** - A list of devices to add to the container specified as a JSON object in the + form + `{ "PathOnHost": "/dev/deviceName", "PathInContainer": "/dev/deviceName", "CgroupPermissions": "mrw"}` + - **Ulimits** - A list of ulimits to set in the container, specified as + `{ "Name": , "Soft": , "Hard": }`, for example: + `Ulimits: { "Name": "nofile", "Soft": 1024, "Hard": 2048 }` + - **SecurityOpt**: A list of string values to customize labels for MLS + systems, such as SELinux. + - **LogConfig** - Log configuration for the container, specified as a JSON object in the form + `{ "Type": "", "Config": {"key1": "val1"}}`. + Available types: `json-file`, `syslog`, `journald`, `gelf`, `awslogs`, `splunk`, `none`. + `json-file` logging driver. + - **CgroupParent** - Path to `cgroups` under which the container's `cgroup` is created. If the path is not absolute, the path is considered to be relative to the `cgroups` path of the init process. Cgroups are created if they do not already exist. + - **VolumeDriver** - Driver that this container users to mount volumes. + - **ShmSize** - Size of `/dev/shm` in bytes. The size must be greater than 0. If omitted the system uses 64MB. + +**Query parameters**: + +- **name** – Assign the specified name to the container. Must + match `/?[a-zA-Z0-9_-]+`. + +**Status codes**: + +- **201** – no error +- **400** – bad parameter +- **404** – no such container +- **406** – impossible to attach (container not running) +- **409** – conflict +- **500** – server error + +#### Inspect a container + +`GET /containers/(id or name)/json` + +Return low-level information on the container `id` + +**Example request**: + + GET /v1.22/containers/4fa6e0f0c678/json HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "AppArmorProfile": "", + "Args": [ + "-c", + "exit 9" + ], + "Config": { + "AttachStderr": true, + "AttachStdin": false, + "AttachStdout": true, + "Cmd": [ + "/bin/sh", + "-c", + "exit 9" + ], + "Domainname": "", + "Entrypoint": null, + "Env": [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + ], + "ExposedPorts": null, + "Hostname": "ba033ac44011", + "Image": "ubuntu", + "Labels": { + "com.example.vendor": "Acme", + "com.example.license": "GPL", + "com.example.version": "1.0" + }, + "MacAddress": "", + "NetworkDisabled": false, + "OnBuild": null, + "OpenStdin": false, + "StdinOnce": false, + "Tty": false, + "User": "", + "Volumes": { + "/volumes/data": {} + }, + "WorkingDir": "", + "StopSignal": "SIGTERM" + }, + "Created": "2015-01-06T15:47:31.485331387Z", + "Driver": "devicemapper", + "ExecIDs": null, + "HostConfig": { + "Binds": null, + "BlkioWeight": 0, + "BlkioWeightDevice": [{}], + "BlkioDeviceReadBps": [{}], + "BlkioDeviceWriteBps": [{}], + "BlkioDeviceReadIOps": [{}], + "BlkioDeviceWriteIOps": [{}], + "CapAdd": null, + "CapDrop": null, + "ContainerIDFile": "", + "CpusetCpus": "", + "CpusetMems": "", + "CpuShares": 0, + "CpuPeriod": 100000, + "Devices": [], + "Dns": null, + "DnsOptions": null, + "DnsSearch": null, + "ExtraHosts": null, + "IpcMode": "", + "Links": null, + "LxcConf": [], + "Memory": 0, + "MemorySwap": 0, + "MemoryReservation": 0, + "KernelMemory": 0, + "OomKillDisable": false, + "OomScoreAdj": 500, + "NetworkMode": "bridge", + "PidMode": "", + "PortBindings": {}, + "Privileged": false, + "ReadonlyRootfs": 
false, + "PublishAllPorts": false, + "RestartPolicy": { + "MaximumRetryCount": 2, + "Name": "on-failure" + }, + "LogConfig": { + "Config": null, + "Type": "json-file" + }, + "SecurityOpt": null, + "VolumesFrom": null, + "Ulimits": [{}], + "VolumeDriver": "", + "ShmSize": 67108864 + }, + "HostnamePath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hostname", + "HostsPath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hosts", + "LogPath": "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b-json.log", + "Id": "ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39", + "Image": "04c5d3b7b0656168630d3ba35d8889bd0e9caafcaeb3004d2bfbc47e7c5d35d2", + "MountLabel": "", + "Name": "/boring_euclid", + "NetworkSettings": { + "Bridge": "", + "SandboxID": "", + "HairpinMode": false, + "LinkLocalIPv6Address": "", + "LinkLocalIPv6PrefixLen": 0, + "Ports": null, + "SandboxKey": "", + "SecondaryIPAddresses": null, + "SecondaryIPv6Addresses": null, + "EndpointID": "", + "Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "IPAddress": "", + "IPPrefixLen": 0, + "IPv6Gateway": "", + "MacAddress": "", + "Networks": { + "bridge": { + "NetworkID": "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812", + "EndpointID": "7587b82f0dada3656fda26588aee72630c6fab1536d36e394b2bfbcf898c971d", + "Gateway": "172.17.0.1", + "IPAddress": "172.17.0.2", + "IPPrefixLen": 16, + "IPv6Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "MacAddress": "02:42:ac:12:00:02" + } + } + }, + "Path": "/bin/sh", + "ProcessLabel": "", + "ResolvConfPath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/resolv.conf", + "RestartCount": 1, + "State": { + "Error": "", + "ExitCode": 9, + "FinishedAt": "2015-01-06T15:47:32.080254511Z", + "OOMKilled": false, + "Dead": false, + "Paused": false, + "Pid": 0, + "Restarting": false, + "Running": true, + "StartedAt": "2015-01-06T15:47:32.072697474Z", + "Status": "running" + }, + "Mounts": [ + { + "Name": "fac362...80535", + "Source": "/data", + "Destination": "/data", + "Driver": "local", + "Mode": "ro,Z", + "RW": false, + "Propagation": "" + } + ] + } + +**Example request, with size information**: + + GET /v1.22/containers/4fa6e0f0c678/json?size=1 HTTP/1.1 + +**Example response, with size information**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + .... + "SizeRw": 0, + "SizeRootFs": 972, + .... + } + +**Query parameters**: + +- **size** – 1/True/true or 0/False/false, return container size information. Default is `false`. + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### List processes running inside a container + +`GET /containers/(id or name)/top` + +List processes running inside the container `id`. On Unix systems this +is done by running the `ps` command. This endpoint is not +supported on Windows. 
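+
+For orientation, a small Python sketch that joins the returned `Titles` and
+`Processes` arrays into one record per process (assuming the third-party
+`requests` library and a daemon on `tcp://localhost:2375`, both assumptions of
+this sketch); raw HTTP examples follow.
+
+```python
+import requests  # assumed third-party HTTP client
+
+DAEMON = "http://localhost:2375"  # assumed TCP endpoint for this sketch
+
+top = requests.get(DAEMON + "/v1.22/containers/4fa6e0f0c678/top",
+                   params={"ps_args": "aux"}).json()
+for process in top["Processes"]:
+    # Pair each column title with the matching field of the process row.
+    print(dict(zip(top["Titles"], process)))
+```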
+
+**Example request**:
+
+    GET /v1.22/containers/4fa6e0f0c678/top HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    {
+       "Titles" : [
+         "UID", "PID", "PPID", "C", "STIME", "TTY", "TIME", "CMD"
+       ],
+       "Processes" : [
+         [
+           "root", "13642", "882", "0", "17:03", "pts/0", "00:00:00", "/bin/bash"
+         ],
+         [
+           "root", "13735", "13642", "0", "17:06", "pts/0", "00:00:00", "sleep 10"
+         ]
+       ]
+    }
+
+**Example request**:
+
+    GET /v1.22/containers/4fa6e0f0c678/top?ps_args=aux HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    {
+      "Titles" : [
+        "USER","PID","%CPU","%MEM","VSZ","RSS","TTY","STAT","START","TIME","COMMAND"
+      ],
+      "Processes" : [
+        [
+          "root","13642","0.0","0.1","18172","3184","pts/0","Ss","17:03","0:00","/bin/bash"
+        ],
+        [
+          "root","13895","0.0","0.0","4348","692","pts/0","S+","17:15","0:00","sleep 10"
+        ]
+      ]
+    }
+
+**Query parameters**:
+
+- **ps_args** – `ps` arguments to use (e.g., `aux`), defaults to `-ef`
+
+**Status codes**:
+
+- **200** – no error
+- **404** – no such container
+- **500** – server error
+
+#### Get container logs
+
+`GET /containers/(id or name)/logs`
+
+Get `stdout` and `stderr` logs from the container `id`
+
+> **Note**:
+> This endpoint works only for containers with the `json-file` or `journald` logging drivers.
+
+**Example request**:
+
+    GET /v1.22/containers/4fa6e0f0c678/logs?stderr=1&stdout=1&timestamps=1&follow=1&tail=10&since=1428990821 HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 101 UPGRADED
+    Content-Type: application/vnd.docker.raw-stream
+    Connection: Upgrade
+    Upgrade: tcp
+
+    {% raw %}
+    {{ STREAM }}
+    {% endraw %}
+
+**Query parameters**:
+
+- **follow** – 1/True/true or 0/False/false, return stream. Default `false`.
+- **stdout** – 1/True/true or 0/False/false, show `stdout` log. Default `false`.
+- **stderr** – 1/True/true or 0/False/false, show `stderr` log. Default `false`.
+- **since** – UNIX timestamp (integer) to filter logs. Specifying a timestamp
+  will only output log entries since that timestamp. Default: 0 (unfiltered)
+- **timestamps** – 1/True/true or 0/False/false, print timestamps for
+  every log line. Default `false`.
+- **tail** – Output the specified number of lines at the end of logs: `all` or `<number>`. Default `all`.
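+
+A short Python sketch of tailing and following logs with these parameters
+(assuming the third-party `requests` library and a daemon on
+`tcp://localhost:2375`, both assumptions of this sketch):
+
+```python
+import requests  # assumed third-party HTTP client
+
+DAEMON = "http://localhost:2375"  # assumed TCP endpoint for this sketch
+
+resp = requests.get(
+    DAEMON + "/v1.22/containers/4fa6e0f0c678/logs",
+    params={"stdout": 1, "stderr": 1, "timestamps": 1,
+            "follow": 1, "tail": 10, "since": 1428990821},
+    stream=True,  # keep the connection open while following
+)
+for chunk in resp.iter_content(chunk_size=None):
+    # For containers created without a TTY, each frame carries the 8-byte
+    # multiplexing header described under "Attach to a container" below.
+    print(chunk)
+```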
+ +**Status codes**: + +- **101** – no error, hints proxy about hijacking +- **200** – no error, no upgrade header found +- **404** – no such container +- **500** – server error + +#### Inspect changes on a container's filesystem + +`GET /containers/(id or name)/changes` + +Inspect changes on container `id`'s filesystem + +**Example request**: + + GET /v1.22/containers/4fa6e0f0c678/changes HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Path": "/dev", + "Kind": 0 + }, + { + "Path": "/dev/kmsg", + "Kind": 1 + }, + { + "Path": "/test", + "Kind": 1 + } + ] + +Values for `Kind`: + +- `0`: Modify +- `1`: Add +- `2`: Delete + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Export a container + +`GET /containers/(id or name)/export` + +Export the contents of container `id` + +**Example request**: + + GET /v1.22/containers/4fa6e0f0c678/export HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/octet-stream + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Get container stats based on resource usage + +`GET /containers/(id or name)/stats` + +This endpoint returns a live stream of a container's resource usage statistics. + +**Example request**: + + GET /v1.22/containers/redis1/stats HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "read" : "2015-01-08T22:57:31.547920715Z", + "networks": { + "eth0": { + "rx_bytes": 5338, + "rx_dropped": 0, + "rx_errors": 0, + "rx_packets": 36, + "tx_bytes": 648, + "tx_dropped": 0, + "tx_errors": 0, + "tx_packets": 8 + }, + "eth5": { + "rx_bytes": 4641, + "rx_dropped": 0, + "rx_errors": 0, + "rx_packets": 26, + "tx_bytes": 690, + "tx_dropped": 0, + "tx_errors": 0, + "tx_packets": 9 + } + }, + "memory_stats" : { + "stats" : { + "total_pgmajfault" : 0, + "cache" : 0, + "mapped_file" : 0, + "total_inactive_file" : 0, + "pgpgout" : 414, + "rss" : 6537216, + "total_mapped_file" : 0, + "writeback" : 0, + "unevictable" : 0, + "pgpgin" : 477, + "total_unevictable" : 0, + "pgmajfault" : 0, + "total_rss" : 6537216, + "total_rss_huge" : 6291456, + "total_writeback" : 0, + "total_inactive_anon" : 0, + "rss_huge" : 6291456, + "hierarchical_memory_limit" : 67108864, + "total_pgfault" : 964, + "total_active_file" : 0, + "active_anon" : 6537216, + "total_active_anon" : 6537216, + "total_pgpgout" : 414, + "total_cache" : 0, + "inactive_anon" : 0, + "active_file" : 0, + "pgfault" : 964, + "inactive_file" : 0, + "total_pgpgin" : 477 + }, + "max_usage" : 6651904, + "usage" : 6537216, + "failcnt" : 0, + "limit" : 67108864 + }, + "blkio_stats" : {}, + "cpu_stats" : { + "cpu_usage" : { + "percpu_usage" : [ + 8646879, + 24472255, + 36438778, + 30657443 + ], + "usage_in_usermode" : 50000000, + "total_usage" : 100215355, + "usage_in_kernelmode" : 30000000 + }, + "system_cpu_usage" : 739306590000000, + "throttling_data" : {"periods":0,"throttled_periods":0,"throttled_time":0} + }, + "precpu_stats" : { + "cpu_usage" : { + "percpu_usage" : [ + 8646879, + 24350896, + 36438778, + 30657443 + ], + "usage_in_usermode" : 50000000, + "total_usage" : 100093996, + "usage_in_kernelmode" : 30000000 + }, + "system_cpu_usage" : 9492140000000, + "throttling_data" : {"periods":0,"throttled_periods":0,"throttled_time":0} + } + } + +The `precpu_stats` is the cpu statistic of last read, which is used for calculating the cpu 
usage percent. It is not the exact copy of the `cpu_stats` field. + +**Query parameters**: + +- **stream** – 1/True/true or 0/False/false, pull stats once then disconnect. Default `true`. + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Resize a container TTY + +`POST /containers/(id or name)/resize` + +Resize the TTY for container with `id`. The unit is number of characters. You must restart the container for the resize to take effect. + +**Example request**: + + POST /v1.22/containers/4fa6e0f0c678/resize?h=40&w=80 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Length: 0 + Content-Type: text/plain; charset=utf-8 + +**Query parameters**: + +- **h** – height of `tty` session +- **w** – width + +**Status codes**: + +- **200** – no error +- **404** – No such container +- **500** – Cannot resize container + +#### Start a container + +`POST /containers/(id or name)/start` + +Start the container `id` + +> **Note**: +> For backwards compatibility, this endpoint accepts a `HostConfig` as JSON-encoded request body. +> See [create a container](#create-a-container) for details. + +**Example request**: + + POST /v1.22/containers/e90e34656806/start HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **detachKeys** – Override the key sequence for detaching a + container. Format is a single character `[a-Z]` or `ctrl-` + where `` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. + +**Status codes**: + +- **204** – no error +- **304** – container already started +- **404** – no such container +- **500** – server error + +#### Stop a container + +`POST /containers/(id or name)/stop` + +Stop the container `id` + +**Example request**: + + POST /v1.22/containers/e90e34656806/stop?t=5 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **t** – number of seconds to wait before killing the container + +**Status codes**: + +- **204** – no error +- **304** – container already stopped +- **404** – no such container +- **500** – server error + +#### Restart a container + +`POST /containers/(id or name)/restart` + +Restart the container `id` + +**Example request**: + + POST /v1.22/containers/e90e34656806/restart?t=5 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **t** – number of seconds to wait before killing the container + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **500** – server error + +#### Kill a container + +`POST /containers/(id or name)/kill` + +Kill the container `id` + +**Example request**: + + POST /v1.22/containers/e90e34656806/kill HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **signal** - Signal to send to the container: integer or string like `SIGINT`. + When not set, `SIGKILL` is assumed and the call waits for the container to exit. + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **500** – server error + +#### Update a container + +`POST /containers/(id or name)/update` + +Update resource configs of one or more containers. 
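+
+A minimal Python sketch of the same call (assuming the third-party `requests`
+library and a daemon on `tcp://localhost:2375`, both assumptions of this
+sketch); the raw HTTP example follows.
+
+```python
+import requests  # assumed third-party HTTP client
+
+DAEMON = "http://localhost:2375"  # assumed TCP endpoint for this sketch
+
+resp = requests.post(
+    DAEMON + "/v1.22/containers/e90e34656806/update",
+    json={"CpuShares": 512, "Memory": 314572800, "MemorySwap": 514288000},
+)
+print(resp.json().get("Warnings"))  # e.g. hints about unsupported options
+```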
+
+**Example request**:
+
+    POST /v1.22/containers/e90e34656806/update HTTP/1.1
+    Content-Type: application/json
+
+    {
+      "BlkioWeight": 300,
+      "CpuShares": 512,
+      "CpuPeriod": 100000,
+      "CpuQuota": 50000,
+      "CpusetCpus": "0,1",
+      "CpusetMems": "0",
+      "Memory": 314572800,
+      "MemorySwap": 514288000,
+      "MemoryReservation": 209715200,
+      "KernelMemory": 52428800
+    }
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    {
+        "Warnings": []
+    }
+
+**Status codes**:
+
+- **200** – no error
+- **400** – bad parameter
+- **404** – no such container
+- **500** – server error
+
+#### Rename a container
+
+`POST /containers/(id or name)/rename`
+
+Rename the container `id` to a `new_name`
+
+**Example request**:
+
+    POST /v1.22/containers/e90e34656806/rename?name=new_name HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 204 No Content
+
+**Query parameters**:
+
+- **name** – new name for the container
+
+**Status codes**:
+
+- **204** – no error
+- **404** – no such container
+- **409** – conflict, name already assigned
+- **500** – server error
+
+#### Pause a container
+
+`POST /containers/(id or name)/pause`
+
+Pause the container `id`
+
+**Example request**:
+
+    POST /v1.22/containers/e90e34656806/pause HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 204 No Content
+
+**Status codes**:
+
+- **204** – no error
+- **404** – no such container
+- **500** – server error
+
+#### Unpause a container
+
+`POST /containers/(id or name)/unpause`
+
+Unpause the container `id`
+
+**Example request**:
+
+    POST /v1.22/containers/e90e34656806/unpause HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 204 No Content
+
+**Status codes**:
+
+- **204** – no error
+- **404** – no such container
+- **500** – server error
+
+#### Attach to a container
+
+`POST /containers/(id or name)/attach`
+
+Attach to the container `id`
+
+**Example request**:
+
+    POST /v1.22/containers/16253994b7c4/attach?logs=1&stream=0&stdout=1 HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 101 UPGRADED
+    Content-Type: application/vnd.docker.raw-stream
+    Connection: Upgrade
+    Upgrade: tcp
+
+    {% raw %}
+    {{ STREAM }}
+    {% endraw %}
+
+**Query parameters**:
+
+- **detachKeys** – Override the key sequence for detaching a
+    container. Format is a single character `[a-Z]` or `ctrl-<value>`
+    where `<value>` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`.
+- **logs** – 1/True/true or 0/False/false, return logs. Default `false`.
+- **stream** – 1/True/true or 0/False/false, return stream.
+    Default `false`.
+- **stdin** – 1/True/true or 0/False/false, if `stream=true`, attach
+    to `stdin`. Default `false`.
+- **stdout** – 1/True/true or 0/False/false, if `logs=true`, return
+    `stdout` log, if `stream=true`, attach to `stdout`. Default `false`.
+- **stderr** – 1/True/true or 0/False/false, if `logs=true`, return
+    `stderr` log, if `stream=true`, attach to `stderr`. Default `false`.
+
+**Status codes**:
+
+- **101** – no error, hints proxy about hijacking
+- **200** – no error, no upgrade header found
+- **400** – bad parameter
+- **404** – no such container
+- **409** – container is paused
+- **500** – server error
+
+**Stream details**:
+
+When the TTY setting is enabled in
+[`POST /containers/create`](#create-a-container),
+the stream is the raw data from the process PTY and the client's `stdin`.
+When the TTY is disabled, the stream is multiplexed to separate
+`stdout` and `stderr`.
+
+The format is a **Header** and a **Payload** (frame).
+
+**HEADER**
+
+The header contains information about which stream the frame carries (`stdout` or
+`stderr`).
It also contains the size of the associated frame encoded in the +last four bytes (`uint32`). + +It is encoded on the first eight bytes like this: + + header := [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4} + +`STREAM_TYPE` can be: + +- 0: `stdin` (is written on `stdout`) +- 1: `stdout` +- 2: `stderr` + +`SIZE1, SIZE2, SIZE3, SIZE4` are the four bytes of +the `uint32` size encoded as big endian. + +**PAYLOAD** + +The payload is the raw stream. + +**IMPLEMENTATION** + +The simplest way to implement the Attach protocol is the following: + + 1. Read eight bytes. + 2. Choose `stdout` or `stderr` depending on the first byte. + 3. Extract the frame size from the last four bytes. + 4. Read the extracted size and output it on the correct output. + 5. Goto 1. + +#### Attach to a container (websocket) + +`GET /containers/(id or name)/attach/ws` + +Attach to the container `id` via websocket + +Implements websocket protocol handshake according to [RFC 6455](http://tools.ietf.org/html/rfc6455) + +**Example request** + + GET /v1.22/containers/e90e34656806/attach/ws?logs=0&stream=1&stdin=1&stdout=1&stderr=1 HTTP/1.1 + +**Example response** + + {% raw %} + {{ STREAM }} + {% endraw %} + +**Query parameters**: + +- **detachKeys** – Override the key sequence for detaching a + container. Format is a single character `[a-Z]` or `ctrl-` + where `` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. +- **logs** – 1/True/true or 0/False/false, return logs. Default `false`. +- **stream** – 1/True/true or 0/False/false, return stream. + Default `false`. +- **stdin** – 1/True/true or 0/False/false, if `stream=true`, attach + to `stdin`. Default `false`. +- **stdout** – 1/True/true or 0/False/false, if `logs=true`, return + `stdout` log, if `stream=true`, attach to `stdout`. Default `false`. +- **stderr** – 1/True/true or 0/False/false, if `logs=true`, return + `stderr` log, if `stream=true`, attach to `stderr`. Default `false`. + +**Status codes**: + +- **200** – no error +- **400** – bad parameter +- **404** – no such container +- **500** – server error + +#### Wait a container + +`POST /containers/(id or name)/wait` + +Block until container `id` stops, then returns the exit code + +**Example request**: + + POST /v1.22/containers/16253994b7c4/wait HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"StatusCode": 0} + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Remove a container + +`DELETE /containers/(id or name)` + +Remove the container `id` from the filesystem + +**Example request**: + + DELETE /v1.22/containers/16253994b7c4?v=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **v** – 1/True/true or 0/False/false, Remove the volumes + associated to the container. Default `false`. +- **force** - 1/True/true or 0/False/false, Kill then remove the container. + Default `false`. +- **link** - 1/True/true or 0/False/false, Remove the specified + link associated to the container. Default `false`. + +**Status codes**: + +- **204** – no error +- **400** – bad parameter +- **404** – no such container +- **409** – conflict +- **500** – server error + +#### Copy files or folders from a container + +`POST /containers/(id or name)/copy` + +Copy files or folders of container `id` + +**Deprecated** in favor of the `archive` endpoint below. 
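+
+Though deprecated, the endpoint remains callable in this API version. As a
+minimal sketch (assuming the default Unix socket and reusing the container ID
+from the example below), a Go client can request a file and save the returned
+tar stream:
+
+```go
+package main
+
+import (
+    "bytes"
+    "context"
+    "io"
+    "net"
+    "net/http"
+    "os"
+)
+
+func main() {
+    // HTTP client dialing the Docker daemon's default Unix socket.
+    client := &http.Client{Transport: &http.Transport{
+        DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
+            return (&net.Dialer{}).DialContext(ctx, "unix", "/var/run/docker.sock")
+        },
+    }}
+
+    // Ask for test.txt; the response body is a tar archive containing it.
+    resp, err := client.Post("http://localhost/v1.22/containers/4fa6e0f0c678/copy",
+        "application/json", bytes.NewBufferString(`{"Resource": "test.txt"}`))
+    if err != nil {
+        panic(err)
+    }
+    defer resp.Body.Close()
+
+    out, err := os.Create("test.txt.tar") // hypothetical output file name
+    if err != nil {
+        panic(err)
+    }
+    defer out.Close()
+
+    if _, err := io.Copy(out, resp.Body); err != nil {
+        panic(err)
+    }
+}
+```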
+ +**Example request**: + + POST /v1.22/containers/4fa6e0f0c678/copy HTTP/1.1 + Content-Type: application/json + + { + "Resource": "test.txt" + } + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Retrieving information about files and folders in a container + +`HEAD /containers/(id or name)/archive` + +See the description of the `X-Docker-Container-Path-Stat` header in the +following section. + +#### Get an archive of a filesystem resource in a container + +`GET /containers/(id or name)/archive` + +Get a tar archive of a resource in the filesystem of container `id`. + +**Query parameters**: + +- **path** - resource in the container's filesystem to archive. Required. + + If not an absolute path, it is relative to the container's root directory. + The resource specified by **path** must exist. To assert that the resource + is expected to be a directory, **path** should end in `/` or `/.` + (assuming a path separator of `/`). If **path** ends in `/.` then this + indicates that only the contents of the **path** directory should be + copied. A symlink is always resolved to its target. + + > **Note**: It is not possible to copy certain system files such as resources + > under `/proc`, `/sys`, `/dev`, and mounts created by the user in the + > container. + +**Example request**: + + GET /v1.22/containers/8cce319429b2/archive?path=/root HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + X-Docker-Container-Path-Stat: eyJuYW1lIjoicm9vdCIsInNpemUiOjQwOTYsIm1vZGUiOjIxNDc0ODQwOTYsIm10aW1lIjoiMjAxNC0wMi0yN1QyMDo1MToyM1oiLCJsaW5rVGFyZ2V0IjoiIn0= + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +On success, a response header `X-Docker-Container-Path-Stat` will be set to a +base64-encoded JSON object containing some filesystem header information about +the archived resource. The above example value would decode to the following +JSON object (whitespace added for readability): + +```json +{ + "name": "root", + "size": 4096, + "mode": 2147484096, + "mtime": "2014-02-27T20:51:23Z", + "linkTarget": "" +} +``` + +A `HEAD` request can also be made to this endpoint if only this information is +desired. + +**Status codes**: + +- **200** - success, returns archive of copied resource +- **400** - client error, bad parameter, details in JSON response body, one of: + - must specify path parameter (**path** cannot be empty) + - not a directory (**path** was asserted to be a directory but exists as a + file) +- **404** - client error, resource not found, one of: + – no such container (container `id` does not exist) + - no such file or directory (**path** does not exist) +- **500** - server error + +#### Extract an archive of files or folders to a directory in a container + +`PUT /containers/(id or name)/archive` + +Upload a tar archive to be extracted to a path in the filesystem of container +`id`. + +**Query parameters**: + +- **path** - path to a directory in the container + to extract the archive's contents into. Required. + + If not an absolute path, it is relative to the container's root directory. + The **path** resource must exist. +- **noOverwriteDirNonDir** - If "1", "true", or "True" then it will be an error + if unpacking the given content would cause an existing directory to be + replaced with a non-directory and vice versa. 
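+
+To illustrate the upload described by the query parameters above, here is a
+Go sketch (assumptions: the default Unix socket, the container ID from the
+example below, and a hypothetical local `backup.tar`); the raw HTTP exchange
+follows.
+
+```go
+package main
+
+import (
+    "context"
+    "fmt"
+    "net"
+    "net/http"
+    "os"
+)
+
+func main() {
+    // HTTP client dialing the Docker daemon's default Unix socket.
+    client := &http.Client{Transport: &http.Transport{
+        DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
+            return (&net.Dialer{}).DialContext(ctx, "unix", "/var/run/docker.sock")
+        },
+    }}
+
+    // The request body is the tar archive itself.
+    tarball, err := os.Open("backup.tar") // hypothetical local archive
+    if err != nil {
+        panic(err)
+    }
+    defer tarball.Close()
+
+    req, err := http.NewRequest(http.MethodPut,
+        "http://localhost/v1.22/containers/8cce319429b2/archive?path=/vol1", tarball)
+    if err != nil {
+        panic(err)
+    }
+    req.Header.Set("Content-Type", "application/x-tar")
+
+    resp, err := client.Do(req)
+    if err != nil {
+        panic(err)
+    }
+    defer resp.Body.Close()
+    fmt.Println(resp.Status) // expect "200 OK" when extraction succeeds
+}
+```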
+ +**Example request**: + + PUT /v1.22/containers/8cce319429b2/archive?path=/vol1 HTTP/1.1 + Content-Type: application/x-tar + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +**Example response**: + + HTTP/1.1 200 OK + +**Status codes**: + +- **200** – the content was extracted successfully +- **400** - client error, bad parameter, details in JSON response body, one of: + - must specify path parameter (**path** cannot be empty) + - not a directory (**path** should be a directory but exists as a file) + - unable to overwrite existing directory with non-directory + (if **noOverwriteDirNonDir**) + - unable to overwrite existing non-directory with directory + (if **noOverwriteDirNonDir**) +- **403** - client error, permission denied, the volume + or container rootfs is marked as read-only. +- **404** - client error, resource not found, one of: + – no such container (container `id` does not exist) + - no such file or directory (**path** resource does not exist) +- **500** – server error + +### 2.2 Images + +#### List Images + +`GET /images/json` + +**Example request**: + + GET /v1.22/images/json?all=0 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "RepoTags": [ + "ubuntu:12.04", + "ubuntu:precise", + "ubuntu:latest" + ], + "Id": "8dbd9e392a964056420e5d58ca5cc376ef18e2de93b5cc90e868a1bbc8318c1c", + "Created": 1365714795, + "Size": 131506275, + "VirtualSize": 131506275, + "Labels": {} + }, + { + "RepoTags": [ + "ubuntu:12.10", + "ubuntu:quantal" + ], + "ParentId": "27cf784147099545", + "Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", + "Created": 1364102658, + "Size": 24653, + "VirtualSize": 180116135, + "Labels": { + "com.example.version": "v1" + } + } + ] + +**Example request, with digest information**: + + GET /v1.22/images/json?digests=1 HTTP/1.1 + +**Example response, with digest information**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Created": 1420064636, + "Id": "4986bf8c15363d1c5d15512d5266f8777bfba4974ac56e3270e7760f6f0a8125", + "ParentId": "ea13149945cb6b1e746bf28032f02e9b5a793523481a0a18645fc77ad53c4ea2", + "RepoDigests": [ + "localhost:5000/test/busybox@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf" + ], + "RepoTags": [ + "localhost:5000/test/busybox:latest", + "playdate:latest" + ], + "Size": 0, + "VirtualSize": 2429728, + "Labels": {} + } + ] + +The response shows a single image `Id` associated with two repositories +(`RepoTags`): `localhost:5000/test/busybox`: and `playdate`. A caller can use +either of the `RepoTags` values `localhost:5000/test/busybox:latest` or +`playdate:latest` to reference the image. + +You can also use `RepoDigests` values to reference an image. In this response, +the array has only one reference and that is to the +`localhost:5000/test/busybox` repository; the `playdate` repository has no +digest. You can reference this digest using the value: +`localhost:5000/test/busybox@sha256:cbbf2f9a99b47fc460d...` + +See the `docker run` and `docker build` commands for examples of digest and tag +references on the command line. + +**Query parameters**: + +- **all** – 1/True/true or 0/False/false, default false +- **filters** – a JSON encoded value of the filters (a map[string][]string) to process on the images list. 
Available filters: + - `dangling=true` + - `label=key` or `label="key=value"` of an image label +- **filter** - only return images with the specified name + +#### Build image from a Dockerfile + +`POST /build` + +Build an image from a Dockerfile + +**Example request**: + + POST /v1.22/build HTTP/1.1 + Content-Type: application/x-tar + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"stream": "Step 1/5..."} + {"stream": "..."} + {"error": "Error...", "errorDetail": {"code": 123, "message": "Error..."}} + +The input stream must be a `tar` archive compressed with one of the +following algorithms: `identity` (no compression), `gzip`, `bzip2`, `xz`. + +The archive must include a build instructions file, typically called +`Dockerfile` at the archive's root. The `dockerfile` parameter may be +used to specify a different build instructions file. To do this, its value must be +the path to the alternate build instructions file to use. + +The archive may include any number of other files, +which are accessible in the build context (See the [*ADD build +command*](../reference/builder.md#add)). + +The Docker daemon performs a preliminary validation of the `Dockerfile` before +starting the build, and returns an error if the syntax is incorrect. After that, +each instruction is run one-by-one until the ID of the new image is output. + +The build is canceled if the client drops the connection by quitting +or being killed. + +**Query parameters**: + +- **dockerfile** - Path within the build context to the `Dockerfile`. This is + ignored if `remote` is specified and points to an external `Dockerfile`. +- **t** – A name and optional tag to apply to the image in the `name:tag` format. + If you omit the `tag` the default `latest` value is assumed. + You can provide one or more `t` parameters. +- **remote** – A Git repository URI or HTTP/HTTPS context URI. If the + URI points to a single text file, the file's contents are placed into + a file called `Dockerfile` and the image is built from that file. If + the URI points to a tarball, the file is downloaded by the daemon and + the contents therein used as the context for the build. If the URI + points to a tarball and the `dockerfile` parameter is also specified, + there must be a file with the corresponding path inside the tarball. +- **q** – Suppress verbose build output. +- **nocache** – Do not use the cache when building the image. +- **pull** - Attempt to pull the image even if an older image exists locally. +- **rm** - Remove intermediate containers after a successful build (default behavior). +- **forcerm** - Always remove intermediate containers (includes `rm`). +- **memory** - Set memory limit for build. +- **memswap** - Total memory (memory + swap), `-1` to enable unlimited swap. +- **cpushares** - CPU shares (relative weight). +- **cpusetcpus** - CPUs in which to allow execution (e.g., `0-3`, `0,1`). +- **cpuperiod** - The length of a CPU period in microseconds. +- **cpuquota** - Microseconds of CPU time that the container can get in a CPU period. +- **buildargs** – JSON map of string pairs for build-time variables. Users pass + these values at build-time. Docker uses the `buildargs` as the environment + context for command(s) run via the Dockerfile's `RUN` instruction or for + variable expansion in other Dockerfile instructions. This is not meant for + passing secret values. 
[Read more about the buildargs instruction](../reference/builder.md#arg) +- **shmsize** - Size of `/dev/shm` in bytes. The size must be greater than 0. If omitted the system uses 64MB. + +**Request Headers**: + +- **Content-type** – Set to `"application/x-tar"`. +- **X-Registry-Config** – A base64-url-safe-encoded Registry Auth Config JSON + object with the following structure: + + { + "docker.example.com": { + "username": "janedoe", + "password": "hunter2" + }, + "https://index.docker.io/v1/": { + "username": "mobydock", + "password": "conta1n3rize14" + } + } + + This object maps the hostname of a registry to an object containing the + "username" and "password" for that registry. Multiple registries may + be specified as the build may be based on an image requiring + authentication to pull from any arbitrary registry. Only the registry + domain name (and port if not the default "443") are required. However + (for legacy reasons) the "official" Docker, Inc. hosted registry must + be specified with both a "https://" prefix and a "/v1/" suffix even + though Docker will prefer to use the v2 registry API. + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Create an image + +`POST /images/create` + +Create an image either by pulling it from the registry or by importing it + +**Example request**: + + POST /v1.22/images/create?fromImage=busybox&tag=latest HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status": "Pulling..."} + {"status": "Pulling", "progress": "1 B/ 100 B", "progressDetail": {"current": 1, "total": 100}} + {"error": "Invalid..."} + ... + +When using this endpoint to pull an image from the registry, the +`X-Registry-Auth` header can be used to include +a base64-encoded AuthConfig object. + +**Query parameters**: + +- **fromImage** – Name of the image to pull. The name may include a tag or + digest. This parameter may only be used when pulling an image. + The pull is cancelled if the HTTP connection is closed. +- **fromSrc** – Source to import. The value may be a URL from which the image + can be retrieved or `-` to read the image from the request body. + This parameter may only be used when importing an image. +- **repo** – Repository name given to an image when it is imported. + The repo may include a tag. This parameter may only be used when importing + an image. +- **tag** – Tag or digest. If empty when pulling an image, this causes all tags + for the given image to be pulled. + +**Request Headers**: + +- **X-Registry-Auth** – base64-encoded AuthConfig object, containing either login information, or a token + - Credential based login: + + ``` + { + "username": "jdoe", + "password": "secret", + "email": "jdoe@acme.com" + } + ``` + + - Token based login: + + ``` + { + "registrytoken": "9cbaf023786cd7..." 
+ } + ``` + +**Status codes**: + +- **200** – no error +- **404** - repository does not exist or no read access +- **500** – server error + + + +#### Inspect an image + +`GET /images/(name)/json` + +Return low-level information on the image `name` + +**Example request**: + + GET /v1.22/images/example/json HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Id" : "85f05633ddc1c50679be2b16a0479ab6f7637f8884e0cfe0f4d20e1ebb3d6e7c", + "Container" : "cb91e48a60d01f1e27028b4fc6819f4f290b3cf12496c8176ec714d0d390984a", + "Comment" : "", + "Os" : "linux", + "Architecture" : "amd64", + "Parent" : "91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c", + "ContainerConfig" : { + "Tty" : false, + "Hostname" : "e611e15f9c9d", + "Volumes" : null, + "Domainname" : "", + "AttachStdout" : false, + "PublishService" : "", + "AttachStdin" : false, + "OpenStdin" : false, + "StdinOnce" : false, + "NetworkDisabled" : false, + "OnBuild" : [], + "Image" : "91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c", + "User" : "", + "WorkingDir" : "", + "Entrypoint" : null, + "MacAddress" : "", + "AttachStderr" : false, + "Labels" : { + "com.example.license" : "GPL", + "com.example.version" : "1.0", + "com.example.vendor" : "Acme" + }, + "Env" : [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + ], + "ExposedPorts" : null, + "Cmd" : [ + "/bin/sh", + "-c", + "#(nop) LABEL com.example.vendor=Acme com.example.license=GPL com.example.version=1.0" + ] + }, + "DockerVersion" : "1.9.0-dev", + "VirtualSize" : 188359297, + "Size" : 0, + "Author" : "", + "Created" : "2015-09-10T08:30:53.26995814Z", + "GraphDriver" : { + "Name" : "aufs", + "Data" : null + }, + "RepoDigests" : [ + "localhost:5000/test/busybox/example@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf" + ], + "RepoTags" : [ + "example:1.0", + "example:latest", + "example:stable" + ], + "Config" : { + "Image" : "91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c", + "NetworkDisabled" : false, + "OnBuild" : [], + "StdinOnce" : false, + "PublishService" : "", + "AttachStdin" : false, + "OpenStdin" : false, + "Domainname" : "", + "AttachStdout" : false, + "Tty" : false, + "Hostname" : "e611e15f9c9d", + "Volumes" : null, + "Cmd" : [ + "/bin/bash" + ], + "ExposedPorts" : null, + "Env" : [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + ], + "Labels" : { + "com.example.vendor" : "Acme", + "com.example.version" : "1.0", + "com.example.license" : "GPL" + }, + "Entrypoint" : null, + "MacAddress" : "", + "AttachStderr" : false, + "WorkingDir" : "", + "User" : "" + } + } + +**Status codes**: + +- **200** – no error +- **404** – no such image +- **500** – server error + +#### Get the history of an image + +`GET /images/(name)/history` + +Return the history of the image `name` + +**Example request**: + + GET /v1.22/images/ubuntu/history HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Id": "3db9c44f45209632d6050b35958829c3a2aa256d81b9a7be45b362ff85c54710", + "Created": 1398108230, + "CreatedBy": "/bin/sh -c #(nop) ADD file:eb15dbd63394e063b805a3c32ca7bf0266ef64676d5a6fab4801f2e81e2a5148 in /", + "Tags": [ + "ubuntu:lucid", + "ubuntu:10.04" + ], + "Size": 182964289, + "Comment": "" + }, + { + "Id": "6cfa4d1f33fb861d4d114f43b25abd0ac737509268065cdfd69d544a59c85ab8", + "Created": 1398108222, + "CreatedBy": "/bin/sh -c #(nop) MAINTAINER Tianon Gravi - mkimage-debootstrap.sh -i 
iproute,iputils-ping,ubuntu-minimal -t lucid.tar.xz lucid http://archive.ubuntu.com/ubuntu/",
+            "Tags": null,
+            "Size": 0,
+            "Comment": ""
+        },
+        {
+            "Id": "511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158",
+            "Created": 1371157430,
+            "CreatedBy": "",
+            "Tags": [
+                "scratch12:latest",
+                "scratch:latest"
+            ],
+            "Size": 0,
+            "Comment": "Imported from -"
+        }
+    ]
+
+**Status codes**:
+
+- **200** – no error
+- **404** – no such image
+- **500** – server error
+
+#### Push an image on the registry
+
+`POST /images/(name)/push`
+
+Push the image `name` to the registry
+
+**Example request**:
+
+    POST /v1.22/images/test/push HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    {"status": "Pushing..."}
+    {"status": "Pushing", "progress": "1/? (n/a)", "progressDetail": {"current": 1}}
+    {"error": "Invalid..."}
+    ...
+
+If you wish to push an image to a private registry, that image must
+already have a tag which references a repository on that registry (its
+`hostname` and `port`). This repository name should then be used in the URL.
+This mirrors the command line's flow.
+
+The push is cancelled if the HTTP connection is closed.
+
+**Example request**:
+
+    POST /v1.22/images/registry.acme.com:5000/test/push HTTP/1.1
+
+
+**Query parameters**:
+
+- **tag** – The tag to associate with the image on the registry. This is optional.
+
+**Request Headers**:
+
+- **X-Registry-Auth** – base64-encoded AuthConfig object, containing either login information, or a token
+    - Credential based login:
+
+        ```
+        {
+                "username": "jdoe",
+                "password": "secret",
+                "email": "jdoe@acme.com"
+        }
+        ```
+
+    - Token based login:
+
+        ```
+        {
+                "registrytoken": "9cbaf023786cd7..."
+        }
+        ```
+
+**Status codes**:
+
+- **200** – no error
+- **404** – no such image
+- **500** – server error
+
+#### Tag an image into a repository
+
+`POST /images/(name)/tag`
+
+Tag the image `name` into a repository
+
+**Example request**:
+
+    POST /v1.22/images/test/tag?repo=myrepo&force=0&tag=v42 HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 201 Created
+
+**Query parameters**:
+
+- **repo** – The repository to tag in
+- **force** – 1/True/true or 0/False/false, default false
+- **tag** - The new tag name
+
+**Status codes**:
+
+- **201** – no error
+- **400** – bad parameter
+- **404** – no such image
+- **409** – conflict
+- **500** – server error
+
+#### Remove an image
+
+`DELETE /images/(name)`
+
+Remove the image `name` from the filesystem
+
+**Example request**:
+
+    DELETE /v1.22/images/test HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    [
+        {"Untagged": "3e2f21a89f"},
+        {"Deleted": "3e2f21a89f"},
+        {"Deleted": "53b4f83ac9"}
+    ]
+
+**Query parameters**:
+
+- **force** – 1/True/true or 0/False/false, default false
+- **noprune** – 1/True/true or 0/False/false, default false
+
+**Status codes**:
+
+- **200** – no error
+- **404** – no such image
+- **409** – conflict
+- **500** – server error
+
+#### Search images
+
+`GET /images/search`
+
+Search for an image on [Docker Hub](https://hub.docker.com).
+
+> **Note**:
+> The response keys have changed from API v1.6 to reflect the JSON
+> sent by the registry server to the docker daemon's request.
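+
+A minimal Go client for this endpoint might look like the following sketch
+(the Unix-socket path is an assumption; the struct fields mirror the example
+response below):
+
+```go
+package main
+
+import (
+    "context"
+    "encoding/json"
+    "fmt"
+    "net"
+    "net/http"
+)
+
+// searchResult mirrors the fields returned by /images/search.
+type searchResult struct {
+    Name        string `json:"name"`
+    Description string `json:"description"`
+    IsOfficial  bool   `json:"is_official"`
+    IsAutomated bool   `json:"is_automated"`
+    StarCount   int    `json:"star_count"`
+}
+
+func main() {
+    // HTTP client dialing the Docker daemon's default Unix socket.
+    client := &http.Client{Transport: &http.Transport{
+        DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
+            return (&net.Dialer{}).DialContext(ctx, "unix", "/var/run/docker.sock")
+        },
+    }}
+
+    resp, err := client.Get("http://localhost/v1.22/images/search?term=sshd")
+    if err != nil {
+        panic(err)
+    }
+    defer resp.Body.Close()
+
+    var results []searchResult
+    if err := json.NewDecoder(resp.Body).Decode(&results); err != nil {
+        panic(err)
+    }
+    for _, r := range results {
+        fmt.Printf("%-30s stars=%d official=%v\n", r.Name, r.StarCount, r.IsOfficial)
+    }
+}
+```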
+ +**Example request**: + + GET /v1.22/images/search?term=sshd HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "description": "", + "is_official": false, + "is_automated": false, + "name": "wma55/u1210sshd", + "star_count": 0 + }, + { + "description": "", + "is_official": false, + "is_automated": false, + "name": "jdswinbank/sshd", + "star_count": 0 + }, + { + "description": "", + "is_official": false, + "is_automated": false, + "name": "vgauthier/sshd", + "star_count": 0 + } + ... + ] + +**Query parameters**: + +- **term** – term to search + +**Status codes**: + +- **200** – no error +- **500** – server error + +### 2.3 Misc + +#### Check auth configuration + +`POST /auth` + +Get the default username and email + +**Example request**: + + POST /v1.22/auth HTTP/1.1 + Content-Type: application/json + + { + "username": "hannibal", + "password": "xxxx", + "email": "hannibal@a-team.com", + "serveraddress": "https://index.docker.io/v1/" + } + +**Example response**: + + HTTP/1.1 200 OK + +**Status codes**: + +- **200** – no error +- **204** – no error +- **500** – server error + +#### Display system-wide information + +`GET /info` + +Display system-wide information + +**Example request**: + + GET /v1.22/info HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Architecture": "x86_64", + "ClusterStore": "etcd://localhost:2379", + "Containers": 11, + "ContainersRunning": 7, + "ContainersStopped": 3, + "ContainersPaused": 1, + "CpuCfsPeriod": true, + "CpuCfsQuota": true, + "Debug": false, + "DockerRootDir": "/var/lib/docker", + "Driver": "btrfs", + "DriverStatus": [[""]], + "ExecutionDriver": "native-0.1", + "ExperimentalBuild": false, + "HttpProxy": "http://test:test@localhost:8080", + "HttpsProxy": "https://test:test@localhost:8080", + "ID": "7TRN:IPZB:QYBB:VPBQ:UMPP:KARE:6ZNR:XE6T:7EWV:PKF4:ZOJD:TPYS", + "IPv4Forwarding": true, + "Images": 16, + "IndexServerAddress": "https://index.docker.io/v1/", + "InitPath": "/usr/bin/docker", + "InitSha1": "", + "KernelVersion": "3.12.0-1-amd64", + "Labels": [ + "storage=ssd" + ], + "MemTotal": 2099236864, + "MemoryLimit": true, + "NCPU": 1, + "NEventsListener": 0, + "NFd": 11, + "NGoroutines": 21, + "Name": "prod-server-42", + "NoProxy": "9.81.1.160", + "OomKillDisable": true, + "OSType": "linux", + "OperatingSystem": "Boot2Docker", + "Plugins": { + "Volume": [ + "local" + ], + "Network": [ + "null", + "host", + "bridge" + ] + }, + "RegistryConfig": { + "IndexConfigs": { + "docker.io": { + "Mirrors": null, + "Name": "docker.io", + "Official": true, + "Secure": true + } + }, + "InsecureRegistryCIDRs": [ + "127.0.0.0/8" + ] + }, + "ServerVersion": "1.9.0", + "SwapLimit": false, + "SystemStatus": [["State", "Healthy"]], + "SystemTime": "2015-03-10T11:11:23.730591467-07:00" + } + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Show the docker version information + +`GET /version` + +Show the docker version information + +**Example request**: + + GET /v1.22/version HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Version": "1.10.0", + "Os": "linux", + "KernelVersion": "3.19.0-23-generic", + "GoVersion": "go1.4.2", + "GitCommit": "e75da4b", + "Arch": "amd64", + "ApiVersion": "1.22", + "BuildTime": "2015-12-01T07:09:13.444803460+00:00", + "Experimental": true + } + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Ping the docker server + +`GET /_ping` + +Ping the docker 
server + +**Example request**: + + GET /v1.22/_ping HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: text/plain + + OK + +**Status codes**: + +- **200** - no error +- **500** - server error + +#### Create a new image from a container's changes + +`POST /commit` + +Create a new image from a container's changes + +**Example request**: + + POST /v1.22/commit?container=44c004db4b17&comment=message&repo=myrepo HTTP/1.1 + Content-Type: application/json + + { + "Hostname": "", + "Domainname": "", + "User": "", + "AttachStdin": false, + "AttachStdout": true, + "AttachStderr": true, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": null, + "Cmd": [ + "date" + ], + "Mounts": [ + { + "Source": "/data", + "Destination": "/data", + "Mode": "ro,Z", + "RW": false + } + ], + "Labels": { + "key1": "value1", + "key2": "value2" + }, + "WorkingDir": "", + "NetworkDisabled": false, + "ExposedPorts": { + "22/tcp": {} + } + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + {"Id": "596069db4bf5"} + +**JSON parameters**: + +- **config** - the container's configuration + +**Query parameters**: + +- **container** – source container +- **repo** – repository +- **tag** – tag +- **comment** – commit message +- **author** – author (e.g., "John Hannibal Smith + <[hannibal@a-team.com](mailto:hannibal%40a-team.com)>") +- **pause** – 1/True/true or 0/False/false, whether to pause the container before committing +- **changes** – Dockerfile instructions to apply while committing + +**Status codes**: + +- **201** – no error +- **404** – no such container +- **500** – server error + +#### Monitor Docker's events + +`GET /events` + +Get container events from docker, in real time via streaming. + +Docker containers report the following events: + + attach, commit, copy, create, destroy, die, exec_create, exec_start, export, kill, oom, pause, rename, resize, restart, start, stop, top, unpause, update + +Docker images report the following events: + + delete, import, pull, push, tag, untag + +Docker volumes report the following events: + + create, mount, unmount, destroy + +Docker networks report the following events: + + create, connect, disconnect, destroy + +**Example request**: + + GET /v1.22/events?since=1374067924 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + Server: Docker/1.10.0 (linux) + Date: Fri, 29 Apr 2016 15:18:06 GMT + Transfer-Encoding: chunked + + { + "status": "pull", + "id": "alpine:latest", + "Type": "image", + "Action": "pull", + "Actor": { + "ID": "alpine:latest", + "Attributes": { + "name": "alpine" + } + }, + "time": 1461943101, + "timeNano": 1461943101301854122 + } + { + "status": "create", + "id": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "from": "alpine", + "Type": "container", + "Action": "create", + "Actor": { + "ID": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "Attributes": { + "com.example.some-label": "some-label-value", + "image": "alpine", + "name": "my-container" + } + }, + "time": 1461943101, + "timeNano": 1461943101381709551 + } + { + "status": "attach", + "id": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "from": "alpine", + "Type": "container", + "Action": "attach", + "Actor": { + "ID": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "Attributes": { + "com.example.some-label": "some-label-value", + "image": "alpine", + "name": "my-container" + } + }, + "time": 1461943101, + "timeNano": 
1461943101383858412 + } + { + "Type": "network", + "Action": "connect", + "Actor": { + "ID": "7dc8ac97d5d29ef6c31b6052f3938c1e8f2749abbd17d1bd1febf2608db1b474", + "Attributes": { + "container": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "name": "bridge", + "type": "bridge" + } + }, + "time": 1461943101, + "timeNano": 1461943101394865557 + } + { + "status": "start", + "id": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "from": "alpine", + "Type": "container", + "Action": "start", + "Actor": { + "ID": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "Attributes": { + "com.example.some-label": "some-label-value", + "image": "alpine", + "name": "my-container" + } + }, + "time": 1461943101, + "timeNano": 1461943101607533796 + } + { + "status": "resize", + "id": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "from": "alpine", + "Type": "container", + "Action": "resize", + "Actor": { + "ID": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "Attributes": { + "com.example.some-label": "some-label-value", + "height": "46", + "image": "alpine", + "name": "my-container", + "width": "204" + } + }, + "time": 1461943101, + "timeNano": 1461943101610269268 + } + { + "status": "die", + "id": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "from": "alpine", + "Type": "container", + "Action": "die", + "Actor": { + "ID": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "Attributes": { + "com.example.some-label": "some-label-value", + "exitCode": "0", + "image": "alpine", + "name": "my-container" + } + }, + "time": 1461943105, + "timeNano": 1461943105079144137 + } + { + "Type": "network", + "Action": "disconnect", + "Actor": { + "ID": "7dc8ac97d5d29ef6c31b6052f3938c1e8f2749abbd17d1bd1febf2608db1b474", + "Attributes": { + "container": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "name": "bridge", + "type": "bridge" + } + }, + "time": 1461943105, + "timeNano": 1461943105230860245 + } + { + "status": "destroy", + "id": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "from": "alpine", + "Type": "container", + "Action": "destroy", + "Actor": { + "ID": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "Attributes": { + "com.example.some-label": "some-label-value", + "image": "alpine", + "name": "my-container" + } + }, + "time": 1461943105, + "timeNano": 1461943105338056026 + } + +**Query parameters**: + +- **since** – Timestamp. Show all events created since timestamp and then stream +- **until** – Timestamp. Show events created until given timestamp and stop streaming +- **filters** – A json encoded value of the filters (a map[string][]string) to process on the event list. Available filters: + - `container=`; -- container to filter + - `event=`; -- event to filter + - `image=`; -- image to filter + - `label=`; -- image and container label to filter + - `type=`; -- either `container` or `image` or `volume` or `network` + - `volume=`; -- volume to filter + - `network=`; -- network to filter + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Get a tarball containing all images in a repository + +`GET /images/(name)/get` + +Get a tarball containing all images and metadata for the repository specified +by `name`. + +If `name` is a specific name and tag (e.g. ubuntu:latest), then only that image +(and its parents) are returned. 
If `name` is an image ID, similarly only that +image (and its parents) are returned, but with the exclusion of the +'repositories' file in the tarball, as there were no image names referenced. + +See the [image tarball format](#image-tarball-format) for more details. + +**Example request** + + GET /v1.22/images/ubuntu/get + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + Binary data stream + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Get a tarball containing all images + +`GET /images/get` + +Get a tarball containing all images and metadata for one or more repositories. + +For each value of the `names` parameter: if it is a specific name and tag (e.g. +`ubuntu:latest`), then only that image (and its parents) are returned; if it is +an image ID, similarly only that image (and its parents) are returned and there +would be no names referenced in the 'repositories' file for this image ID. + +See the [image tarball format](#image-tarball-format) for more details. + +**Example request** + + GET /v1.22/images/get?names=myname%2Fmyapp%3Alatest&names=busybox + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + Binary data stream + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Load a tarball with a set of images and tags into docker + +`POST /images/load` + +Load a set of images and tags into a Docker repository. +See the [image tarball format](#image-tarball-format) for more details. + +**Example request** + + POST /v1.22/images/load + Content-Type: application/x-tar + + Tarball in body + +**Example response**: + + HTTP/1.1 200 OK + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Image tarball format + +An image tarball contains one directory per image layer (named using its long ID), +each containing these files: + +- `VERSION`: currently `1.0` - the file format version +- `json`: detailed layer information, similar to `docker inspect layer_id` +- `layer.tar`: A tarfile containing the filesystem changes in this layer + +The `layer.tar` file contains `aufs` style `.wh..wh.aufs` files and directories +for storing attribute changes and deletions. + +If the tarball defines a repository, the tarball should also include a `repositories` file at +the root that contains a list of repository and tag names mapped to layer IDs. + +``` +{"hello-world": + {"latest": "565a9d68a73f6706862bfe8409a7f659776d4d60a8d096eb4a3cbce6999cc2a1"} +} +``` + +#### Exec Create + +`POST /containers/(id or name)/exec` + +Sets up an exec instance in a running container `id` + +**Example request**: + + POST /v1.22/containers/e90e34656806/exec HTTP/1.1 + Content-Type: application/json + + { + "AttachStdin": true, + "AttachStdout": true, + "AttachStderr": true, + "Cmd": ["sh"], + "DetachKeys": "ctrl-p,ctrl-q", + "Privileged": true, + "Tty": true, + "User": "123:456" + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + { + "Id": "f90e34656806", + "Warnings":[] + } + +**JSON parameters**: + +- **AttachStdin** - Boolean value, attaches to `stdin` of the `exec` command. +- **AttachStdout** - Boolean value, attaches to `stdout` of the `exec` command. +- **AttachStderr** - Boolean value, attaches to `stderr` of the `exec` command. +- **DetachKeys** – Override the key sequence for detaching a + container. Format is a single character `[a-Z]` or `ctrl-` + where `` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. 
+- **Tty** - Boolean value to allocate a pseudo-TTY. +- **Cmd** - Command to run specified as a string or an array of strings. +- **Privileged** - Boolean value, runs the exec process with extended privileges. +- **User** - A string value specifying the user, and optionally, group to run + the exec process inside the container. Format is one of: `"user"`, + `"user:group"`, `"uid"`, or `"uid:gid"`. + +**Status codes**: + +- **201** – no error +- **404** – no such container +- **409** - container is paused +- **500** - server error + +#### Exec Start + +`POST /exec/(id)/start` + +Starts a previously set up `exec` instance `id`. If `detach` is true, this API +returns after starting the `exec` command. Otherwise, this API sets up an +interactive session with the `exec` command. + +**Example request**: + + POST /v1.22/exec/e90e34656806/start HTTP/1.1 + Content-Type: application/json + + { + "Detach": false, + "Tty": false + } + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/vnd.docker.raw-stream + + {% raw %} + {{ STREAM }} + {% endraw %} + +**JSON parameters**: + +- **Detach** - Detach from the `exec` command. +- **Tty** - Boolean value to allocate a pseudo-TTY. + +**Status codes**: + +- **200** – no error +- **404** – no such exec instance +- **409** - container is paused + +**Stream details**: + +Similar to the stream behavior of `POST /containers/(id or name)/attach` API + +#### Exec Resize + +`POST /exec/(id)/resize` + +Resizes the `tty` session used by the `exec` command `id`. The unit is number of characters. +This API is valid only if `tty` was specified as part of creating and starting the `exec` command. + +**Example request**: + + POST /v1.22/exec/e90e34656806/resize?h=40&w=80 HTTP/1.1 + Content-Type: text/plain + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: text/plain + +**Query parameters**: + +- **h** – height of `tty` session +- **w** – width + +**Status codes**: + +- **201** – no error +- **404** – no such exec instance + +#### Exec Inspect + +`GET /exec/(id)/json` + +Return low-level information about the `exec` command `id`. + +**Example request**: + + GET /v1.22/exec/11fb006128e8ceb3942e7c58d77750f24210e35f879dd204ac975c184b820b39/json HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "CanRemove": false, + "ContainerID": "b53ee82b53a40c7dca428523e34f741f3abc51d9f297a14ff874bf761b995126", + "DetachKeys": "", + "ExitCode": 2, + "ID": "f33bbfb39f5b142420f4759b2348913bd4a8d1a6d7fd56499cb41a1bb91d7b3b", + "OpenStderr": true, + "OpenStdin": true, + "OpenStdout": true, + "ProcessConfig": { + "arguments": [ + "-c", + "exit 2" + ], + "entrypoint": "sh", + "privileged": false, + "tty": true, + "user": "1000" + }, + "Running": false + } + +**Status codes**: + +- **200** – no error +- **404** – no such exec instance +- **500** - server error + +### 2.4 Volumes + +#### List volumes + +`GET /volumes` + +**Example request**: + + GET /v1.22/volumes HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Volumes": [ + { + "Name": "tardis", + "Driver": "local", + "Mountpoint": "/var/lib/docker/volumes/tardis" + } + ], + "Warnings": [] + } + +**Query parameters**: + +- **filters** - JSON encoded value of the filters (a `map[string][]string`) to process on the volumes list. 
There is one available filter: `dangling=true` + +**Status codes**: + +- **200** - no error +- **500** - server error + +#### Create a volume + +`POST /volumes/create` + +Create a volume + +**Example request**: + + POST /v1.22/volumes/create HTTP/1.1 + Content-Type: application/json + + { + "Name": "tardis" + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + { + "Name": "tardis", + "Driver": "local", + "Mountpoint": "/var/lib/docker/volumes/tardis" + } + +**Status codes**: + +- **201** - no error +- **500** - server error + +**JSON parameters**: + +- **Name** - The new volume's name. If not specified, Docker generates a name. +- **Driver** - Name of the volume driver to use. Defaults to `local` for the name. +- **DriverOpts** - A mapping of driver options and values. These options are + passed directly to the driver and are driver specific. + +#### Inspect a volume + +`GET /volumes/(name)` + +Return low-level information on the volume `name` + +**Example request**: + + GET /volumes/tardis + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Name": "tardis", + "Driver": "local", + "Mountpoint": "/var/lib/docker/volumes/tardis" + } + +**Status codes**: + +- **200** - no error +- **404** - no such volume +- **500** - server error + +#### Remove a volume + +`DELETE /volumes/(name)` + +Instruct the driver to remove the volume (`name`). + +**Example request**: + + DELETE /v1.22/volumes/tardis HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Status codes**: + +- **204** - no error +- **404** - no such volume or volume driver +- **409** - volume is in use and cannot be removed +- **500** - server error + +### 2.5 Networks + +#### List networks + +`GET /networks` + +**Example request**: + + GET /v1.22/networks?filters={"type":{"custom":true}} HTTP/1.1 + +**Example response**: + +``` +HTTP/1.1 200 OK +Content-Type: application/json + +[ + { + "Name": "bridge", + "Id": "f2de39df4171b0dc801e8002d1d999b77256983dfc63041c0f34030aa3977566", + "Scope": "local", + "Driver": "bridge", + "IPAM": { + "Driver": "default", + "Config": [ + { + "Subnet": "172.17.0.0/16" + } + ] + }, + "Containers": { + "39b69226f9d79f5634485fb236a23b2fe4e96a0a94128390a7fbbcc167065867": { + "EndpointID": "ed2419a97c1d9954d05b46e462e7002ea552f216e9b136b80a7db8d98b442eda", + "MacAddress": "02:42:ac:11:00:02", + "IPv4Address": "172.17.0.2/16", + "IPv6Address": "" + } + }, + "Options": { + "com.docker.network.bridge.default_bridge": "true", + "com.docker.network.bridge.enable_icc": "true", + "com.docker.network.bridge.enable_ip_masquerade": "true", + "com.docker.network.bridge.host_binding_ipv4": "0.0.0.0", + "com.docker.network.bridge.name": "docker0", + "com.docker.network.driver.mtu": "1500" + } + }, + { + "Name": "none", + "Id": "e086a3893b05ab69242d3c44e49483a3bbbd3a26b46baa8f61ab797c1088d794", + "Scope": "local", + "Driver": "null", + "IPAM": { + "Driver": "default", + "Config": [] + }, + "Containers": {}, + "Options": {} + }, + { + "Name": "host", + "Id": "13e871235c677f196c4e1ecebb9dc733b9b2d2ab589e30c539efeda84a24215e", + "Scope": "local", + "Driver": "host", + "IPAM": { + "Driver": "default", + "Config": [] + }, + "Containers": {}, + "Options": {} + } +] +``` + +**Query parameters**: + +- **filters** - JSON encoded network list filter. The filter value is one of: + - `id=` Matches all or part of a network id. + - `name=` Matches all or part of a network name. + - `type=["custom"|"builtin"]` Filters networks by type. 
The `custom` keyword returns all user-defined networks. + +**Status codes**: + +- **200** - no error +- **500** - server error + +#### Inspect network + +`GET /networks/(id or name)` + +Return low-level information on the network `id` + +**Example request**: + + GET /v1.22/networks/7d86d31b1478e7cca9ebed7e73aa0fdeec46c5ca29497431d3007d2d9e15ed99 HTTP/1.1 + +**Example response**: + +``` +HTTP/1.1 200 OK +Content-Type: application/json + +{ + "Name": "net01", + "Id": "7d86d31b1478e7cca9ebed7e73aa0fdeec46c5ca29497431d3007d2d9e15ed99", + "Scope": "local", + "Driver": "bridge", + "IPAM": { + "Driver": "default", + "Config": [ + { + "Subnet": "172.19.0.0/16", + "Gateway": "172.19.0.1/16" + } + ], + "Options": { + "foo": "bar" + } + }, + "Containers": { + "19a4d5d687db25203351ed79d478946f861258f018fe384f229f2efa4b23513c": { + "Name": "test", + "EndpointID": "628cadb8bcb92de107b2a1e516cbffe463e321f548feb37697cce00ad694f21a", + "MacAddress": "02:42:ac:13:00:02", + "IPv4Address": "172.19.0.2/16", + "IPv6Address": "" + } + }, + "Options": { + "com.docker.network.bridge.default_bridge": "true", + "com.docker.network.bridge.enable_icc": "true", + "com.docker.network.bridge.enable_ip_masquerade": "true", + "com.docker.network.bridge.host_binding_ipv4": "0.0.0.0", + "com.docker.network.bridge.name": "docker0", + "com.docker.network.driver.mtu": "1500" + } +} +``` + +**Status codes**: + +- **200** - no error +- **404** - network not found +- **500** - server error + +#### Create a network + +`POST /networks/create` + +Create a network + +**Example request**: + +``` +POST /v1.22/networks/create HTTP/1.1 +Content-Type: application/json + +{ + "Name":"isolated_nw", + "CheckDuplicate":true, + "Driver":"bridge", + "IPAM":{ + "Driver": "default", + "Config":[ + { + "Subnet":"172.20.0.0/16", + "IPRange":"172.20.10.0/24", + "Gateway":"172.20.10.11" + }, + { + "Subnet":"2001:db8:abcd::/64", + "Gateway":"2001:db8:abcd::1011" + } + ], + "Options": { + "foo": "bar" + } + }, + "Internal":true +} +``` + +**Example response**: + +``` +HTTP/1.1 201 Created +Content-Type: application/json + +{ + "Id": "22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30", + "Warning": "" +} +``` + +**Status codes**: + +- **201** - no error +- **404** - plugin not found +- **500** - server error + +**JSON parameters**: + +- **Name** - The new network's name. this is a mandatory field +- **CheckDuplicate** - Requests daemon to check for networks with same name. Defaults to `false`. + Since Network is primarily keyed based on a random ID and not on the name, + and network name is strictly a user-friendly alias to the network + which is uniquely identified using ID, there is no guaranteed way to check for duplicates. + This parameter CheckDuplicate is there to provide a best effort checking of any networks + which has the same name but it is not guaranteed to catch all name collisions. +- **Driver** - Name of the network driver plugin to use. Defaults to `bridge` driver +- **IPAM** - Optional custom IP scheme for the network + - **Driver** - Name of the IPAM driver to use. 
Defaults to the `default` driver.
+  - **Config** - List of IPAM configuration options, specified as a map:
+      `{"Subnet": <CIDR>, "IPRange": <CIDR>, "Gateway": <IP address>, "AuxAddress": <device_name:IP address>}`
+  - **Options** - Driver-specific options, specified as a map: `{"option":"value" [,"option2":"value2"]}`
+- **Options** - Network specific options to be used by the drivers
+
+#### Connect a container to a network
+
+`POST /networks/(id or name)/connect`
+
+Connect a container to a network
+
+**Example request**:
+
+```
+POST /v1.22/networks/22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30/connect HTTP/1.1
+Content-Type: application/json
+
+{
+  "Container":"3613f73ba0e4",
+  "EndpointConfig": {
+    "IPAMConfig": {
+        "IPv4Address":"172.24.56.89",
+        "IPv6Address":"2001:db8::5689"
+    }
+  }
+}
+```
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+
+**Status codes**:
+
+- **200** - no error
+- **404** - network or container is not found
+- **500** - Internal Server Error
+
+**JSON parameters**:
+
+- **container** - container-id/name to be connected to the network
+
+#### Disconnect a container from a network
+
+`POST /networks/(id or name)/disconnect`
+
+Disconnect a container from a network
+
+**Example request**:
+
+```
+POST /v1.22/networks/22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30/disconnect HTTP/1.1
+Content-Type: application/json
+
+{
+  "Container":"3613f73ba0e4",
+  "Force":false
+}
+```
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+
+**Status codes**:
+
+- **200** - no error
+- **404** - network or container not found
+- **500** - Internal Server Error
+
+**JSON parameters**:
+
+- **Container** - container-id/name to be disconnected from a network
+- **Force** - Force the container to disconnect from a network
+
+#### Remove a network
+
+`DELETE /networks/(id or name)`
+
+Instruct the driver to remove the network (`id`).
+
+**Example request**:
+
+    DELETE /v1.22/networks/22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30 HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+
+**Status codes**:
+
+- **200** - no error
+- **403** - operation not supported for pre-defined networks
+- **404** - no such network
+- **500** - server error
+
+## 3. Going further
+
+### 3.1 Inside `docker run`
+
+As an example, the `docker run` command line makes the following API calls:
+
+- Create the container
+
+- If the status code is 404, it means the image doesn't exist:
+    - Try to pull it.
+    - Then, retry to create the container.
+
+- Start the container.
+
+- If you are not in detached mode:
+    - Attach to the container, using `logs=1` (to have `stdout` and
+      `stderr` from the container's start) and `stream=1`
+
+- If in detached mode or only `stdin` is attached, display the container's id.
+
+### 3.2 Hijacking
+
+In this version of the API, `/attach` uses hijacking to transport `stdin`,
+`stdout`, and `stderr` on the same socket.
+
+To hint potential proxies about connection hijacking, the Docker client sends
+connection upgrade headers, similarly to websocket.
+
+    Upgrade: tcp
+    Connection: Upgrade
+
+When the Docker daemon detects the `Upgrade` header, it switches its status code
+from **200 OK** to **101 UPGRADED** and resends the same headers.
+
+
+### 3.3 CORS Requests
+
+To allow cross-origin requests to the Engine API, give a value to
+`--api-cors-header` when running Docker in daemon mode.
Set * (asterisk) allows all, +default or blank means CORS disabled + + $ dockerd -H="192.168.1.9:2375" --api-cors-header="http://foo.bar" diff --git a/components/engine/docs/api/v1.23.md b/components/engine/docs/api/v1.23.md new file mode 100644 index 0000000000..508a721c75 --- /dev/null +++ b/components/engine/docs/api/v1.23.md @@ -0,0 +1,3436 @@ +--- +title: "Engine API v1.23" +description: "API Documentation for Docker" +keywords: "API, Docker, rcli, REST, documentation" +redirect_from: +- /engine/reference/api/docker_remote_api_v1.23/ +- /reference/api/docker_remote_api_v1.23/ +--- + + + +## 1. Brief introduction + + - The daemon listens on `unix:///var/run/docker.sock` but you can + [Bind Docker to another host/port or a Unix socket](../reference/commandline/dockerd.md#bind-docker-to-another-host-port-or-a-unix-socket). + - The API tends to be REST. However, for some complex commands, like `attach` + or `pull`, the HTTP connection is hijacked to transport `stdout`, + `stdin` and `stderr`. + - When the client API version is newer than the daemon's, these calls return an HTTP + `400 Bad Request` error message. + +## 2. Endpoints + +### 2.1 Containers + +#### List containers + +`GET /containers/json` + +List containers + +**Example request**: + + GET /v1.23/containers/json?all=1&before=8dfafdbc3a40&size=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Id": "8dfafdbc3a40", + "Names":["/boring_feynman"], + "Image": "ubuntu:latest", + "ImageID": "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82", + "Command": "echo 1", + "Created": 1367854155, + "State": "exited", + "Status": "Exit 0", + "Ports": [{"PrivatePort": 2222, "PublicPort": 3333, "Type": "tcp"}], + "Labels": { + "com.example.vendor": "Acme", + "com.example.license": "GPL", + "com.example.version": "1.0" + }, + "SizeRw": 12288, + "SizeRootFs": 0, + "HostConfig": { + "NetworkMode": "default" + }, + "NetworkSettings": { + "Networks": { + "bridge": { + "IPAMConfig": null, + "Links": null, + "Aliases": null, + "NetworkID": "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812", + "EndpointID": "2cdc4edb1ded3631c81f57966563e5c8525b81121bb3706a9a9a3ae102711f3f", + "Gateway": "172.17.0.1", + "IPAddress": "172.17.0.2", + "IPPrefixLen": 16, + "IPv6Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "MacAddress": "02:42:ac:11:00:02" + } + } + }, + "Mounts": [ + { + "Name": "fac362...80535", + "Source": "/data", + "Destination": "/data", + "Driver": "local", + "Mode": "ro,Z", + "RW": false, + "Propagation": "" + } + ] + }, + { + "Id": "9cd87474be90", + "Names":["/coolName"], + "Image": "ubuntu:latest", + "ImageID": "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82", + "Command": "echo 222222", + "Created": 1367854155, + "State": "exited", + "Status": "Exit 0", + "Ports": [], + "Labels": {}, + "SizeRw": 12288, + "SizeRootFs": 0, + "HostConfig": { + "NetworkMode": "default" + }, + "NetworkSettings": { + "Networks": { + "bridge": { + "IPAMConfig": null, + "Links": null, + "Aliases": null, + "NetworkID": "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812", + "EndpointID": "88eaed7b37b38c2a3f0c4bc796494fdf51b270c2d22656412a2ca5d559a64d7a", + "Gateway": "172.17.0.1", + "IPAddress": "172.17.0.8", + "IPPrefixLen": 16, + "IPv6Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "MacAddress": "02:42:ac:11:00:08" + } + } + }, + "Mounts": [] + }, + { + "Id": "3176a2479c92", + "Names":["/sleepy_dog"], + 
"Image": "ubuntu:latest", + "ImageID": "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82", + "Command": "echo 3333333333333333", + "Created": 1367854154, + "State": "exited", + "Status": "Exit 0", + "Ports":[], + "Labels": {}, + "SizeRw":12288, + "SizeRootFs":0, + "HostConfig": { + "NetworkMode": "default" + }, + "NetworkSettings": { + "Networks": { + "bridge": { + "IPAMConfig": null, + "Links": null, + "Aliases": null, + "NetworkID": "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812", + "EndpointID": "8b27c041c30326d59cd6e6f510d4f8d1d570a228466f956edf7815508f78e30d", + "Gateway": "172.17.0.1", + "IPAddress": "172.17.0.6", + "IPPrefixLen": 16, + "IPv6Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "MacAddress": "02:42:ac:11:00:06" + } + } + }, + "Mounts": [] + }, + { + "Id": "4cb07b47f9fb", + "Names":["/running_cat"], + "Image": "ubuntu:latest", + "ImageID": "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82", + "Command": "echo 444444444444444444444444444444444", + "Created": 1367854152, + "State": "exited", + "Status": "Exit 0", + "Ports": [], + "Labels": {}, + "SizeRw": 12288, + "SizeRootFs": 0, + "HostConfig": { + "NetworkMode": "default" + }, + "NetworkSettings": { + "Networks": { + "bridge": { + "IPAMConfig": null, + "Links": null, + "Aliases": null, + "NetworkID": "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812", + "EndpointID": "d91c7b2f0644403d7ef3095985ea0e2370325cd2332ff3a3225c4247328e66e9", + "Gateway": "172.17.0.1", + "IPAddress": "172.17.0.5", + "IPPrefixLen": 16, + "IPv6Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "MacAddress": "02:42:ac:11:00:05" + } + } + }, + "Mounts": [] + } + ] + +**Query parameters**: + +- **all** – 1/True/true or 0/False/false, Show all containers. + Only running containers are shown by default (i.e., this defaults to false) +- **limit** – Show `limit` last created + containers, include non-running ones. +- **since** – Show only containers created since Id, include + non-running ones. +- **before** – Show only containers created before Id, include + non-running ones. +- **size** – 1/True/true or 0/False/false, Show the containers + sizes +- **filters** - a JSON encoded value of the filters (a `map[string][]string`) to process on the containers list. 
Available filters: + - `exited=`; -- containers with exit code of `` ; + - `status=`(`created`|`restarting`|`running`|`paused`|`exited`|`dead`) + - `label=key` or `label="key=value"` of a container label + - `isolation=`(`default`|`process`|`hyperv`) (Windows daemon only) + - `ancestor`=(`[:]`, `` or ``) + - `before`=(`` or ``) + - `since`=(`` or ``) + - `volume`=(`` or ``) + +**Status codes**: + +- **200** – no error +- **400** – bad parameter +- **500** – server error + +#### Create a container + +`POST /containers/create` + +Create a container + +**Example request**: + + POST /v1.23/containers/create HTTP/1.1 + Content-Type: application/json + + { + "Hostname": "", + "Domainname": "", + "User": "", + "AttachStdin": false, + "AttachStdout": true, + "AttachStderr": true, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": [ + "FOO=bar", + "BAZ=quux" + ], + "Cmd": [ + "date" + ], + "Entrypoint": "", + "Image": "ubuntu", + "Labels": { + "com.example.vendor": "Acme", + "com.example.license": "GPL", + "com.example.version": "1.0" + }, + "Volumes": { + "/volumes/data": {} + }, + "WorkingDir": "", + "NetworkDisabled": false, + "MacAddress": "12:34:56:78:9a:bc", + "ExposedPorts": { + "22/tcp": {} + }, + "StopSignal": "SIGTERM", + "HostConfig": { + "Binds": ["/tmp:/tmp"], + "Tmpfs": { "/run": "rw,noexec,nosuid,size=65536k" }, + "Links": ["redis3:redis"], + "Memory": 0, + "MemorySwap": 0, + "MemoryReservation": 0, + "KernelMemory": 0, + "CpuShares": 512, + "CpuPeriod": 100000, + "CpuQuota": 50000, + "CpusetCpus": "0,1", + "CpusetMems": "0,1", + "BlkioWeight": 300, + "BlkioWeightDevice": [{}], + "BlkioDeviceReadBps": [{}], + "BlkioDeviceReadIOps": [{}], + "BlkioDeviceWriteBps": [{}], + "BlkioDeviceWriteIOps": [{}], + "MemorySwappiness": 60, + "OomKillDisable": false, + "OomScoreAdj": 500, + "PidMode": "", + "PidsLimit": -1, + "PortBindings": { "22/tcp": [{ "HostPort": "11022" }] }, + "PublishAllPorts": false, + "Privileged": false, + "ReadonlyRootfs": false, + "Dns": ["8.8.8.8"], + "DnsOptions": [""], + "DnsSearch": [""], + "ExtraHosts": null, + "VolumesFrom": ["parent", "other:ro"], + "CapAdd": ["NET_ADMIN"], + "CapDrop": ["MKNOD"], + "GroupAdd": ["newgroup"], + "RestartPolicy": { "Name": "", "MaximumRetryCount": 0 }, + "NetworkMode": "bridge", + "Devices": [], + "Ulimits": [{}], + "LogConfig": { "Type": "json-file", "Config": {} }, + "SecurityOpt": [], + "CgroupParent": "", + "VolumeDriver": "", + "ShmSize": 67108864 + }, + "NetworkingConfig": { + "EndpointsConfig": { + "isolated_nw" : { + "IPAMConfig": { + "IPv4Address":"172.20.30.33", + "IPv6Address":"2001:db8:abcd::3033" + }, + "Links":["container_1", "container_2"], + "Aliases":["server_x", "server_y"] + } + } + } + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + { + "Id":"e90e34656806", + "Warnings":[] + } + +**JSON parameters**: + +- **Hostname** - A string value containing the hostname to use for the + container. +- **Domainname** - A string value containing the domain name to use + for the container. +- **User** - A string value specifying the user inside the container. +- **AttachStdin** - Boolean value, attaches to `stdin`. +- **AttachStdout** - Boolean value, attaches to `stdout`. +- **AttachStderr** - Boolean value, attaches to `stderr`. +- **Tty** - Boolean value, Attach standard streams to a `tty`, including `stdin` if it is not closed. +- **OpenStdin** - Boolean value, opens `stdin`, +- **StdinOnce** - Boolean value, close `stdin` after the 1 attached client disconnects. 
+- **Env** - A list of environment variables in the form of `["VAR=value", ...]`
+- **Labels** - Adds a map of labels to a container. To specify a map: `{"key":"value", ... }`
+- **Cmd** - Command to run specified as a string or an array of strings.
+- **Entrypoint** - Set the entry point for the container as a string or an array
+  of strings.
+- **Image** - A string specifying the image name to use for the container.
+- **Volumes** - An object mapping mount point paths (strings) inside the
+  container to empty objects.
+- **WorkingDir** - A string specifying the working directory for commands to
+  run in.
+- **NetworkDisabled** - Boolean value, when true disables networking for the
+  container
+- **ExposedPorts** - An object mapping ports to an empty object in the form of:
+  `"ExposedPorts": { "<port>/<tcp|udp>": {} }`
+- **StopSignal** - Signal to stop a container as a string or unsigned integer. `SIGTERM` by default.
+- **HostConfig**
+  - **Binds** – A list of volume bindings for this container. Each volume binding is a string in one of these forms:
+    + `host-src:container-dest` to bind-mount a host path into the
+      container. Both `host-src`, and `container-dest` must be an
+      _absolute_ path.
+    + `host-src:container-dest:ro` to make the bind-mount read-only
+      inside the container. Both `host-src`, and `container-dest` must be
+      an _absolute_ path.
+    + `volume-name:container-dest` to bind-mount a volume managed by a
+      volume driver into the container. `container-dest` must be an
+      _absolute_ path.
+    + `volume-name:container-dest:ro` to mount the volume read-only
+      inside the container. `container-dest` must be an _absolute_ path.
+  - **Tmpfs** – A map of container directories which should be replaced by tmpfs mounts, and their corresponding
+    mount options. A JSON object in the form `{ "/run": "rw,noexec,nosuid,size=65536k" }`.
+  - **Links** - A list of links for the container. Each link entry should be
+    in the form of `container_name:alias`.
+  - **Memory** - Memory limit in bytes.
+  - **MemorySwap** - Total memory limit (memory + swap); set `-1` to enable unlimited swap.
+    You must use this with `memory` and make the swap value larger than `memory`.
+  - **MemoryReservation** - Memory soft limit in bytes.
+  - **KernelMemory** - Kernel memory limit in bytes.
+  - **CpuShares** - An integer value containing the container's CPU Shares
+    (i.e., the relative weight vs. other containers).
+  - **CpuPeriod** - The length of a CPU period in microseconds.
+  - **CpuQuota** - Microseconds of CPU time that the container can get in a CPU period.
+  - **CpusetCpus** - String value containing the cgroups `CpusetCpus` to use.
+  - **CpusetMems** - Memory nodes (MEMs) in which to allow execution (`0-3`, `0,1`). Only effective on NUMA systems.
+  - **BlkioWeight** - Block IO weight (relative weight), accepts a weight value between 10 and 1000.
+  - **BlkioWeightDevice** - Block IO weight (relative device weight) in the form of: `"BlkioWeightDevice": [{"Path": "device_path", "Weight": weight}]`
+  - **BlkioDeviceReadBps** - Limit read rate (bytes per second) from a device in the form of: `"BlkioDeviceReadBps": [{"Path": "device_path", "Rate": rate}]`, for example:
+    `"BlkioDeviceReadBps": [{"Path": "/dev/sda", "Rate": "1024"}]`
+  - **BlkioDeviceWriteBps** - Limit write rate (bytes per second) to a device in the form of: `"BlkioDeviceWriteBps": [{"Path": "device_path", "Rate": rate}]`, for example:
+    `"BlkioDeviceWriteBps": [{"Path": "/dev/sda", "Rate": "1024"}]`
+  - **BlkioDeviceReadIOps** - Limit read rate (IO per second) from a device in the form of: `"BlkioDeviceReadIOps": [{"Path": "device_path", "Rate": rate}]`, for example:
+    `"BlkioDeviceReadIOps": [{"Path": "/dev/sda", "Rate": "1000"}]`
+  - **BlkioDeviceWriteIOps** - Limit write rate (IO per second) to a device in the form of: `"BlkioDeviceWriteIOps": [{"Path": "device_path", "Rate": rate}]`, for example:
+    `"BlkioDeviceWriteIOps": [{"Path": "/dev/sda", "Rate": "1000"}]`
+  - **MemorySwappiness** - Tune a container's memory swappiness behavior. Accepts an integer between 0 and 100.
+  - **OomKillDisable** - Boolean value, whether to disable OOM Killer for the container or not.
+  - **OomScoreAdj** - An integer value containing the score given to the container in order to tune OOM killer preferences.
+  - **PidMode** - Set the PID (Process) Namespace mode for the container;
+    `"container:<name|id>"`: joins another container's PID namespace
+    `"host"`: use the host's PID namespace inside the container
+  - **PidsLimit** - Tune a container's pids limit. Set -1 for unlimited.
+  - **PortBindings** - A map of exposed container ports and the host port they
+    should map to. A JSON object in the form
+    `{ <port>/<protocol>: [{ "HostPort": "<port>" }] }`
+    Take note that `port` is specified as a string and not an integer value.
+  - **PublishAllPorts** - Allocates a random host port for all of a container's
+    exposed ports. Specified as a boolean value.
+  - **Privileged** - Gives the container full access to the host. Specified as
+    a boolean value.
+  - **ReadonlyRootfs** - Mount the container's root filesystem as read only.
+    Specified as a boolean value.
+  - **Dns** - A list of DNS servers for the container to use.
+  - **DnsOptions** - A list of DNS options
+  - **DnsSearch** - A list of DNS search domains
+  - **ExtraHosts** - A list of hostnames/IP mappings to add to the
+    container's `/etc/hosts` file. Specified in the form `["hostname:IP"]`.
+  - **VolumesFrom** - A list of volumes to inherit from another container.
+    Specified in the form `<container name>[:<ro|rw>]`
+  - **CapAdd** - A list of kernel capabilities to add to the container.
+  - **CapDrop** - A list of kernel capabilities to drop from the container.
+  - **GroupAdd** - A list of additional groups that the container process will run as
+  - **RestartPolicy** – The behavior to apply when the container exits. The
+    value is an object with a `Name` property of either `"always"` to
+    always restart, `"unless-stopped"` to always restart except when the
+    user has manually stopped the container, or `"on-failure"` to restart only when the container
+    exit code is non-zero. If `on-failure` is used, `MaximumRetryCount`
+    controls the number of times to retry before giving up.
+    The default is not to restart. (optional)
+    An ever-increasing delay (double the previous delay, starting at 100 ms)
+    is added before each restart to prevent flooding the server.
+  - **UsernsMode** - Sets the user namespace mode for the container when user namespace remapping is enabled.
+    Supported values are: `host`.
+  - **NetworkMode** - Sets the networking mode for the container. Supported
+    standard values are: `bridge`, `host`, `none`, and `container:<name|id>`. Any other value is taken
+    as a custom network's name to which this container should connect.
+  - **Devices** - A list of devices to add to the container specified as a JSON object in the
+    form
+    `{ "PathOnHost": "/dev/deviceName", "PathInContainer": "/dev/deviceName", "CgroupPermissions": "mrw"}`
+  - **Ulimits** - A list of ulimits to set in the container, specified as
+    `{ "Name": <name>, "Soft": <soft limit>, "Hard": <hard limit> }`, for example:
+    `Ulimits: { "Name": "nofile", "Soft": 1024, "Hard": 2048 }`
+  - **SecurityOpt**: A list of string values to customize labels for MLS
+    systems, such as SELinux.
+  - **LogConfig** - Log configuration for the container, specified as a JSON object in the form
+    `{ "Type": "<driver_name>", "Config": {"key1": "val1"} }`.
+    Available types: `json-file`, `syslog`, `journald`, `gelf`, `fluentd`, `awslogs`, `splunk`, `etwlogs`, `none`.
+    The default is the `json-file` logging driver.
+  - **CgroupParent** - Path to `cgroups` under which the container's `cgroup` is created. If the path is not absolute, the path is considered to be relative to the `cgroups` path of the init process. Cgroups are created if they do not already exist.
+  - **VolumeDriver** - Driver that this container uses to mount volumes.
+  - **ShmSize** - Size of `/dev/shm` in bytes. The size must be greater than 0. If omitted, the system uses 64MB.
+
+**Query parameters**:
+
+- **name** – Assign the specified name to the container. Must
+  match `/?[a-zA-Z0-9_-]+`.
+
+**Status codes**:
+
+- **201** – no error
+- **400** – bad parameter
+- **404** – no such container
+- **406** – impossible to attach (container not running)
+- **409** – conflict
+- **500** – server error
+
+#### Inspect a container
+
+`GET /containers/(id or name)/json`
+
+Return low-level information on the container `id`
+
+**Example request**:
+
+    GET /v1.23/containers/4fa6e0f0c678/json HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    {
+      "AppArmorProfile": "",
+      "Args": [
+        "-c",
+        "exit 9"
+      ],
+      "Config": {
+        "AttachStderr": true,
+        "AttachStdin": false,
+        "AttachStdout": true,
+        "Cmd": [
+          "/bin/sh",
+          "-c",
+          "exit 9"
+        ],
+        "Domainname": "",
+        "Entrypoint": null,
+        "Env": [
+          "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
+        ],
+        "ExposedPorts": null,
+        "Hostname": "ba033ac44011",
+        "Image": "ubuntu",
+        "Labels": {
+          "com.example.vendor": "Acme",
+          "com.example.license": "GPL",
+          "com.example.version": "1.0"
+        },
+        "MacAddress": "",
+        "NetworkDisabled": false,
+        "OnBuild": null,
+        "OpenStdin": false,
+        "StdinOnce": false,
+        "Tty": false,
+        "User": "",
+        "Volumes": {
+          "/volumes/data": {}
+        },
+        "WorkingDir": "",
+        "StopSignal": "SIGTERM"
+      },
+      "Created": "2015-01-06T15:47:31.485331387Z",
+      "Driver": "devicemapper",
+      "ExecIDs": null,
+      "HostConfig": {
+        "Binds": null,
+        "BlkioWeight": 0,
+        "BlkioWeightDevice": [{}],
+        "BlkioDeviceReadBps": [{}],
+        "BlkioDeviceWriteBps": [{}],
+        "BlkioDeviceReadIOps": [{}],
+        "BlkioDeviceWriteIOps": [{}],
+        "CapAdd": null,
+        "CapDrop": null,
+        "ContainerIDFile": "",
+        "CpusetCpus": "",
+        "CpusetMems": "",
+        "CpuShares": 0,
+        "CpuPeriod": 100000,
+        "Devices": [],
+        "Dns": null,
+        "DnsOptions": null,
+        "DnsSearch": null,
+        "ExtraHosts": null,
+        "IpcMode": "",
+        "Links": null,
+        "LxcConf": [],
"Memory": 0, + "MemorySwap": 0, + "MemoryReservation": 0, + "KernelMemory": 0, + "OomKillDisable": false, + "OomScoreAdj": 500, + "NetworkMode": "bridge", + "PidMode": "", + "PortBindings": {}, + "Privileged": false, + "ReadonlyRootfs": false, + "PublishAllPorts": false, + "RestartPolicy": { + "MaximumRetryCount": 2, + "Name": "on-failure" + }, + "LogConfig": { + "Config": null, + "Type": "json-file" + }, + "SecurityOpt": null, + "VolumesFrom": null, + "Ulimits": [{}], + "VolumeDriver": "", + "ShmSize": 67108864 + }, + "HostnamePath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hostname", + "HostsPath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hosts", + "LogPath": "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b-json.log", + "Id": "ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39", + "Image": "04c5d3b7b0656168630d3ba35d8889bd0e9caafcaeb3004d2bfbc47e7c5d35d2", + "MountLabel": "", + "Name": "/boring_euclid", + "NetworkSettings": { + "Bridge": "", + "SandboxID": "", + "HairpinMode": false, + "LinkLocalIPv6Address": "", + "LinkLocalIPv6PrefixLen": 0, + "Ports": null, + "SandboxKey": "", + "SecondaryIPAddresses": null, + "SecondaryIPv6Addresses": null, + "EndpointID": "", + "Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "IPAddress": "", + "IPPrefixLen": 0, + "IPv6Gateway": "", + "MacAddress": "", + "Networks": { + "bridge": { + "NetworkID": "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812", + "EndpointID": "7587b82f0dada3656fda26588aee72630c6fab1536d36e394b2bfbcf898c971d", + "Gateway": "172.17.0.1", + "IPAddress": "172.17.0.2", + "IPPrefixLen": 16, + "IPv6Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "MacAddress": "02:42:ac:12:00:02" + } + } + }, + "Path": "/bin/sh", + "ProcessLabel": "", + "ResolvConfPath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/resolv.conf", + "RestartCount": 1, + "State": { + "Error": "", + "ExitCode": 9, + "FinishedAt": "2015-01-06T15:47:32.080254511Z", + "OOMKilled": false, + "Dead": false, + "Paused": false, + "Pid": 0, + "Restarting": false, + "Running": true, + "StartedAt": "2015-01-06T15:47:32.072697474Z", + "Status": "running" + }, + "Mounts": [ + { + "Name": "fac362...80535", + "Source": "/data", + "Destination": "/data", + "Driver": "local", + "Mode": "ro,Z", + "RW": false, + "Propagation": "" + } + ] + } + +**Example request, with size information**: + + GET /v1.23/containers/4fa6e0f0c678/json?size=1 HTTP/1.1 + +**Example response, with size information**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + .... + "SizeRw": 0, + "SizeRootFs": 972, + .... + } + +**Query parameters**: + +- **size** – 1/True/true or 0/False/false, return container size information. Default is `false`. + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### List processes running inside a container + +`GET /containers/(id or name)/top` + +List processes running inside the container `id`. On Unix systems this +is done by running the `ps` command. This endpoint is not +supported on Windows. 
+
+**Example request**:
+
+    GET /v1.23/containers/4fa6e0f0c678/top HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    {
+      "Titles" : [
+        "UID", "PID", "PPID", "C", "STIME", "TTY", "TIME", "CMD"
+      ],
+      "Processes" : [
+        [
+          "root", "13642", "882", "0", "17:03", "pts/0", "00:00:00", "/bin/bash"
+        ],
+        [
+          "root", "13735", "13642", "0", "17:06", "pts/0", "00:00:00", "sleep 10"
+        ]
+      ]
+    }
+
+**Example request**:
+
+    GET /v1.23/containers/4fa6e0f0c678/top?ps_args=aux HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    {
+      "Titles" : [
+        "USER","PID","%CPU","%MEM","VSZ","RSS","TTY","STAT","START","TIME","COMMAND"
+      ],
+      "Processes" : [
+        [
+          "root","13642","0.0","0.1","18172","3184","pts/0","Ss","17:03","0:00","/bin/bash"
+        ],
+        [
+          "root","13895","0.0","0.0","4348","692","pts/0","S+","17:15","0:00","sleep 10"
+        ]
+      ]
+    }
+
+**Query parameters**:
+
+- **ps_args** – `ps` arguments to use (e.g., `aux`), defaults to `-ef`
+
+**Status codes**:
+
+- **200** – no error
+- **404** – no such container
+- **500** – server error
+
+#### Get container logs
+
+`GET /containers/(id or name)/logs`
+
+Get `stdout` and `stderr` logs from the container `id`
+
+> **Note**:
+> This endpoint works only for containers with the `json-file` or `journald` logging drivers.
+
+**Example request**:
+
+    GET /v1.23/containers/4fa6e0f0c678/logs?stderr=1&stdout=1&timestamps=1&follow=1&tail=10&since=1428990821 HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 101 UPGRADED
+    Content-Type: application/vnd.docker.raw-stream
+    Connection: Upgrade
+    Upgrade: tcp
+
+    {% raw %}
+    {{ STREAM }}
+    {% endraw %}
+
+**Query parameters**:
+
+- **follow** – 1/True/true or 0/False/false, return stream. Default `false`.
+- **stdout** – 1/True/true or 0/False/false, show `stdout` log. Default `false`.
+- **stderr** – 1/True/true or 0/False/false, show `stderr` log. Default `false`.
+- **since** – UNIX timestamp (integer) to filter logs. Specifying a timestamp
+  will only output log entries since that timestamp. Default: 0 (unfiltered)
+- **timestamps** – 1/True/true or 0/False/false, print timestamps for
+  every log line. Default `false`.
+- **tail** – Output the specified number of lines at the end of logs: `all` or `<number>`. Default `all`.
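+
+As an illustration (an informal sketch, not part of the canonical examples),
+the last lines of a container's logs could be fetched like this, assuming the
+daemon listens on `tcp://localhost:2375` and a placeholder container ID. Note
+that for containers created without a TTY the body uses the multiplexed
+framing described under [Attach to a container](#attach-to-a-container):
+
+```python
+import http.client
+
+# Assumption: dockerd was started with -H tcp://localhost:2375
+conn = http.client.HTTPConnection("localhost", 2375)
+conn.request("GET", "/v1.23/containers/4fa6e0f0c678/logs?stdout=1&stderr=1&tail=10")
+resp = conn.getresponse()
+
+# Raw body; when the container was created with "Tty": false, each frame
+# is an 8-byte header followed by its payload.
+print(resp.read())
+```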
+ +**Status codes**: + +- **101** – no error, hints proxy about hijacking +- **200** – no error, no upgrade header found +- **404** – no such container +- **500** – server error + +#### Inspect changes on a container's filesystem + +`GET /containers/(id or name)/changes` + +Inspect changes on container `id`'s filesystem + +**Example request**: + + GET /v1.23/containers/4fa6e0f0c678/changes HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Path": "/dev", + "Kind": 0 + }, + { + "Path": "/dev/kmsg", + "Kind": 1 + }, + { + "Path": "/test", + "Kind": 1 + } + ] + +Values for `Kind`: + +- `0`: Modify +- `1`: Add +- `2`: Delete + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Export a container + +`GET /containers/(id or name)/export` + +Export the contents of container `id` + +**Example request**: + + GET /v1.23/containers/4fa6e0f0c678/export HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/octet-stream + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Get container stats based on resource usage + +`GET /containers/(id or name)/stats` + +This endpoint returns a live stream of a container's resource usage statistics. + +**Example request**: + + GET /v1.23/containers/redis1/stats HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "read" : "2015-01-08T22:57:31.547920715Z", + "pids_stats": { + "current": 3 + }, + "networks": { + "eth0": { + "rx_bytes": 5338, + "rx_dropped": 0, + "rx_errors": 0, + "rx_packets": 36, + "tx_bytes": 648, + "tx_dropped": 0, + "tx_errors": 0, + "tx_packets": 8 + }, + "eth5": { + "rx_bytes": 4641, + "rx_dropped": 0, + "rx_errors": 0, + "rx_packets": 26, + "tx_bytes": 690, + "tx_dropped": 0, + "tx_errors": 0, + "tx_packets": 9 + } + }, + "memory_stats" : { + "stats" : { + "total_pgmajfault" : 0, + "cache" : 0, + "mapped_file" : 0, + "total_inactive_file" : 0, + "pgpgout" : 414, + "rss" : 6537216, + "total_mapped_file" : 0, + "writeback" : 0, + "unevictable" : 0, + "pgpgin" : 477, + "total_unevictable" : 0, + "pgmajfault" : 0, + "total_rss" : 6537216, + "total_rss_huge" : 6291456, + "total_writeback" : 0, + "total_inactive_anon" : 0, + "rss_huge" : 6291456, + "hierarchical_memory_limit" : 67108864, + "total_pgfault" : 964, + "total_active_file" : 0, + "active_anon" : 6537216, + "total_active_anon" : 6537216, + "total_pgpgout" : 414, + "total_cache" : 0, + "inactive_anon" : 0, + "active_file" : 0, + "pgfault" : 964, + "inactive_file" : 0, + "total_pgpgin" : 477 + }, + "max_usage" : 6651904, + "usage" : 6537216, + "failcnt" : 0, + "limit" : 67108864 + }, + "blkio_stats" : {}, + "cpu_stats" : { + "cpu_usage" : { + "percpu_usage" : [ + 8646879, + 24472255, + 36438778, + 30657443 + ], + "usage_in_usermode" : 50000000, + "total_usage" : 100215355, + "usage_in_kernelmode" : 30000000 + }, + "system_cpu_usage" : 739306590000000, + "throttling_data" : {"periods":0,"throttled_periods":0,"throttled_time":0} + }, + "precpu_stats" : { + "cpu_usage" : { + "percpu_usage" : [ + 8646879, + 24350896, + 36438778, + 30657443 + ], + "usage_in_usermode" : 50000000, + "total_usage" : 100093996, + "usage_in_kernelmode" : 30000000 + }, + "system_cpu_usage" : 9492140000000, + "throttling_data" : {"periods":0,"throttled_periods":0,"throttled_time":0} + } + } + +The `precpu_stats` is the cpu statistic of last read, 
which is used to calculate the CPU usage percentage; it is not an exact copy of the `cpu_stats` field. The percentage is typically computed as `(cpu_delta / system_delta) * number_of_cpus * 100`, where the deltas are the differences between `cpu_stats` and `precpu_stats` in `cpu_usage.total_usage` and `system_cpu_usage` respectively.
+
+**Query parameters**:
+
+- **stream** – 1/True/true or 0/False/false. If `false`, pull stats once and then disconnect. Default `true`.
+
+**Status codes**:
+
+- **200** – no error
+- **404** – no such container
+- **500** – server error
+
+#### Resize a container TTY
+
+`POST /containers/(id or name)/resize`
+
+Resize the TTY for container with `id`. The unit is number of characters. You must restart the container for the resize to take effect.
+
+**Example request**:
+
+    POST /v1.23/containers/4fa6e0f0c678/resize?h=40&w=80 HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Length: 0
+    Content-Type: text/plain; charset=utf-8
+
+**Query parameters**:
+
+- **h** – height of `tty` session
+- **w** – width
+
+**Status codes**:
+
+- **200** – no error
+- **404** – No such container
+- **500** – Cannot resize container
+
+#### Start a container
+
+`POST /containers/(id or name)/start`
+
+Start the container `id`
+
+> **Note**:
+> For backwards compatibility, this endpoint accepts a `HostConfig` as JSON-encoded request body.
+> See [create a container](#create-a-container) for details.
+
+**Example request**:
+
+    POST /v1.23/containers/e90e34656806/start HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 204 No Content
+
+**Query parameters**:
+
+- **detachKeys** – Override the key sequence for detaching a
+  container. Format is a single character `[a-Z]` or `ctrl-<value>`
+  where `<value>` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`.
+
+**Status codes**:
+
+- **204** – no error
+- **304** – container already started
+- **404** – no such container
+- **500** – server error
+
+#### Stop a container
+
+`POST /containers/(id or name)/stop`
+
+Stop the container `id`
+
+**Example request**:
+
+    POST /v1.23/containers/e90e34656806/stop?t=5 HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 204 No Content
+
+**Query parameters**:
+
+- **t** – number of seconds to wait before killing the container
+
+**Status codes**:
+
+- **204** – no error
+- **304** – container already stopped
+- **404** – no such container
+- **500** – server error
+
+#### Restart a container
+
+`POST /containers/(id or name)/restart`
+
+Restart the container `id`
+
+**Example request**:
+
+    POST /v1.23/containers/e90e34656806/restart?t=5 HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 204 No Content
+
+**Query parameters**:
+
+- **t** – number of seconds to wait before killing the container
+
+**Status codes**:
+
+- **204** – no error
+- **404** – no such container
+- **500** – server error
+
+#### Kill a container
+
+`POST /containers/(id or name)/kill`
+
+Kill the container `id`
+
+**Example request**:
+
+    POST /v1.23/containers/e90e34656806/kill HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 204 No Content
+
+**Query parameters**:
+
+- **signal** - Signal to send to the container: integer or string like `SIGINT`.
+  When not set, `SIGKILL` is assumed and the call waits for the container to exit.
+
+**Status codes**:
+
+- **204** – no error
+- **404** – no such container
+- **500** – server error
+
+#### Update a container
+
+`POST /containers/(id or name)/update`
+
+Update configuration of one or more containers. A minimal client sketch is
+shown below, followed by the raw HTTP exchange.
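+
+The following is a rough, non-authoritative sketch, assuming the daemon is
+reachable on `tcp://localhost:2375` and reusing the placeholder container ID
+from the examples:
+
+```python
+import http.client
+import json
+
+# Assumption: dockerd was started with -H tcp://localhost:2375
+conn = http.client.HTTPConnection("localhost", 2375)
+body = json.dumps({"Memory": 314572800, "MemorySwap": 514288000})
+conn.request(
+    "POST",
+    "/v1.23/containers/e90e34656806/update",
+    body=body,
+    headers={"Content-Type": "application/json"},
+)
+resp = conn.getresponse()
+print(resp.status, json.loads(resp.read()))  # expect 200 and {"Warnings": [...]}
+```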
+
+**Example request**:
+
+    POST /v1.23/containers/e90e34656806/update HTTP/1.1
+    Content-Type: application/json
+
+    {
+      "BlkioWeight": 300,
+      "CpuShares": 512,
+      "CpuPeriod": 100000,
+      "CpuQuota": 50000,
+      "CpusetCpus": "0,1",
+      "CpusetMems": "0",
+      "Memory": 314572800,
+      "MemorySwap": 514288000,
+      "MemoryReservation": 209715200,
+      "KernelMemory": 52428800,
+      "RestartPolicy": {
+        "MaximumRetryCount": 4,
+        "Name": "on-failure"
+      }
+    }
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    {
+      "Warnings": []
+    }
+
+**Status codes**:
+
+- **200** – no error
+- **400** – bad parameter
+- **404** – no such container
+- **500** – server error
+
+#### Rename a container
+
+`POST /containers/(id or name)/rename`
+
+Rename the container `id` to a `new_name`
+
+**Example request**:
+
+    POST /v1.23/containers/e90e34656806/rename?name=new_name HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 204 No Content
+
+**Query parameters**:
+
+- **name** – new name for the container
+
+**Status codes**:
+
+- **204** – no error
+- **404** – no such container
+- **409** - conflict: name already assigned
+- **500** – server error
+
+#### Pause a container
+
+`POST /containers/(id or name)/pause`
+
+Pause the container `id`
+
+**Example request**:
+
+    POST /v1.23/containers/e90e34656806/pause HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 204 No Content
+
+**Status codes**:
+
+- **204** – no error
+- **404** – no such container
+- **500** – server error
+
+#### Unpause a container
+
+`POST /containers/(id or name)/unpause`
+
+Unpause the container `id`
+
+**Example request**:
+
+    POST /v1.23/containers/e90e34656806/unpause HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 204 No Content
+
+**Status codes**:
+
+- **204** – no error
+- **404** – no such container
+- **500** – server error
+
+#### Attach to a container
+
+`POST /containers/(id or name)/attach`
+
+Attach to the container `id`
+
+**Example request**:
+
+    POST /v1.23/containers/16253994b7c4/attach?logs=1&stream=0&stdout=1 HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 101 UPGRADED
+    Content-Type: application/vnd.docker.raw-stream
+    Connection: Upgrade
+    Upgrade: tcp
+
+    {% raw %}
+    {{ STREAM }}
+    {% endraw %}
+
+**Query parameters**:
+
+- **detachKeys** – Override the key sequence for detaching a
+  container. Format is a single character `[a-Z]` or `ctrl-<value>`
+  where `<value>` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`.
+- **logs** – 1/True/true or 0/False/false, return logs. Default `false`.
+- **stream** – 1/True/true or 0/False/false, return stream.
+  Default `false`.
+- **stdin** – 1/True/true or 0/False/false, if `stream=true`, attach
+  to `stdin`. Default `false`.
+- **stdout** – 1/True/true or 0/False/false, if `logs=true`, return
+  `stdout` log, if `stream=true`, attach to `stdout`. Default `false`.
+- **stderr** – 1/True/true or 0/False/false, if `logs=true`, return
+  `stderr` log, if `stream=true`, attach to `stderr`. Default `false`.
+
+**Status codes**:
+
+- **101** – no error, hints proxy about hijacking
+- **200** – no error, no upgrade header found
+- **400** – bad parameter
+- **404** – no such container
+- **409** - container is paused
+- **500** – server error
+
+**Stream details**:
+
+When the TTY setting is enabled in
+[`POST /containers/create`
+](#create-a-container),
+the stream is the raw data from the process PTY and the client's `stdin`.
+When the TTY is disabled, the stream is multiplexed to separate
+`stdout` and `stderr`.
+
+The format is a **Header** and a **Payload** (frame).
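+
+As a sketch of how a client might consume this framing (the exact layout is
+specified under **HEADER** below), a demultiplexer for the non-TTY stream
+could look like the following; `stream` is assumed to be any readable binary
+file-like object carrying the raw hijacked connection:
+
+```python
+import struct
+import sys
+
+def demux(stream):
+    """Split a multiplexed attach stream into stdout and stderr."""
+    while True:
+        header = stream.read(8)
+        if len(header) < 8:
+            break  # end of stream
+        # 1 byte stream type, 3 padding bytes, big-endian uint32 payload size
+        stream_type, size = struct.unpack(">BxxxI", header)
+        payload = stream.read(size)
+        out = sys.stderr.buffer if stream_type == 2 else sys.stdout.buffer
+        out.write(payload)
+```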
+ +**HEADER** + +The header contains the information which the stream writes (`stdout` or +`stderr`). It also contains the size of the associated frame encoded in the +last four bytes (`uint32`). + +It is encoded on the first eight bytes like this: + + header := [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4} + +`STREAM_TYPE` can be: + +- 0: `stdin` (is written on `stdout`) +- 1: `stdout` +- 2: `stderr` + +`SIZE1, SIZE2, SIZE3, SIZE4` are the four bytes of +the `uint32` size encoded as big endian. + +**PAYLOAD** + +The payload is the raw stream. + +**IMPLEMENTATION** + +The simplest way to implement the Attach protocol is the following: + + 1. Read eight bytes. + 2. Choose `stdout` or `stderr` depending on the first byte. + 3. Extract the frame size from the last four bytes. + 4. Read the extracted size and output it on the correct output. + 5. Goto 1. + +#### Attach to a container (websocket) + +`GET /containers/(id or name)/attach/ws` + +Attach to the container `id` via websocket + +Implements websocket protocol handshake according to [RFC 6455](http://tools.ietf.org/html/rfc6455) + +**Example request** + + GET /v1.23/containers/e90e34656806/attach/ws?logs=0&stream=1&stdin=1&stdout=1&stderr=1 HTTP/1.1 + +**Example response** + + {% raw %} + {{ STREAM }} + {% endraw %} + +**Query parameters**: + +- **detachKeys** – Override the key sequence for detaching a + container. Format is a single character `[a-Z]` or `ctrl-` + where `` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. +- **logs** – 1/True/true or 0/False/false, return logs. Default `false`. +- **stream** – 1/True/true or 0/False/false, return stream. + Default `false`. +- **stdin** – 1/True/true or 0/False/false, if `stream=true`, attach + to `stdin`. Default `false`. +- **stdout** – 1/True/true or 0/False/false, if `logs=true`, return + `stdout` log, if `stream=true`, attach to `stdout`. Default `false`. +- **stderr** – 1/True/true or 0/False/false, if `logs=true`, return + `stderr` log, if `stream=true`, attach to `stderr`. Default `false`. + +**Status codes**: + +- **200** – no error +- **400** – bad parameter +- **404** – no such container +- **500** – server error + +#### Wait a container + +`POST /containers/(id or name)/wait` + +Block until container `id` stops, then returns the exit code + +**Example request**: + + POST /v1.23/containers/16253994b7c4/wait HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"StatusCode": 0} + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Remove a container + +`DELETE /containers/(id or name)` + +Remove the container `id` from the filesystem + +**Example request**: + + DELETE /v1.23/containers/16253994b7c4?v=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **v** – 1/True/true or 0/False/false, Remove the volumes + associated to the container. Default `false`. +- **force** - 1/True/true or 0/False/false, Kill then remove the container. + Default `false`. +- **link** - 1/True/true or 0/False/false, Remove the specified + link associated to the container. Default `false`. + +**Status codes**: + +- **204** – no error +- **400** – bad parameter +- **404** – no such container +- **409** – conflict +- **500** – server error + +#### Copy files or folders from a container + +`POST /containers/(id or name)/copy` + +Copy files or folders of container `id` + +**Deprecated** in favor of the `archive` endpoint below. 
+ +**Example request**: + + POST /v1.23/containers/4fa6e0f0c678/copy HTTP/1.1 + Content-Type: application/json + + { + "Resource": "test.txt" + } + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Retrieving information about files and folders in a container + +`HEAD /containers/(id or name)/archive` + +See the description of the `X-Docker-Container-Path-Stat` header in the +following section. + +#### Get an archive of a filesystem resource in a container + +`GET /containers/(id or name)/archive` + +Get a tar archive of a resource in the filesystem of container `id`. + +**Query parameters**: + +- **path** - resource in the container's filesystem to archive. Required. + + If not an absolute path, it is relative to the container's root directory. + The resource specified by **path** must exist. To assert that the resource + is expected to be a directory, **path** should end in `/` or `/.` + (assuming a path separator of `/`). If **path** ends in `/.` then this + indicates that only the contents of the **path** directory should be + copied. A symlink is always resolved to its target. + + > **Note**: It is not possible to copy certain system files such as resources + > under `/proc`, `/sys`, `/dev`, and mounts created by the user in the + > container. + +**Example request**: + + GET /v1.23/containers/8cce319429b2/archive?path=/root HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + X-Docker-Container-Path-Stat: eyJuYW1lIjoicm9vdCIsInNpemUiOjQwOTYsIm1vZGUiOjIxNDc0ODQwOTYsIm10aW1lIjoiMjAxNC0wMi0yN1QyMDo1MToyM1oiLCJsaW5rVGFyZ2V0IjoiIn0= + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +On success, a response header `X-Docker-Container-Path-Stat` will be set to a +base64-encoded JSON object containing some filesystem header information about +the archived resource. The above example value would decode to the following +JSON object (whitespace added for readability): + +```json +{ + "name": "root", + "size": 4096, + "mode": 2147484096, + "mtime": "2014-02-27T20:51:23Z", + "linkTarget": "" +} +``` + +A `HEAD` request can also be made to this endpoint if only this information is +desired. + +**Status codes**: + +- **200** - success, returns archive of copied resource +- **400** - client error, bad parameter, details in JSON response body, one of: + - must specify path parameter (**path** cannot be empty) + - not a directory (**path** was asserted to be a directory but exists as a + file) +- **404** - client error, resource not found, one of: + – no such container (container `id` does not exist) + - no such file or directory (**path** does not exist) +- **500** - server error + +#### Extract an archive of files or folders to a directory in a container + +`PUT /containers/(id or name)/archive` + +Upload a tar archive to be extracted to a path in the filesystem of container +`id`. + +**Query parameters**: + +- **path** - path to a directory in the container + to extract the archive's contents into. Required. + + If not an absolute path, it is relative to the container's root directory. + The **path** resource must exist. +- **noOverwriteDirNonDir** - If "1", "true", or "True" then it will be an error + if unpacking the given content would cause an existing directory to be + replaced with a non-directory and vice versa. 
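+
+For instance (an informal sketch; it assumes the daemon is reachable on
+`tcp://localhost:2375` and reuses the placeholder container ID from the
+examples), an in-memory tar archive can be built and uploaded like this:
+
+```python
+import http.client
+import io
+import tarfile
+
+# Build a single-file tar archive in memory.
+data = b"hello from the host\n"
+buf = io.BytesIO()
+with tarfile.open(fileobj=buf, mode="w") as tar:
+    info = tarfile.TarInfo(name="hello.txt")
+    info.size = len(data)
+    tar.addfile(info, io.BytesIO(data))
+
+# Assumption: dockerd was started with -H tcp://localhost:2375
+conn = http.client.HTTPConnection("localhost", 2375)
+conn.request(
+    "PUT",
+    "/v1.23/containers/8cce319429b2/archive?path=/vol1",
+    body=buf.getvalue(),
+    headers={"Content-Type": "application/x-tar"},
+)
+print(conn.getresponse().status)  # 200 on success
+```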
+ +**Example request**: + + PUT /v1.23/containers/8cce319429b2/archive?path=/vol1 HTTP/1.1 + Content-Type: application/x-tar + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +**Example response**: + + HTTP/1.1 200 OK + +**Status codes**: + +- **200** – the content was extracted successfully +- **400** - client error, bad parameter, details in JSON response body, one of: + - must specify path parameter (**path** cannot be empty) + - not a directory (**path** should be a directory but exists as a file) + - unable to overwrite existing directory with non-directory + (if **noOverwriteDirNonDir**) + - unable to overwrite existing non-directory with directory + (if **noOverwriteDirNonDir**) +- **403** - client error, permission denied, the volume + or container rootfs is marked as read-only. +- **404** - client error, resource not found, one of: + – no such container (container `id` does not exist) + - no such file or directory (**path** resource does not exist) +- **500** – server error + +### 2.2 Images + +#### List Images + +`GET /images/json` + +**Example request**: + + GET /v1.23/images/json?all=0 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "RepoTags": [ + "ubuntu:12.04", + "ubuntu:precise", + "ubuntu:latest" + ], + "Id": "8dbd9e392a964056420e5d58ca5cc376ef18e2de93b5cc90e868a1bbc8318c1c", + "Created": 1365714795, + "Size": 131506275, + "VirtualSize": 131506275, + "Labels": {} + }, + { + "RepoTags": [ + "ubuntu:12.10", + "ubuntu:quantal" + ], + "ParentId": "27cf784147099545", + "Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", + "Created": 1364102658, + "Size": 24653, + "VirtualSize": 180116135, + "Labels": { + "com.example.version": "v1" + } + } + ] + +**Example request, with digest information**: + + GET /v1.23/images/json?digests=1 HTTP/1.1 + +**Example response, with digest information**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Created": 1420064636, + "Id": "4986bf8c15363d1c5d15512d5266f8777bfba4974ac56e3270e7760f6f0a8125", + "ParentId": "ea13149945cb6b1e746bf28032f02e9b5a793523481a0a18645fc77ad53c4ea2", + "RepoDigests": [ + "localhost:5000/test/busybox@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf" + ], + "RepoTags": [ + "localhost:5000/test/busybox:latest", + "playdate:latest" + ], + "Size": 0, + "VirtualSize": 2429728, + "Labels": {} + } + ] + +The response shows a single image `Id` associated with two repositories +(`RepoTags`): `localhost:5000/test/busybox`: and `playdate`. A caller can use +either of the `RepoTags` values `localhost:5000/test/busybox:latest` or +`playdate:latest` to reference the image. + +You can also use `RepoDigests` values to reference an image. In this response, +the array has only one reference and that is to the +`localhost:5000/test/busybox` repository; the `playdate` repository has no +digest. You can reference this digest using the value: +`localhost:5000/test/busybox@sha256:cbbf2f9a99b47fc460d...` + +See the `docker run` and `docker build` commands for examples of digest and tag +references on the command line. + +**Query parameters**: + +- **all** – 1/True/true or 0/False/false, default false +- **filters** – a JSON encoded value of the filters (a map[string][]string) to process on the images list. 
Available filters: + - `dangling=true` + - `label=key` or `label="key=value"` of an image label +- **filter** - only return images with the specified name + +#### Build image from a Dockerfile + +`POST /build` + +Build an image from a Dockerfile + +**Example request**: + + POST /v1.23/build HTTP/1.1 + Content-Type: application/x-tar + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"stream": "Step 1/5..."} + {"stream": "..."} + {"error": "Error...", "errorDetail": {"code": 123, "message": "Error..."}} + +The input stream must be a `tar` archive compressed with one of the +following algorithms: `identity` (no compression), `gzip`, `bzip2`, `xz`. + +The archive must include a build instructions file, typically called +`Dockerfile` at the archive's root. The `dockerfile` parameter may be +used to specify a different build instructions file. To do this, its value must be +the path to the alternate build instructions file to use. + +The archive may include any number of other files, +which are accessible in the build context (See the [*ADD build +command*](../reference/builder.md#add)). + +The Docker daemon performs a preliminary validation of the `Dockerfile` before +starting the build, and returns an error if the syntax is incorrect. After that, +each instruction is run one-by-one until the ID of the new image is output. + +The build is canceled if the client drops the connection by quitting +or being killed. + +**Query parameters**: + +- **dockerfile** - Path within the build context to the `Dockerfile`. This is + ignored if `remote` is specified and points to an external `Dockerfile`. +- **t** – A name and optional tag to apply to the image in the `name:tag` format. + If you omit the `tag` the default `latest` value is assumed. + You can provide one or more `t` parameters. +- **remote** – A Git repository URI or HTTP/HTTPS context URI. If the + URI points to a single text file, the file's contents are placed into + a file called `Dockerfile` and the image is built from that file. If + the URI points to a tarball, the file is downloaded by the daemon and + the contents therein used as the context for the build. If the URI + points to a tarball and the `dockerfile` parameter is also specified, + there must be a file with the corresponding path inside the tarball. +- **q** – Suppress verbose build output. +- **nocache** – Do not use the cache when building the image. +- **pull** - Attempt to pull the image even if an older image exists locally. +- **rm** - Remove intermediate containers after a successful build (default behavior). +- **forcerm** - Always remove intermediate containers (includes `rm`). +- **memory** - Set memory limit for build. +- **memswap** - Total memory (memory + swap), `-1` to enable unlimited swap. +- **cpushares** - CPU shares (relative weight). +- **cpusetcpus** - CPUs in which to allow execution (e.g., `0-3`, `0,1`). +- **cpuperiod** - The length of a CPU period in microseconds. +- **cpuquota** - Microseconds of CPU time that the container can get in a CPU period. +- **buildargs** – JSON map of string pairs for build-time variables. Users pass + these values at build-time. Docker uses the `buildargs` as the environment + context for command(s) run via the Dockerfile's `RUN` instruction or for + variable expansion in other Dockerfile instructions. This is not meant for + passing secret values. 
[Read more about the buildargs instruction](../reference/builder.md#arg) +- **shmsize** - Size of `/dev/shm` in bytes. The size must be greater than 0. If omitted the system uses 64MB. +- **labels** – JSON map of string pairs for labels to set on the image. + +**Request Headers**: + +- **Content-type** – Set to `"application/x-tar"`. +- **X-Registry-Config** – A base64-url-safe-encoded Registry Auth Config JSON + object with the following structure: + + { + "docker.example.com": { + "username": "janedoe", + "password": "hunter2" + }, + "https://index.docker.io/v1/": { + "username": "mobydock", + "password": "conta1n3rize14" + } + } + + This object maps the hostname of a registry to an object containing the + "username" and "password" for that registry. Multiple registries may + be specified as the build may be based on an image requiring + authentication to pull from any arbitrary registry. Only the registry + domain name (and port if not the default "443") are required. However + (for legacy reasons) the "official" Docker, Inc. hosted registry must + be specified with both a "https://" prefix and a "/v1/" suffix even + though Docker will prefer to use the v2 registry API. + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Create an image + +`POST /images/create` + +Create an image either by pulling it from the registry or by importing it + +**Example request**: + + POST /v1.23/images/create?fromImage=busybox&tag=latest HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status": "Pulling..."} + {"status": "Pulling", "progress": "1 B/ 100 B", "progressDetail": {"current": 1, "total": 100}} + {"error": "Invalid..."} + ... + +When using this endpoint to pull an image from the registry, the +`X-Registry-Auth` header can be used to include +a base64-encoded AuthConfig object. + +**Query parameters**: + +- **fromImage** – Name of the image to pull. The name may include a tag or + digest. This parameter may only be used when pulling an image. + The pull is cancelled if the HTTP connection is closed. +- **fromSrc** – Source to import. The value may be a URL from which the image + can be retrieved or `-` to read the image from the request body. + This parameter may only be used when importing an image. +- **repo** – Repository name given to an image when it is imported. + The repo may include a tag. This parameter may only be used when importing + an image. +- **tag** – Tag or digest. If empty when pulling an image, this causes all tags + for the given image to be pulled. + +**Request Headers**: + +- **X-Registry-Auth** – base64-encoded AuthConfig object, containing either login information, or a token + - Credential based login: + + ``` + { + "username": "jdoe", + "password": "secret", + "email": "jdoe@acme.com" + } + ``` + + - Token based login: + + ``` + { + "identitytoken": "9cbaf023786cd7..." 
+ } + ``` + +**Status codes**: + +- **200** – no error +- **404** - repository does not exist or no read access +- **500** – server error + + + +#### Inspect an image + +`GET /images/(name)/json` + +Return low-level information on the image `name` + +**Example request**: + + GET /v1.23/images/example/json HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Id" : "sha256:85f05633ddc1c50679be2b16a0479ab6f7637f8884e0cfe0f4d20e1ebb3d6e7c", + "Container" : "cb91e48a60d01f1e27028b4fc6819f4f290b3cf12496c8176ec714d0d390984a", + "Comment" : "", + "Os" : "linux", + "Architecture" : "amd64", + "Parent" : "sha256:91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c", + "ContainerConfig" : { + "Tty" : false, + "Hostname" : "e611e15f9c9d", + "Volumes" : null, + "Domainname" : "", + "AttachStdout" : false, + "PublishService" : "", + "AttachStdin" : false, + "OpenStdin" : false, + "StdinOnce" : false, + "NetworkDisabled" : false, + "OnBuild" : [], + "Image" : "91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c", + "User" : "", + "WorkingDir" : "", + "Entrypoint" : null, + "MacAddress" : "", + "AttachStderr" : false, + "Labels" : { + "com.example.license" : "GPL", + "com.example.version" : "1.0", + "com.example.vendor" : "Acme" + }, + "Env" : [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + ], + "ExposedPorts" : null, + "Cmd" : [ + "/bin/sh", + "-c", + "#(nop) LABEL com.example.vendor=Acme com.example.license=GPL com.example.version=1.0" + ] + }, + "DockerVersion" : "1.9.0-dev", + "VirtualSize" : 188359297, + "Size" : 0, + "Author" : "", + "Created" : "2015-09-10T08:30:53.26995814Z", + "GraphDriver" : { + "Name" : "aufs", + "Data" : null + }, + "RepoDigests" : [ + "localhost:5000/test/busybox/example@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf" + ], + "RepoTags" : [ + "example:1.0", + "example:latest", + "example:stable" + ], + "Config" : { + "Image" : "91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c", + "NetworkDisabled" : false, + "OnBuild" : [], + "StdinOnce" : false, + "PublishService" : "", + "AttachStdin" : false, + "OpenStdin" : false, + "Domainname" : "", + "AttachStdout" : false, + "Tty" : false, + "Hostname" : "e611e15f9c9d", + "Volumes" : null, + "Cmd" : [ + "/bin/bash" + ], + "ExposedPorts" : null, + "Env" : [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + ], + "Labels" : { + "com.example.vendor" : "Acme", + "com.example.version" : "1.0", + "com.example.license" : "GPL" + }, + "Entrypoint" : null, + "MacAddress" : "", + "AttachStderr" : false, + "WorkingDir" : "", + "User" : "" + }, + "RootFS": { + "Type": "layers", + "Layers": [ + "sha256:1834950e52ce4d5a88a1bbd131c537f4d0e56d10ff0dd69e66be3b7dfa9df7e6", + "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef" + ] + } + } + +**Status codes**: + +- **200** – no error +- **404** – no such image +- **500** – server error + +#### Get the history of an image + +`GET /images/(name)/history` + +Return the history of the image `name` + +**Example request**: + + GET /v1.23/images/ubuntu/history HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Id": "3db9c44f45209632d6050b35958829c3a2aa256d81b9a7be45b362ff85c54710", + "Created": 1398108230, + "CreatedBy": "/bin/sh -c #(nop) ADD file:eb15dbd63394e063b805a3c32ca7bf0266ef64676d5a6fab4801f2e81e2a5148 in /", + "Tags": [ + "ubuntu:lucid", + "ubuntu:10.04" + ], + "Size": 
182964289, + "Comment": "" + }, + { + "Id": "6cfa4d1f33fb861d4d114f43b25abd0ac737509268065cdfd69d544a59c85ab8", + "Created": 1398108222, + "CreatedBy": "/bin/sh -c #(nop) MAINTAINER Tianon Gravi - mkimage-debootstrap.sh -i iproute,iputils-ping,ubuntu-minimal -t lucid.tar.xz lucid http://archive.ubuntu.com/ubuntu/", + "Tags": null, + "Size": 0, + "Comment": "" + }, + { + "Id": "511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158", + "Created": 1371157430, + "CreatedBy": "", + "Tags": [ + "scratch12:latest", + "scratch:latest" + ], + "Size": 0, + "Comment": "Imported from -" + } + ] + +**Status codes**: + +- **200** – no error +- **404** – no such image +- **500** – server error + +#### Push an image on the registry + +`POST /images/(name)/push` + +Push the image `name` on the registry + +**Example request**: + + POST /v1.23/images/test/push HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status": "Pushing..."} + {"status": "Pushing", "progress": "1/? (n/a)", "progressDetail": {"current": 1}}} + {"error": "Invalid..."} + ... + +If you wish to push an image on to a private registry, that image must already have a tag +into a repository which references that registry `hostname` and `port`. This repository name should +then be used in the URL. This duplicates the command line's flow. + +The push is cancelled if the HTTP connection is closed. + +**Example request**: + + POST /v1.23/images/registry.acme.com:5000/test/push HTTP/1.1 + + +**Query parameters**: + +- **tag** – The tag to associate with the image on the registry. This is optional. + +**Request Headers**: + +- **X-Registry-Auth** – base64-encoded AuthConfig object, containing either login information, or a token + - Credential based login: + + ``` + { + "username": "jdoe", + "password": "secret", + "email": "jdoe@acme.com", + } + ``` + + - Identity token based login: + + ``` + { + "identitytoken": "9cbaf023786cd7..." + } + ``` + +**Status codes**: + +- **200** – no error +- **404** – no such image +- **500** – server error + +#### Tag an image into a repository + +`POST /images/(name)/tag` + +Tag the image `name` into a repository + +**Example request**: + + POST /v1.23/images/test/tag?repo=myrepo&force=0&tag=v42 HTTP/1.1 + +**Example response**: + + HTTP/1.1 201 Created + +**Query parameters**: + +- **repo** – The repository to tag in +- **force** – 1/True/true or 0/False/false, default false +- **tag** - The new tag name + +**Status codes**: + +- **201** – no error +- **400** – bad parameter +- **404** – no such image +- **409** – conflict +- **500** – server error + +#### Remove an image + +`DELETE /images/(name)` + +Remove the image `name` from the filesystem + +**Example request**: + + DELETE /v1.23/images/test HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-type: application/json + + [ + {"Untagged": "3e2f21a89f"}, + {"Deleted": "3e2f21a89f"}, + {"Deleted": "53b4f83ac9"} + ] + +**Query parameters**: + +- **force** – 1/True/true or 0/False/false, default false +- **noprune** – 1/True/true or 0/False/false, default false + +**Status codes**: + +- **200** – no error +- **404** – no such image +- **409** – conflict +- **500** – server error + +#### Search images + +`GET /images/search` + +Search for an image on [Docker Hub](https://hub.docker.com). + +> **Note**: +> The response keys have changed from API v1.6 to reflect the JSON +> sent by the registry server to the docker daemon's request. 
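+
+As a rough, non-authoritative sketch (assuming the daemon is reachable on
+`tcp://localhost:2375`), a search query can be issued like this:
+
+```python
+import http.client
+import json
+import urllib.parse
+
+# Assumption: dockerd was started with -H tcp://localhost:2375
+term = urllib.parse.quote("sshd")
+conn = http.client.HTTPConnection("localhost", 2375)
+conn.request("GET", "/v1.23/images/search?term=" + term)
+results = json.loads(conn.getresponse().read())
+for image in results:
+    print(image["name"], image["star_count"])
+```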
+ +**Example request**: + + GET /v1.23/images/search?term=sshd HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "description": "", + "is_official": false, + "is_automated": false, + "name": "wma55/u1210sshd", + "star_count": 0 + }, + { + "description": "", + "is_official": false, + "is_automated": false, + "name": "jdswinbank/sshd", + "star_count": 0 + }, + { + "description": "", + "is_official": false, + "is_automated": false, + "name": "vgauthier/sshd", + "star_count": 0 + } + ... + ] + +**Query parameters**: + +- **term** – term to search + +**Status codes**: + +- **200** – no error +- **500** – server error + +### 2.3 Misc + +#### Check auth configuration + +`POST /auth` + +Validate credentials for a registry and get identity token, +if available, for accessing the registry without password. + +**Example request**: + + POST /v1.23/auth HTTP/1.1 + Content-Type: application/json + + { + "username": "hannibal", + "password": "xxxx", + "serveraddress": "https://index.docker.io/v1/" + } + +**Example response**: + + HTTP/1.1 200 OK + + { + "Status": "Login Succeeded", + "IdentityToken": "9cbaf023786cd7..." + } + +**Status codes**: + +- **200** – no error +- **204** – no error +- **500** – server error + +#### Display system-wide information + +`GET /info` + +Display system-wide information + +**Example request**: + + GET /v1.23/info HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Architecture": "x86_64", + "ClusterStore": "etcd://localhost:2379", + "CgroupDriver": "cgroupfs", + "Containers": 11, + "ContainersRunning": 7, + "ContainersStopped": 3, + "ContainersPaused": 1, + "CpuCfsPeriod": true, + "CpuCfsQuota": true, + "Debug": false, + "DockerRootDir": "/var/lib/docker", + "Driver": "btrfs", + "DriverStatus": [[""]], + "ExecutionDriver": "native-0.1", + "ExperimentalBuild": false, + "HttpProxy": "http://test:test@localhost:8080", + "HttpsProxy": "https://test:test@localhost:8080", + "ID": "7TRN:IPZB:QYBB:VPBQ:UMPP:KARE:6ZNR:XE6T:7EWV:PKF4:ZOJD:TPYS", + "IPv4Forwarding": true, + "Images": 16, + "IndexServerAddress": "https://index.docker.io/v1/", + "InitPath": "/usr/bin/docker", + "InitSha1": "", + "KernelMemory": true, + "KernelVersion": "3.12.0-1-amd64", + "Labels": [ + "storage=ssd" + ], + "MemTotal": 2099236864, + "MemoryLimit": true, + "NCPU": 1, + "NEventsListener": 0, + "NFd": 11, + "NGoroutines": 21, + "Name": "prod-server-42", + "NoProxy": "9.81.1.160", + "OomKillDisable": true, + "OSType": "linux", + "OperatingSystem": "Boot2Docker", + "Plugins": { + "Volume": [ + "local" + ], + "Network": [ + "null", + "host", + "bridge" + ] + }, + "RegistryConfig": { + "IndexConfigs": { + "docker.io": { + "Mirrors": null, + "Name": "docker.io", + "Official": true, + "Secure": true + } + }, + "InsecureRegistryCIDRs": [ + "127.0.0.0/8" + ] + }, + "ServerVersion": "1.9.0", + "SwapLimit": false, + "SystemStatus": [["State", "Healthy"]], + "SystemTime": "2015-03-10T11:11:23.730591467-07:00" + } + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Show the docker version information + +`GET /version` + +Show the docker version information + +**Example request**: + + GET /v1.23/version HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Version": "1.11.0", + "Os": "linux", + "KernelVersion": "3.19.0-23-generic", + "GoVersion": "go1.4.2", + "GitCommit": "e75da4b", + "Arch": "amd64", + "ApiVersion": "1.23", + "BuildTime": 
"2015-12-01T07:09:13.444803460+00:00", + "Experimental": true + } + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Ping the docker server + +`GET /_ping` + +Ping the docker server + +**Example request**: + + GET /v1.23/_ping HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: text/plain + + OK + +**Status codes**: + +- **200** - no error +- **500** - server error + +#### Create a new image from a container's changes + +`POST /commit` + +Create a new image from a container's changes + +**Example request**: + + POST /v1.23/commit?container=44c004db4b17&comment=message&repo=myrepo HTTP/1.1 + Content-Type: application/json + + { + "Hostname": "", + "Domainname": "", + "User": "", + "AttachStdin": false, + "AttachStdout": true, + "AttachStderr": true, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": null, + "Cmd": [ + "date" + ], + "Mounts": [ + { + "Source": "/data", + "Destination": "/data", + "Mode": "ro,Z", + "RW": false + } + ], + "Labels": { + "key1": "value1", + "key2": "value2" + }, + "WorkingDir": "", + "NetworkDisabled": false, + "ExposedPorts": { + "22/tcp": {} + } + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + {"Id": "596069db4bf5"} + +**JSON parameters**: + +- **config** - the container's configuration + +**Query parameters**: + +- **container** – source container +- **repo** – repository +- **tag** – tag +- **comment** – commit message +- **author** – author (e.g., "John Hannibal Smith + <[hannibal@a-team.com](mailto:hannibal%40a-team.com)>") +- **pause** – 1/True/true or 0/False/false, whether to pause the container before committing +- **changes** – Dockerfile instructions to apply while committing + +**Status codes**: + +- **201** – no error +- **404** – no such container +- **500** – server error + +#### Monitor Docker's events + +`GET /events` + +Get container events from docker, in real time via streaming. 
+ +Docker containers report the following events: + + attach, commit, copy, create, destroy, die, exec_create, exec_start, export, kill, oom, pause, rename, resize, restart, start, stop, top, unpause, update + +Docker images report the following events: + + delete, import, pull, push, tag, untag + +Docker volumes report the following events: + + create, mount, unmount, destroy + +Docker networks report the following events: + + create, connect, disconnect, destroy + +**Example request**: + + GET /v1.23/events?since=1374067924 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + Server: Docker/1.11.0 (linux) + Date: Fri, 29 Apr 2016 15:18:06 GMT + Transfer-Encoding: chunked + + { + "status": "pull", + "id": "alpine:latest", + "Type": "image", + "Action": "pull", + "Actor": { + "ID": "alpine:latest", + "Attributes": { + "name": "alpine" + } + }, + "time": 1461943101, + "timeNano": 1461943101301854122 + } + { + "status": "create", + "id": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "from": "alpine", + "Type": "container", + "Action": "create", + "Actor": { + "ID": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "Attributes": { + "com.example.some-label": "some-label-value", + "image": "alpine", + "name": "my-container" + } + }, + "time": 1461943101, + "timeNano": 1461943101381709551 + } + { + "status": "attach", + "id": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "from": "alpine", + "Type": "container", + "Action": "attach", + "Actor": { + "ID": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "Attributes": { + "com.example.some-label": "some-label-value", + "image": "alpine", + "name": "my-container" + } + }, + "time": 1461943101, + "timeNano": 1461943101383858412 + } + { + "Type": "network", + "Action": "connect", + "Actor": { + "ID": "7dc8ac97d5d29ef6c31b6052f3938c1e8f2749abbd17d1bd1febf2608db1b474", + "Attributes": { + "container": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "name": "bridge", + "type": "bridge" + } + }, + "time": 1461943101, + "timeNano": 1461943101394865557 + } + { + "status": "start", + "id": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "from": "alpine", + "Type": "container", + "Action": "start", + "Actor": { + "ID": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "Attributes": { + "com.example.some-label": "some-label-value", + "image": "alpine", + "name": "my-container" + } + }, + "time": 1461943101, + "timeNano": 1461943101607533796 + } + { + "status": "resize", + "id": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "from": "alpine", + "Type": "container", + "Action": "resize", + "Actor": { + "ID": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "Attributes": { + "com.example.some-label": "some-label-value", + "height": "46", + "image": "alpine", + "name": "my-container", + "width": "204" + } + }, + "time": 1461943101, + "timeNano": 1461943101610269268 + } + { + "status": "die", + "id": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "from": "alpine", + "Type": "container", + "Action": "die", + "Actor": { + "ID": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "Attributes": { + "com.example.some-label": "some-label-value", + "exitCode": "0", + "image": "alpine", + "name": "my-container" + } + }, + "time": 1461943105, + "timeNano": 1461943105079144137 + } + { + "Type": "network", + 
"Action": "disconnect", + "Actor": { + "ID": "7dc8ac97d5d29ef6c31b6052f3938c1e8f2749abbd17d1bd1febf2608db1b474", + "Attributes": { + "container": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "name": "bridge", + "type": "bridge" + } + }, + "time": 1461943105, + "timeNano": 1461943105230860245 + } + { + "status": "destroy", + "id": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "from": "alpine", + "Type": "container", + "Action": "destroy", + "Actor": { + "ID": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "Attributes": { + "com.example.some-label": "some-label-value", + "image": "alpine", + "name": "my-container" + } + }, + "time": 1461943105, + "timeNano": 1461943105338056026 + } + +**Query parameters**: + +- **since** – Timestamp. Show all events created since timestamp and then stream +- **until** – Timestamp. Show events created until given timestamp and stop streaming +- **filters** – A json encoded value of the filters (a map[string][]string) to process on the event list. Available filters: + - `container=`; -- container to filter + - `event=`; -- event to filter + - `image=`; -- image to filter + - `label=`; -- image and container label to filter + - `type=`; -- either `container` or `image` or `volume` or `network` + - `volume=`; -- volume to filter + - `network=`; -- network to filter + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Get a tarball containing all images in a repository + +`GET /images/(name)/get` + +Get a tarball containing all images and metadata for the repository specified +by `name`. + +If `name` is a specific name and tag (e.g. ubuntu:latest), then only that image +(and its parents) are returned. If `name` is an image ID, similarly only that +image (and its parents) are returned, but with the exclusion of the +'repositories' file in the tarball, as there were no image names referenced. + +See the [image tarball format](#image-tarball-format) for more details. + +**Example request** + + GET /v1.23/images/ubuntu/get + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + Binary data stream + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Get a tarball containing all images + +`GET /images/get` + +Get a tarball containing all images and metadata for one or more repositories. + +For each value of the `names` parameter: if it is a specific name and tag (e.g. +`ubuntu:latest`), then only that image (and its parents) are returned; if it is +an image ID, similarly only that image (and its parents) are returned and there +would be no names referenced in the 'repositories' file for this image ID. + +See the [image tarball format](#image-tarball-format) for more details. + +**Example request** + + GET /v1.23/images/get?names=myname%2Fmyapp%3Alatest&names=busybox + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + Binary data stream + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Load a tarball with a set of images and tags into docker + +`POST /images/load` + +Load a set of images and tags into a Docker repository. +See the [image tarball format](#image-tarball-format) for more details. 
+ +**Example request** + + POST /v1.23/images/load + Content-Type: application/x-tar + + Tarball in body + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + Transfer-Encoding: chunked + + {"status":"Loading layer","progressDetail":{"current":32768,"total":1292800},"progress":"[= ] 32.77 kB/1.293 MB","id":"8ac8bfaff55a"} + {"status":"Loading layer","progressDetail":{"current":65536,"total":1292800},"progress":"[== ] 65.54 kB/1.293 MB","id":"8ac8bfaff55a"} + {"status":"Loading layer","progressDetail":{"current":98304,"total":1292800},"progress":"[=== ] 98.3 kB/1.293 MB","id":"8ac8bfaff55a"} + {"status":"Loading layer","progressDetail":{"current":131072,"total":1292800},"progress":"[===== ] 131.1 kB/1.293 MB","id":"8ac8bfaff55a"} + ... + {"stream":"Loaded image: busybox:latest\n"} + +**Example response**: + +If the "quiet" query parameter is set to `true` / `1` (`?quiet=1`), progress +details are suppressed, and only a confirmation message is returned once the +action completes. + + HTTP/1.1 200 OK + Content-Type: application/json + Transfer-Encoding: chunked + + {"stream":"Loaded image: busybox:latest\n"} + +**Query parameters**: + +- **quiet** – Boolean value, suppress progress details during load. Defaults + to `0` / `false` if omitted. + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Image tarball format + +An image tarball contains one directory per image layer (named using its long ID), +each containing these files: + +- `VERSION`: currently `1.0` - the file format version +- `json`: detailed layer information, similar to `docker inspect layer_id` +- `layer.tar`: A tarfile containing the filesystem changes in this layer + +The `layer.tar` file contains `aufs` style `.wh..wh.aufs` files and directories +for storing attribute changes and deletions. + +If the tarball defines a repository, the tarball should also include a `repositories` file at +the root that contains a list of repository and tag names mapped to layer IDs. + +``` +{"hello-world": + {"latest": "565a9d68a73f6706862bfe8409a7f659776d4d60a8d096eb4a3cbce6999cc2a1"} +} +``` + +#### Exec Create + +`POST /containers/(id or name)/exec` + +Sets up an exec instance in a running container `id` + +**Example request**: + + POST /v1.23/containers/e90e34656806/exec HTTP/1.1 + Content-Type: application/json + + { + "AttachStdin": true, + "AttachStdout": true, + "AttachStderr": true, + "Cmd": ["sh"], + "DetachKeys": "ctrl-p,ctrl-q", + "Privileged": true, + "Tty": true, + "User": "123:456" + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + { + "Id": "f90e34656806", + "Warnings":[] + } + +**JSON parameters**: + +- **AttachStdin** - Boolean value, attaches to `stdin` of the `exec` command. +- **AttachStdout** - Boolean value, attaches to `stdout` of the `exec` command. +- **AttachStderr** - Boolean value, attaches to `stderr` of the `exec` command. +- **DetachKeys** – Override the key sequence for detaching a + container. Format is a single character `[a-Z]` or `ctrl-` + where `` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. +- **Tty** - Boolean value to allocate a pseudo-TTY. +- **Cmd** - Command to run specified as a string or an array of strings. +- **Privileged** - Boolean value, runs the exec process with extended privileges. +- **User** - A string value specifying the user, and optionally, group to run + the exec process inside the container. Format is one of: `"user"`, + `"user:group"`, `"uid"`, or `"uid:gid"`. 
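
Putting the parameters above together, a minimal create-then-start sequence might look like the sketch below (Go, default Unix socket assumed; `e90e34656806` is the example container ID used above, and the exec is started detached, as described under Exec Start below):

```
package main

import (
	"bytes"
	"context"
	"encoding/json"
	"fmt"
	"net"
	"net/http"
)

func main() {
	client := &http.Client{Transport: &http.Transport{
		DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
			return net.Dial("unix", "/var/run/docker.sock")
		},
	}}

	// 1. Set up the exec instance in the running container.
	create, _ := json.Marshal(map[string]interface{}{
		"AttachStdout": true,
		"Cmd":          []string{"date"},
	})
	resp, err := client.Post("http://localhost/v1.23/containers/e90e34656806/exec",
		"application/json", bytes.NewReader(create))
	if err != nil {
		panic(err)
	}
	var exec struct{ Id string }
	json.NewDecoder(resp.Body).Decode(&exec)
	resp.Body.Close()

	// 2. Start it detached; the daemon returns as soon as the command is running.
	start := bytes.NewReader([]byte(`{"Detach": true, "Tty": false}`))
	resp, err = client.Post("http://localhost/v1.23/exec/"+exec.Id+"/start",
		"application/json", start)
	if err != nil {
		panic(err)
	}
	resp.Body.Close()
	fmt.Println("exec", exec.Id, "->", resp.Status)
}
```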
+ +**Status codes**: + +- **201** – no error +- **404** – no such container +- **409** - container is paused +- **500** - server error + +#### Exec Start + +`POST /exec/(id)/start` + +Starts a previously set up `exec` instance `id`. If `detach` is true, this API +returns after starting the `exec` command. Otherwise, this API sets up an +interactive session with the `exec` command. + +**Example request**: + + POST /v1.23/exec/e90e34656806/start HTTP/1.1 + Content-Type: application/json + + { + "Detach": false, + "Tty": false + } + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/vnd.docker.raw-stream + + {% raw %} + {{ STREAM }} + {% endraw %} + +**JSON parameters**: + +- **Detach** - Detach from the `exec` command. +- **Tty** - Boolean value to allocate a pseudo-TTY. + +**Status codes**: + +- **200** – no error +- **404** – no such exec instance +- **409** - container is paused + +**Stream details**: + +Similar to the stream behavior of `POST /containers/(id or name)/attach` API + +#### Exec Resize + +`POST /exec/(id)/resize` + +Resizes the `tty` session used by the `exec` command `id`. The unit is number of characters. +This API is valid only if `tty` was specified as part of creating and starting the `exec` command. + +**Example request**: + + POST /v1.23/exec/e90e34656806/resize?h=40&w=80 HTTP/1.1 + Content-Type: text/plain + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: text/plain + +**Query parameters**: + +- **h** – height of `tty` session +- **w** – width + +**Status codes**: + +- **201** – no error +- **404** – no such exec instance + +#### Exec Inspect + +`GET /exec/(id)/json` + +Return low-level information about the `exec` command `id`. + +**Example request**: + + GET /v1.23/exec/11fb006128e8ceb3942e7c58d77750f24210e35f879dd204ac975c184b820b39/json HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "CanRemove": false, + "ContainerID": "b53ee82b53a40c7dca428523e34f741f3abc51d9f297a14ff874bf761b995126", + "DetachKeys": "", + "ExitCode": 2, + "ID": "f33bbfb39f5b142420f4759b2348913bd4a8d1a6d7fd56499cb41a1bb91d7b3b", + "OpenStderr": true, + "OpenStdin": true, + "OpenStdout": true, + "ProcessConfig": { + "arguments": [ + "-c", + "exit 2" + ], + "entrypoint": "sh", + "privileged": false, + "tty": true, + "user": "1000" + }, + "Running": false + } + +**Status codes**: + +- **200** – no error +- **404** – no such exec instance +- **500** - server error + +### 2.4 Volumes + +#### List volumes + +`GET /volumes` + +**Example request**: + + GET /v1.23/volumes HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Volumes": [ + { + "Name": "tardis", + "Driver": "local", + "Mountpoint": "/var/lib/docker/volumes/tardis" + } + ], + "Warnings": [] + } + +**Query parameters**: + +- **filters** - JSON encoded value of the filters (a `map[string][]string`) to process on the volumes list. 
There is one available filter: `dangling=true` + +**Status codes**: + +- **200** - no error +- **500** - server error + +#### Create a volume + +`POST /volumes/create` + +Create a volume + +**Example request**: + + POST /v1.23/volumes/create HTTP/1.1 + Content-Type: application/json + + { + "Name": "tardis", + "Labels": { + "com.example.some-label": "some-value", + "com.example.some-other-label": "some-other-value" + } + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + { + "Name": "tardis", + "Driver": "local", + "Mountpoint": "/var/lib/docker/volumes/tardis", + "Labels": { + "com.example.some-label": "some-value", + "com.example.some-other-label": "some-other-value" + } + } + +**Status codes**: + +- **201** - no error +- **500** - server error + +**JSON parameters**: + +- **Name** - The new volume's name. If not specified, Docker generates a name. +- **Driver** - Name of the volume driver to use. Defaults to `local` for the name. +- **DriverOpts** - A mapping of driver options and values. These options are + passed directly to the driver and are driver specific. +- **Labels** - Labels to set on the volume, specified as a map: `{"key":"value","key2":"value2"}` + +#### Inspect a volume + +`GET /volumes/(name)` + +Return low-level information on the volume `name` + +**Example request**: + + GET /v1.23/volumes/tardis + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Name": "tardis", + "Driver": "local", + "Mountpoint": "/var/lib/docker/volumes/tardis/_data", + "Labels": { + "com.example.some-label": "some-value", + "com.example.some-other-label": "some-other-value" + } + } + +**Status codes**: + +- **200** - no error +- **404** - no such volume +- **500** - server error + +#### Remove a volume + +`DELETE /volumes/(name)` + +Instruct the driver to remove the volume (`name`). 
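
Taken together, the volume endpoints above (plus the remove request shown next) form a simple lifecycle; a minimal sketch in Go against the default Unix socket:

```
package main

import (
	"bytes"
	"context"
	"fmt"
	"net"
	"net/http"
)

func main() {
	client := &http.Client{Transport: &http.Transport{
		DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
			return net.Dial("unix", "/var/run/docker.sock")
		},
	}}

	// Create (201), inspect (200), then remove (204), mirroring the
	// raw requests shown in this section.
	body := bytes.NewReader([]byte(`{"Name": "tardis"}`))
	resp, err := client.Post("http://localhost/v1.23/volumes/create", "application/json", body)
	if err != nil {
		panic(err)
	}
	resp.Body.Close()

	resp, _ = client.Get("http://localhost/v1.23/volumes/tardis")
	resp.Body.Close()

	req, _ := http.NewRequest("DELETE", "http://localhost/v1.23/volumes/tardis", nil)
	resp, err = client.Do(req)
	if err != nil {
		panic(err)
	}
	resp.Body.Close()
	fmt.Println("removed:", resp.Status) // expect 204 No Content
}
```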
**Example request**:

    DELETE /v1.23/volumes/tardis HTTP/1.1

**Example response**:

    HTTP/1.1 204 No Content

**Status codes**:

- **204** - no error
- **404** - no such volume or volume driver
- **409** - volume is in use and cannot be removed
- **500** - server error

### 2.5 Networks

#### List networks

`GET /networks`

**Example request**:

    GET /v1.23/networks?filters={"type":{"custom":true}} HTTP/1.1

**Example response**:

```
HTTP/1.1 200 OK
Content-Type: application/json

[
  {
    "Name": "bridge",
    "Id": "f2de39df4171b0dc801e8002d1d999b77256983dfc63041c0f34030aa3977566",
    "Scope": "local",
    "Driver": "bridge",
    "EnableIPv6": false,
    "Internal": false,
    "IPAM": {
      "Driver": "default",
      "Config": [
        {
          "Subnet": "172.17.0.0/16"
        }
      ]
    },
    "Containers": {
      "39b69226f9d79f5634485fb236a23b2fe4e96a0a94128390a7fbbcc167065867": {
        "EndpointID": "ed2419a97c1d9954d05b46e462e7002ea552f216e9b136b80a7db8d98b442eda",
        "MacAddress": "02:42:ac:11:00:02",
        "IPv4Address": "172.17.0.2/16",
        "IPv6Address": ""
      }
    },
    "Options": {
      "com.docker.network.bridge.default_bridge": "true",
      "com.docker.network.bridge.enable_icc": "true",
      "com.docker.network.bridge.enable_ip_masquerade": "true",
      "com.docker.network.bridge.host_binding_ipv4": "0.0.0.0",
      "com.docker.network.bridge.name": "docker0",
      "com.docker.network.driver.mtu": "1500"
    }
  },
  {
    "Name": "none",
    "Id": "e086a3893b05ab69242d3c44e49483a3bbbd3a26b46baa8f61ab797c1088d794",
    "Scope": "local",
    "Driver": "null",
    "EnableIPv6": false,
    "Internal": false,
    "IPAM": {
      "Driver": "default",
      "Config": []
    },
    "Containers": {},
    "Options": {}
  },
  {
    "Name": "host",
    "Id": "13e871235c677f196c4e1ecebb9dc733b9b2d2ab589e30c539efeda84a24215e",
    "Scope": "local",
    "Driver": "host",
    "EnableIPv6": false,
    "Internal": false,
    "IPAM": {
      "Driver": "default",
      "Config": []
    },
    "Containers": {},
    "Options": {}
  }
]
```

**Query parameters**:

- **filters** - JSON encoded network list filter. The filter value is one of:
  - `id=<network-id>` Matches all or part of a network id.
  - `name=<network-name>` Matches all or part of a network name.
  - `type=["custom"|"builtin"]` Filters networks by type. The `custom` keyword returns all user-defined networks.
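
Because the `filters` value is itself JSON, it must be URL-encoded whenever it carries anything less trivial than the example above. A minimal sketch (Go, default Unix socket assumed):

```
package main

import (
	"context"
	"encoding/json"
	"fmt"
	"net"
	"net/http"
	"net/url"
)

func main() {
	client := &http.Client{Transport: &http.Transport{
		DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
			return net.Dial("unix", "/var/run/docker.sock")
		},
	}}

	// The filter value is a JSON document that must be query-escaped.
	filters, _ := json.Marshal(map[string]map[string]bool{"type": {"custom": true}})
	resp, err := client.Get("http://localhost/v1.23/networks?filters=" +
		url.QueryEscape(string(filters)))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var nets []struct{ Name, Id, Driver string }
	json.NewDecoder(resp.Body).Decode(&nets)
	for _, n := range nets {
		fmt.Println(n.Name, n.Driver, n.Id)
	}
}
```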
+ +**Status codes**: + +- **200** - no error +- **500** - server error + +#### Inspect network + +`GET /networks/(id or name)` + +Return low-level information on the network `id` + +**Example request**: + + GET /v1.23/networks/7d86d31b1478e7cca9ebed7e73aa0fdeec46c5ca29497431d3007d2d9e15ed99 HTTP/1.1 + +**Example response**: + +``` +HTTP/1.1 200 OK +Content-Type: application/json + +{ + "Name": "net01", + "Id": "7d86d31b1478e7cca9ebed7e73aa0fdeec46c5ca29497431d3007d2d9e15ed99", + "Scope": "local", + "Driver": "bridge", + "EnableIPv6": false, + "IPAM": { + "Driver": "default", + "Config": [ + { + "Subnet": "172.19.0.0/16", + "Gateway": "172.19.0.1/16" + } + ], + "Options": { + "foo": "bar" + } + }, + "Internal": false, + "Containers": { + "19a4d5d687db25203351ed79d478946f861258f018fe384f229f2efa4b23513c": { + "Name": "test", + "EndpointID": "628cadb8bcb92de107b2a1e516cbffe463e321f548feb37697cce00ad694f21a", + "MacAddress": "02:42:ac:13:00:02", + "IPv4Address": "172.19.0.2/16", + "IPv6Address": "" + } + }, + "Options": { + "com.docker.network.bridge.default_bridge": "true", + "com.docker.network.bridge.enable_icc": "true", + "com.docker.network.bridge.enable_ip_masquerade": "true", + "com.docker.network.bridge.host_binding_ipv4": "0.0.0.0", + "com.docker.network.bridge.name": "docker0", + "com.docker.network.driver.mtu": "1500" + }, + "Labels": { + "com.example.some-label": "some-value", + "com.example.some-other-label": "some-other-value" + } +} +``` + +**Status codes**: + +- **200** - no error +- **404** - network not found +- **500** - server error + +#### Create a network + +`POST /networks/create` + +Create a network + +**Example request**: + +``` +POST /v1.23/networks/create HTTP/1.1 +Content-Type: application/json + +{ + "Name":"isolated_nw", + "CheckDuplicate":true, + "Driver":"bridge", + "EnableIPv6": true, + "IPAM":{ + "Driver": "default", + "Config":[ + { + "Subnet":"172.20.0.0/16", + "IPRange":"172.20.10.0/24", + "Gateway":"172.20.10.11" + }, + { + "Subnet":"2001:db8:abcd::/64", + "Gateway":"2001:db8:abcd::1011" + } + ], + "Options": { + "foo": "bar" + } + }, + "Internal":true, + "Options": { + "com.docker.network.bridge.default_bridge": "true", + "com.docker.network.bridge.enable_icc": "true", + "com.docker.network.bridge.enable_ip_masquerade": "true", + "com.docker.network.bridge.host_binding_ipv4": "0.0.0.0", + "com.docker.network.bridge.name": "docker0", + "com.docker.network.driver.mtu": "1500" + }, + "Labels": { + "com.example.some-label": "some-value", + "com.example.some-other-label": "some-other-value" + } +} +``` + +**Example response**: + +``` +HTTP/1.1 201 Created +Content-Type: application/json + +{ + "Id": "22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30", + "Warning": "" +} +``` + +**Status codes**: + +- **201** - no error +- **404** - plugin not found +- **500** - server error + +**JSON parameters**: + +- **Name** - The new network's name. this is a mandatory field +- **CheckDuplicate** - Requests daemon to check for networks with same name. Defaults to `false`. + Since Network is primarily keyed based on a random ID and not on the name, + and network name is strictly a user-friendly alias to the network + which is uniquely identified using ID, there is no guaranteed way to check for duplicates. + This parameter CheckDuplicate is there to provide a best effort checking of any networks + which has the same name but it is not guaranteed to catch all name collisions. +- **Driver** - Name of the network driver plugin to use. 
Defaults to `bridge` driver
- **Internal** - Restrict external access to the network
- **IPAM** - Optional custom IP scheme for the network
  - **Driver** - Name of the IPAM driver to use. Defaults to `default` driver
  - **Config** - List of IPAM configuration options, specified as a map:
    `{"Subnet": <CIDR>, "IPRange": <CIDR>, "Gateway": <IP address>, "AuxAddress": <device_name:IP address>}`
  - **Options** - Driver-specific options, specified as a map: `{"option":"value" [,"option2":"value2"]}`
- **EnableIPv6** - Enable IPv6 on the network
- **Options** - Network specific options to be used by the drivers
- **Labels** - Labels to set on the network, specified as a map: `{"key":"value" [,"key2":"value2"]}`

#### Connect a container to a network

`POST /networks/(id or name)/connect`

Connect a container to a network

**Example request**:

```
POST /v1.23/networks/22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30/connect HTTP/1.1
Content-Type: application/json

{
  "Container":"3613f73ba0e4",
  "EndpointConfig": {
    "IPAMConfig": {
      "IPv4Address":"172.24.56.89",
      "IPv6Address":"2001:db8::5689"
    }
  }
}
```

**Example response**:

    HTTP/1.1 200 OK

**Status codes**:

- **200** - no error
- **404** - network or container is not found
- **500** - Internal Server Error

**JSON parameters**:

- **container** - container-id/name to be connected to the network

#### Disconnect a container from a network

`POST /networks/(id or name)/disconnect`

Disconnect a container from a network

**Example request**:

```
POST /v1.23/networks/22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30/disconnect HTTP/1.1
Content-Type: application/json

{
  "Container":"3613f73ba0e4",
  "Force":false
}
```

**Example response**:

    HTTP/1.1 200 OK

**Status codes**:

- **200** - no error
- **404** - network or container not found
- **500** - Internal Server Error

**JSON parameters**:

- **Container** - container-id/name to be disconnected from a network
- **Force** - Force the container to disconnect from a network

#### Remove a network

`DELETE /networks/(id or name)`

Instruct the driver to remove the network (`id`).

**Example request**:

    DELETE /v1.23/networks/22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30 HTTP/1.1

**Example response**:

    HTTP/1.1 204 No Content

**Status codes**:

- **204** - no error
- **403** - operation not supported for pre-defined networks
- **404** - no such network
- **500** - server error

## 3. Going further

### 3.1 Inside `docker run`

As an example, the `docker run` command line makes the following API calls:

- Create the container

- If the status code is 404, it means the image doesn't exist:
    - Try to pull it.
    - Then, retry to create the container.

- Start the container.

- If you are not in detached mode:
    - Attach to the container, using `logs=1` (to have `stdout` and
      `stderr` from the container's start) and `stream=1`

- If in detached mode or only `stdin` is attached, display the container's id.

### 3.2 Hijacking

In this version of the API, the `/attach` endpoint uses hijacking to transport
`stdin`, `stdout`, and `stderr` on the same socket.

To hint potential proxies about connection hijacking, the Docker client sends
connection upgrade headers, similar to WebSocket:

    Upgrade: tcp
    Connection: Upgrade

When the Docker daemon detects the `Upgrade` header, it switches its status code
from **200 OK** to **101 UPGRADED** and resends the same headers.
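
For illustration, the exchange can be driven by hand: dial the socket, send the upgrade headers shown above, skip past the 101 response headers, and then treat the connection as the raw stream. A sketch (Go; the socket path and container ID are placeholders):

```
package main

import (
	"bufio"
	"fmt"
	"io"
	"net"
	"os"
)

func main() {
	// Dial the daemon directly so we keep control of the raw connection.
	conn, err := net.Dial("unix", "/var/run/docker.sock")
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	// Send the attach request with the upgrade headers described above.
	fmt.Fprint(conn, "POST /v1.23/containers/16253994b7c4/attach?stream=1&stdin=1&stdout=1&stderr=1 HTTP/1.1\r\n"+
		"Host: localhost\r\n"+
		"Upgrade: tcp\r\n"+
		"Connection: Upgrade\r\n\r\n")

	// Consume the 101 UPGRADED status line and headers...
	br := bufio.NewReader(conn)
	for {
		line, err := br.ReadString('\n')
		if err != nil {
			panic(err)
		}
		if line == "\r\n" { // blank line: headers are done
			break
		}
	}

	// ...after which the same socket carries the raw container streams.
	go io.Copy(conn, os.Stdin)
	io.Copy(os.Stdout, br)
}
```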
+ + +### 3.3 CORS Requests + +To set cross origin requests to the Engine API please give values to +`--api-cors-header` when running Docker in daemon mode. Set * (asterisk) allows all, +default or blank means CORS disabled + + $ dockerd -H="192.168.1.9:2375" --api-cors-header="http://foo.bar" diff --git a/components/engine/docs/api/v1.24.md b/components/engine/docs/api/v1.24.md new file mode 100644 index 0000000000..fde0144212 --- /dev/null +++ b/components/engine/docs/api/v1.24.md @@ -0,0 +1,5348 @@ +--- +title: "Engine API v1.24" +description: "API Documentation for Docker" +keywords: "API, Docker, rcli, REST, documentation" +redirect_from: +- /engine/reference/api/docker_remote_api_v1.24/ +- /reference/api/docker_remote_api_v1.24/ +--- + + + +## 1. Brief introduction + + - The daemon listens on `unix:///var/run/docker.sock` but you can + [Bind Docker to another host/port or a Unix socket](../reference/commandline/dockerd.md#bind-docker-to-another-host-port-or-a-unix-socket). + - The API tends to be REST. However, for some complex commands, like `attach` + or `pull`, the HTTP connection is hijacked to transport `stdout`, + `stdin` and `stderr`. + +## 2. Errors + +The Engine API uses standard HTTP status codes to indicate the success or failure of the API call. The body of the response will be JSON in the following format: + + { + "message": "page not found" + } + +The status codes that are returned for each endpoint are specified in the endpoint documentation below. + +## 3. Endpoints + +### 3.1 Containers + +#### List containers + +`GET /containers/json` + +List containers + +**Example request**: + + GET /v1.24/containers/json?all=1&before=8dfafdbc3a40&size=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Id": "8dfafdbc3a40", + "Names":["/boring_feynman"], + "Image": "ubuntu:latest", + "ImageID": "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82", + "Command": "echo 1", + "Created": 1367854155, + "State": "exited", + "Status": "Exit 0", + "Ports": [{"PrivatePort": 2222, "PublicPort": 3333, "Type": "tcp"}], + "Labels": { + "com.example.vendor": "Acme", + "com.example.license": "GPL", + "com.example.version": "1.0" + }, + "SizeRw": 12288, + "SizeRootFs": 0, + "HostConfig": { + "NetworkMode": "default" + }, + "NetworkSettings": { + "Networks": { + "bridge": { + "IPAMConfig": null, + "Links": null, + "Aliases": null, + "NetworkID": "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812", + "EndpointID": "2cdc4edb1ded3631c81f57966563e5c8525b81121bb3706a9a9a3ae102711f3f", + "Gateway": "172.17.0.1", + "IPAddress": "172.17.0.2", + "IPPrefixLen": 16, + "IPv6Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "MacAddress": "02:42:ac:11:00:02" + } + } + }, + "Mounts": [ + { + "Name": "fac362...80535", + "Source": "/data", + "Destination": "/data", + "Driver": "local", + "Mode": "ro,Z", + "RW": false, + "Propagation": "" + } + ] + }, + { + "Id": "9cd87474be90", + "Names":["/coolName"], + "Image": "ubuntu:latest", + "ImageID": "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82", + "Command": "echo 222222", + "Created": 1367854155, + "State": "exited", + "Status": "Exit 0", + "Ports": [], + "Labels": {}, + "SizeRw": 12288, + "SizeRootFs": 0, + "HostConfig": { + "NetworkMode": "default" + }, + "NetworkSettings": { + "Networks": { + "bridge": { + "IPAMConfig": null, + "Links": null, + "Aliases": null, + "NetworkID": "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812", + 
"EndpointID": "88eaed7b37b38c2a3f0c4bc796494fdf51b270c2d22656412a2ca5d559a64d7a", + "Gateway": "172.17.0.1", + "IPAddress": "172.17.0.8", + "IPPrefixLen": 16, + "IPv6Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "MacAddress": "02:42:ac:11:00:08" + } + } + }, + "Mounts": [] + }, + { + "Id": "3176a2479c92", + "Names":["/sleepy_dog"], + "Image": "ubuntu:latest", + "ImageID": "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82", + "Command": "echo 3333333333333333", + "Created": 1367854154, + "State": "exited", + "Status": "Exit 0", + "Ports":[], + "Labels": {}, + "SizeRw":12288, + "SizeRootFs":0, + "HostConfig": { + "NetworkMode": "default" + }, + "NetworkSettings": { + "Networks": { + "bridge": { + "IPAMConfig": null, + "Links": null, + "Aliases": null, + "NetworkID": "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812", + "EndpointID": "8b27c041c30326d59cd6e6f510d4f8d1d570a228466f956edf7815508f78e30d", + "Gateway": "172.17.0.1", + "IPAddress": "172.17.0.6", + "IPPrefixLen": 16, + "IPv6Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "MacAddress": "02:42:ac:11:00:06" + } + } + }, + "Mounts": [] + }, + { + "Id": "4cb07b47f9fb", + "Names":["/running_cat"], + "Image": "ubuntu:latest", + "ImageID": "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82", + "Command": "echo 444444444444444444444444444444444", + "Created": 1367854152, + "State": "exited", + "Status": "Exit 0", + "Ports": [], + "Labels": {}, + "SizeRw": 12288, + "SizeRootFs": 0, + "HostConfig": { + "NetworkMode": "default" + }, + "NetworkSettings": { + "Networks": { + "bridge": { + "IPAMConfig": null, + "Links": null, + "Aliases": null, + "NetworkID": "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812", + "EndpointID": "d91c7b2f0644403d7ef3095985ea0e2370325cd2332ff3a3225c4247328e66e9", + "Gateway": "172.17.0.1", + "IPAddress": "172.17.0.5", + "IPPrefixLen": 16, + "IPv6Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "MacAddress": "02:42:ac:11:00:05" + } + } + }, + "Mounts": [] + } + ] + +**Query parameters**: + +- **all** – 1/True/true or 0/False/false, Show all containers. + Only running containers are shown by default (i.e., this defaults to false) +- **limit** – Show `limit` last created + containers, include non-running ones. +- **since** – Show only containers created since Id, include + non-running ones. +- **before** – Show only containers created before Id, include + non-running ones. +- **size** – 1/True/true or 0/False/false, Show the containers + sizes +- **filters** - a JSON encoded value of the filters (a `map[string][]string`) to process on the containers list. 
Available filters: + - `exited=`; -- containers with exit code of `` ; + - `status=`(`created`|`restarting`|`running`|`paused`|`exited`|`dead`) + - `label=key` or `label="key=value"` of a container label + - `isolation=`(`default`|`process`|`hyperv`) (Windows daemon only) + - `ancestor`=(`[:]`, `` or ``) + - `before`=(`` or ``) + - `since`=(`` or ``) + - `volume`=(`` or ``) + - `network`=(`` or ``) + +**Status codes**: + +- **200** – no error +- **400** – bad parameter +- **500** – server error + +#### Create a container + +`POST /containers/create` + +Create a container + +**Example request**: + + POST /v1.24/containers/create HTTP/1.1 + Content-Type: application/json + + { + "Hostname": "", + "Domainname": "", + "User": "", + "AttachStdin": false, + "AttachStdout": true, + "AttachStderr": true, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": [ + "FOO=bar", + "BAZ=quux" + ], + "Cmd": [ + "date" + ], + "Entrypoint": "", + "Image": "ubuntu", + "Labels": { + "com.example.vendor": "Acme", + "com.example.license": "GPL", + "com.example.version": "1.0" + }, + "Volumes": { + "/volumes/data": {} + }, + "Healthcheck":{ + "Test": ["CMD-SHELL", "curl localhost:3000"], + "Interval": 1000000000, + "Timeout": 10000000000, + "Retries": 10, + "StartPeriod": 60000000000 + }, + "WorkingDir": "", + "NetworkDisabled": false, + "MacAddress": "12:34:56:78:9a:bc", + "ExposedPorts": { + "22/tcp": {} + }, + "StopSignal": "SIGTERM", + "HostConfig": { + "Binds": ["/tmp:/tmp"], + "Tmpfs": { "/run": "rw,noexec,nosuid,size=65536k" }, + "Links": ["redis3:redis"], + "Memory": 0, + "MemorySwap": 0, + "MemoryReservation": 0, + "KernelMemory": 0, + "CpuPercent": 80, + "CpuShares": 512, + "CpuPeriod": 100000, + "CpuQuota": 50000, + "CpusetCpus": "0,1", + "CpusetMems": "0,1", + "IOMaximumBandwidth": 0, + "IOMaximumIOps": 0, + "BlkioWeight": 300, + "BlkioWeightDevice": [{}], + "BlkioDeviceReadBps": [{}], + "BlkioDeviceReadIOps": [{}], + "BlkioDeviceWriteBps": [{}], + "BlkioDeviceWriteIOps": [{}], + "MemorySwappiness": 60, + "OomKillDisable": false, + "OomScoreAdj": 500, + "PidMode": "", + "PidsLimit": -1, + "PortBindings": { "22/tcp": [{ "HostPort": "11022" }] }, + "PublishAllPorts": false, + "Privileged": false, + "ReadonlyRootfs": false, + "Dns": ["8.8.8.8"], + "DnsOptions": [""], + "DnsSearch": [""], + "ExtraHosts": null, + "VolumesFrom": ["parent", "other:ro"], + "CapAdd": ["NET_ADMIN"], + "CapDrop": ["MKNOD"], + "GroupAdd": ["newgroup"], + "RestartPolicy": { "Name": "", "MaximumRetryCount": 0 }, + "NetworkMode": "bridge", + "Devices": [], + "Sysctls": { "net.ipv4.ip_forward": "1" }, + "Ulimits": [{}], + "LogConfig": { "Type": "json-file", "Config": {} }, + "SecurityOpt": [], + "StorageOpt": {}, + "CgroupParent": "", + "VolumeDriver": "", + "ShmSize": 67108864 + }, + "NetworkingConfig": { + "EndpointsConfig": { + "isolated_nw" : { + "IPAMConfig": { + "IPv4Address":"172.20.30.33", + "IPv6Address":"2001:db8:abcd::3033", + "LinkLocalIPs":["169.254.34.68", "fe80::3468"] + }, + "Links":["container_1", "container_2"], + "Aliases":["server_x", "server_y"] + } + } + } + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + { + "Id":"e90e34656806", + "Warnings":[] + } + +**JSON parameters**: + +- **Hostname** - A string value containing the hostname to use for the + container. This must be a valid RFC 1123 hostname. +- **Domainname** - A string value containing the domain name to use + for the container. +- **User** - A string value specifying the user inside the container. 
- **AttachStdin** - Boolean value, attaches to `stdin`.
- **AttachStdout** - Boolean value, attaches to `stdout`.
- **AttachStderr** - Boolean value, attaches to `stderr`.
- **Tty** - Boolean value, Attach standard streams to a `tty`, including `stdin` if it is not closed.
- **OpenStdin** - Boolean value, opens `stdin`.
- **StdinOnce** - Boolean value, close `stdin` after the first attached client disconnects.
- **Env** - A list of environment variables in the form of `["VAR=value", ...]`
- **Labels** - Adds a map of labels to a container. To specify a map: `{"key":"value", ... }`
- **Cmd** - Command to run specified as a string or an array of strings.
- **Entrypoint** - Set the entry point for the container as a string or an array
  of strings.
- **Image** - A string specifying the image name to use for the container.
- **Volumes** - An object mapping mount point paths (strings) inside the
  container to empty objects.
- **Healthcheck** - A test to perform to check that the container is healthy.
    - **Test** - The test to perform. Possible values are:
        + `{}` inherit healthcheck from image or parent image
        + `{"NONE"}` disable healthcheck
        + `{"CMD", args...}` exec arguments directly
        + `{"CMD-SHELL", command}` run command with system's default shell
    - **Interval** - The time to wait between checks in nanoseconds. It should be 0 or at least 1000000 (1 ms). 0 means inherit.
    - **Timeout** - The time to wait before considering the check to have hung. It should be 0 or at least 1000000 (1 ms). 0 means inherit.
    - **Retries** - The number of consecutive failures needed to consider a container as unhealthy. 0 means inherit.
    - **StartPeriod** - The time to wait for container initialization before starting health-retries countdown in nanoseconds. It should be 0 or at least 1000000 (1 ms). 0 means inherit.
- **WorkingDir** - A string specifying the working directory for commands to
  run in.
- **NetworkDisabled** - Boolean value, when true disables networking for the
  container
- **ExposedPorts** - An object mapping ports to an empty object in the form of:
  `"ExposedPorts": { "<port>/<tcp|udp>: {}" }`
- **StopSignal** - Signal to stop a container as a string or unsigned integer. `SIGTERM` by default.
- **HostConfig**
    - **Binds** – A list of volume bindings for this container. Each volume binding is a string in one of these forms:
        + `host-src:container-dest` to bind-mount a host path into the
          container. Both `host-src`, and `container-dest` must be an
          _absolute_ path.
        + `host-src:container-dest:ro` to make the bind-mount read-only
          inside the container. Both `host-src`, and `container-dest` must be
          an _absolute_ path.
        + `volume-name:container-dest` to bind-mount a volume managed by a
          volume driver into the container. `container-dest` must be an
          _absolute_ path.
        + `volume-name:container-dest:ro` to mount the volume read-only
          inside the container. `container-dest` must be an _absolute_ path.
    - **Tmpfs** – A map of container directories which should be replaced by tmpfs mounts, and their corresponding
      mount options. A JSON object in the form `{ "/run": "rw,noexec,nosuid,size=65536k" }`.
    - **Links** - A list of links for the container. Each link entry should be
      in the form of `container_name:alias`.
    - **Memory** - Memory limit in bytes.
    - **MemorySwap** - Total memory limit (memory + swap); set `-1` to enable unlimited swap.
      You must use this with `memory` and make the swap value larger than `memory`.
    - **MemoryReservation** - Memory soft limit in bytes.
    - **KernelMemory** - Kernel memory limit in bytes.
    - **CpuPercent** - An integer value containing the usable percentage of the available CPUs. (Windows daemon only)
    - **CpuShares** - An integer value containing the container's CPU Shares
      (i.e., the relative weight vs. other containers).
    - **CpuPeriod** - The length of a CPU period in microseconds.
    - **CpuQuota** - Microseconds of CPU time that the container can get in a CPU period.
    - **CpusetCpus** - String value containing the `cgroups CpusetCpus` to use.
    - **CpusetMems** - Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems.
    - **IOMaximumBandwidth** - Maximum IO absolute rate in terms of bytes per second.
    - **IOMaximumIOps** - Maximum IO absolute rate in terms of IOps.
    - **BlkioWeight** - Block IO weight (relative weight) accepts a weight value between 10 and 1000.
    - **BlkioWeightDevice** - Block IO weight (relative device weight) in the form of: `"BlkioWeightDevice": [{"Path": "device_path", "Weight": weight}]`
    - **BlkioDeviceReadBps** - Limit read rate (bytes per second) from a device in the form of: `"BlkioDeviceReadBps": [{"Path": "device_path", "Rate": rate}]`, for example:
      `"BlkioDeviceReadBps": [{"Path": "/dev/sda", "Rate": "1024"}]`
    - **BlkioDeviceWriteBps** - Limit write rate (bytes per second) to a device in the form of: `"BlkioDeviceWriteBps": [{"Path": "device_path", "Rate": rate}]`, for example:
      `"BlkioDeviceWriteBps": [{"Path": "/dev/sda", "Rate": "1024"}]`
    - **BlkioDeviceReadIOps** - Limit read rate (IO per second) from a device in the form of: `"BlkioDeviceReadIOps": [{"Path": "device_path", "Rate": rate}]`, for example:
      `"BlkioDeviceReadIOps": [{"Path": "/dev/sda", "Rate": "1000"}]`
    - **BlkioDeviceWriteIOps** - Limit write rate (IO per second) to a device in the form of: `"BlkioDeviceWriteIOps": [{"Path": "device_path", "Rate": rate}]`, for example:
      `"BlkioDeviceWriteIOps": [{"Path": "/dev/sda", "Rate": "1000"}]`
    - **MemorySwappiness** - Tune a container's memory swappiness behavior. Accepts an integer between 0 and 100.
    - **OomKillDisable** - Boolean value, whether to disable OOM Killer for the container or not.
    - **OomScoreAdj** - An integer value containing the score given to the container in order to tune OOM killer preferences.
    - **PidMode** - Set the PID (Process) Namespace mode for the container;
      `"container:<name|id>"`: joins another container's PID namespace,
      `"host"`: use the host's PID namespace inside the container
    - **PidsLimit** - Tune a container's pids limit. Set -1 for unlimited.
    - **PortBindings** - A map of exposed container ports and the host port they
      should map to. A JSON object in the form
      `{ <port>/<protocol>: [{ "HostPort": "<port>" }] }`.
      Take note that `port` is specified as a string and not an integer value.
    - **PublishAllPorts** - Allocates a random host port for all of a container's
      exposed ports. Specified as a boolean value.
    - **Privileged** - Gives the container full access to the host. Specified as
      a boolean value.
    - **ReadonlyRootfs** - Mount the container's root filesystem as read only.
      Specified as a boolean value.
    - **Dns** - A list of DNS servers for the container to use.
    - **DnsOptions** - A list of DNS options
    - **DnsSearch** - A list of DNS search domains
    - **ExtraHosts** - A list of hostnames/IP mappings to add to the
      container's `/etc/hosts` file. Specified in the form `["hostname:IP"]`.
    - **VolumesFrom** - A list of volumes to inherit from another container.
      Specified in the form `<container name>[:<ro|rw>]`
    - **CapAdd** - A list of kernel capabilities to add to the container.
    - **CapDrop** - A list of kernel capabilities to drop from the container.
    - **GroupAdd** - A list of additional groups that the container process will run as.
    - **RestartPolicy** – The behavior to apply when the container exits. The
      value is an object with a `Name` property of either `"always"` to
      always restart, `"unless-stopped"` to restart always except when
      the user has manually stopped the container, or `"on-failure"` to restart only when the container
      exit code is non-zero. If `on-failure` is used, `MaximumRetryCount`
      controls the number of times to retry before giving up.
      The default is not to restart. (optional)
      An ever-increasing delay (double the previous delay, starting at 100 ms)
      is added before each restart to prevent flooding the server.
    - **UsernsMode** - Sets the user namespace mode for the container when user namespace remapping is enabled.
      Supported values are: `host`.
    - **NetworkMode** - Sets the networking mode for the container. Supported
      standard values are: `bridge`, `host`, `none`, and `container:<name|id>`. Any other value is taken
      as a custom network's name to which this container should connect.
    - **Devices** - A list of devices to add to the container specified as a JSON object in the
      form
      `{ "PathOnHost": "/dev/deviceName", "PathInContainer": "/dev/deviceName", "CgroupPermissions": "mrw"}`
    - **Ulimits** - A list of ulimits to set in the container, specified as
      `{ "Name": <name>, "Soft": <soft limit>, "Hard": <hard limit> }`, for example:
      `Ulimits: { "Name": "nofile", "Soft": 1024, "Hard": 2048 }`
    - **Sysctls** - A list of kernel parameters (sysctls) to set in the container, specified as
      `{ <name>: <value> }`, for example:
      `{ "net.ipv4.ip_forward": "1" }`
    - **SecurityOpt**: A list of string values to customize labels for MLS
      systems, such as SELinux.
    - **StorageOpt**: Storage driver options per container. Options can be passed in the form
      `{"size":"120G"}`
    - **LogConfig** - Log configuration for the container, specified as a JSON object in the form
      `{ "Type": "<driver_name>", "Config": {"key1": "val1"}}`.
      Available types: `json-file`, `syslog`, `journald`, `gelf`, `fluentd`, `awslogs`, `splunk`, `etwlogs`, `none`.
      The default is the `json-file` logging driver.
    - **CgroupParent** - Path to `cgroups` under which the container's `cgroup` is created. If the path is not absolute, the path is considered to be relative to the `cgroups` path of the init process. Cgroups are created if they do not already exist.
    - **VolumeDriver** - Driver that this container uses to mount volumes.
    - **ShmSize** - Size of `/dev/shm` in bytes. The size must be greater than 0. If omitted the system uses 64 MB.

**Query parameters**:

- **name** – Assign the specified name to the container. Must
  match `/?[a-zA-Z0-9_-]+`.
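
Most of the reference above is optional; a minimal create request only needs an image. A sketch (Go, default Unix socket assumed, image and command values are placeholders), with the status codes listed next telling you how it went:

```
package main

import (
	"bytes"
	"context"
	"encoding/json"
	"fmt"
	"net"
	"net/http"
)

func main() {
	client := &http.Client{Transport: &http.Transport{
		DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
			return net.Dial("unix", "/var/run/docker.sock")
		},
	}}

	// Only Image is strictly required; everything else falls back to defaults.
	body, _ := json.Marshal(map[string]interface{}{
		"Image": "ubuntu",
		"Cmd":   []string{"date"},
	})
	resp, err := client.Post("http://localhost/v1.24/containers/create?name=example",
		"application/json", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var out struct {
		Id       string
		Warnings []string
	}
	json.NewDecoder(resp.Body).Decode(&out)
	fmt.Println(resp.Status, out.Id, out.Warnings)
}
```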
+ +**Status codes**: + +- **201** – no error +- **400** – bad parameter +- **404** – no such container +- **406** – impossible to attach (container not running) +- **409** – conflict +- **500** – server error + +#### Inspect a container + +`GET /containers/(id or name)/json` + +Return low-level information on the container `id` + +**Example request**: + + GET /v1.24/containers/4fa6e0f0c678/json HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "AppArmorProfile": "", + "Args": [ + "-c", + "exit 9" + ], + "Config": { + "AttachStderr": true, + "AttachStdin": false, + "AttachStdout": true, + "Cmd": [ + "/bin/sh", + "-c", + "exit 9" + ], + "Domainname": "", + "Entrypoint": null, + "Env": [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + ], + "ExposedPorts": null, + "Hostname": "ba033ac44011", + "Image": "ubuntu", + "Labels": { + "com.example.vendor": "Acme", + "com.example.license": "GPL", + "com.example.version": "1.0" + }, + "MacAddress": "", + "NetworkDisabled": false, + "OnBuild": null, + "OpenStdin": false, + "StdinOnce": false, + "Tty": false, + "User": "", + "Volumes": { + "/volumes/data": {} + }, + "WorkingDir": "", + "StopSignal": "SIGTERM" + }, + "Created": "2015-01-06T15:47:31.485331387Z", + "Driver": "devicemapper", + "ExecIDs": null, + "HostConfig": { + "Binds": null, + "IOMaximumBandwidth": 0, + "IOMaximumIOps": 0, + "BlkioWeight": 0, + "BlkioWeightDevice": [{}], + "BlkioDeviceReadBps": [{}], + "BlkioDeviceWriteBps": [{}], + "BlkioDeviceReadIOps": [{}], + "BlkioDeviceWriteIOps": [{}], + "CapAdd": null, + "CapDrop": null, + "ContainerIDFile": "", + "CpusetCpus": "", + "CpusetMems": "", + "CpuPercent": 80, + "CpuShares": 0, + "CpuPeriod": 100000, + "Devices": [], + "Dns": null, + "DnsOptions": null, + "DnsSearch": null, + "ExtraHosts": null, + "IpcMode": "", + "Links": null, + "LxcConf": [], + "Memory": 0, + "MemorySwap": 0, + "MemoryReservation": 0, + "KernelMemory": 0, + "OomKillDisable": false, + "OomScoreAdj": 500, + "NetworkMode": "bridge", + "PidMode": "", + "PortBindings": {}, + "Privileged": false, + "ReadonlyRootfs": false, + "PublishAllPorts": false, + "RestartPolicy": { + "MaximumRetryCount": 2, + "Name": "on-failure" + }, + "LogConfig": { + "Config": null, + "Type": "json-file" + }, + "SecurityOpt": null, + "Sysctls": { + "net.ipv4.ip_forward": "1" + }, + "StorageOpt": null, + "VolumesFrom": null, + "Ulimits": [{}], + "VolumeDriver": "", + "ShmSize": 67108864 + }, + "HostnamePath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hostname", + "HostsPath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hosts", + "LogPath": "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b-json.log", + "Id": "ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39", + "Image": "04c5d3b7b0656168630d3ba35d8889bd0e9caafcaeb3004d2bfbc47e7c5d35d2", + "MountLabel": "", + "Name": "/boring_euclid", + "NetworkSettings": { + "Bridge": "", + "SandboxID": "", + "HairpinMode": false, + "LinkLocalIPv6Address": "", + "LinkLocalIPv6PrefixLen": 0, + "Ports": null, + "SandboxKey": "", + "SecondaryIPAddresses": null, + "SecondaryIPv6Addresses": null, + "EndpointID": "", + "Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "IPAddress": "", + "IPPrefixLen": 0, + "IPv6Gateway": "", + "MacAddress": "", + "Networks": { + 
"bridge": { + "NetworkID": "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812", + "EndpointID": "7587b82f0dada3656fda26588aee72630c6fab1536d36e394b2bfbcf898c971d", + "Gateway": "172.17.0.1", + "IPAddress": "172.17.0.2", + "IPPrefixLen": 16, + "IPv6Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "MacAddress": "02:42:ac:12:00:02" + } + } + }, + "Path": "/bin/sh", + "ProcessLabel": "", + "ResolvConfPath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/resolv.conf", + "RestartCount": 1, + "State": { + "Error": "", + "ExitCode": 9, + "FinishedAt": "2015-01-06T15:47:32.080254511Z", + "OOMKilled": false, + "Dead": false, + "Paused": false, + "Pid": 0, + "Restarting": false, + "Running": true, + "StartedAt": "2015-01-06T15:47:32.072697474Z", + "Status": "running" + }, + "Mounts": [ + { + "Name": "fac362...80535", + "Source": "/data", + "Destination": "/data", + "Driver": "local", + "Mode": "ro,Z", + "RW": false, + "Propagation": "" + } + ] + } + +**Example request, with size information**: + + GET /v1.24/containers/4fa6e0f0c678/json?size=1 HTTP/1.1 + +**Example response, with size information**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + .... + "SizeRw": 0, + "SizeRootFs": 972, + .... + } + +**Query parameters**: + +- **size** – 1/True/true or 0/False/false, return container size information. Default is `false`. + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### List processes running inside a container + +`GET /containers/(id or name)/top` + +List processes running inside the container `id`. On Unix systems this +is done by running the `ps` command. This endpoint is not +supported on Windows. + +**Example request**: + + GET /v1.24/containers/4fa6e0f0c678/top HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Titles" : [ + "UID", "PID", "PPID", "C", "STIME", "TTY", "TIME", "CMD" + ], + "Processes" : [ + [ + "root", "13642", "882", "0", "17:03", "pts/0", "00:00:00", "/bin/bash" + ], + [ + "root", "13735", "13642", "0", "17:06", "pts/0", "00:00:00", "sleep 10" + ] + ] + } + +**Example request**: + + GET /v1.24/containers/4fa6e0f0c678/top?ps_args=aux HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Titles" : [ + "USER","PID","%CPU","%MEM","VSZ","RSS","TTY","STAT","START","TIME","COMMAND" + ] + "Processes" : [ + [ + "root","13642","0.0","0.1","18172","3184","pts/0","Ss","17:03","0:00","/bin/bash" + ], + [ + "root","13895","0.0","0.0","4348","692","pts/0","S+","17:15","0:00","sleep 10" + ] + ], + } + +**Query parameters**: + +- **ps_args** – `ps` arguments to use (e.g., `aux`), defaults to `-ef` + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Get container logs + +`GET /containers/(id or name)/logs` + +Get `stdout` and `stderr` logs from the container ``id`` + +> **Note**: +> This endpoint works only for containers with the `json-file` or `journald` logging drivers. + +**Example request**: + + GET /v1.24/containers/4fa6e0f0c678/logs?stderr=1&stdout=1×tamps=1&follow=1&tail=10&since=1428990821 HTTP/1.1 + +**Example response**: + + HTTP/1.1 101 UPGRADED + Content-Type: application/vnd.docker.raw-stream + Connection: Upgrade + Upgrade: tcp + + {% raw %} + {{ STREAM }} + {% endraw %} + +**Query parameters**: + +- **details** - 1/True/true or 0/False/flase, Show extra details provided to logs. 
Default `false`. +- **follow** – 1/True/true or 0/False/false, return stream. Default `false`. +- **stdout** – 1/True/true or 0/False/false, show `stdout` log. Default `false`. +- **stderr** – 1/True/true or 0/False/false, show `stderr` log. Default `false`. +- **since** – UNIX timestamp (integer) to filter logs. Specifying a timestamp + will only output log-entries since that timestamp. Default: 0 (unfiltered) +- **timestamps** – 1/True/true or 0/False/false, print timestamps for + every log line. Default `false`. +- **tail** – Output specified number of lines at the end of logs: `all` or ``. Default all. + +**Status codes**: + +- **101** – no error, hints proxy about hijacking +- **200** – no error, no upgrade header found +- **404** – no such container +- **500** – server error + +#### Inspect changes on a container's filesystem + +`GET /containers/(id or name)/changes` + +Inspect changes on container `id`'s filesystem + +**Example request**: + + GET /v1.24/containers/4fa6e0f0c678/changes HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Path": "/dev", + "Kind": 0 + }, + { + "Path": "/dev/kmsg", + "Kind": 1 + }, + { + "Path": "/test", + "Kind": 1 + } + ] + +Values for `Kind`: + +- `0`: Modify +- `1`: Add +- `2`: Delete + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Export a container + +`GET /containers/(id or name)/export` + +Export the contents of container `id` + +**Example request**: + + GET /v1.24/containers/4fa6e0f0c678/export HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/octet-stream + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Get container stats based on resource usage + +`GET /containers/(id or name)/stats` + +This endpoint returns a live stream of a container's resource usage statistics. 
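
Each sample in the stream (see the example response below) carries both `cpu_stats` and `precpu_stats`; a CPU percentage is usually derived from the delta between them, roughly the formula the `docker stats` CLI applies. A sketch using the numbers from the example response:

```
package main

import "fmt"

// Subset of the stats payload needed for the calculation (field names
// follow the example response below).
type cpuUsage struct {
	TotalUsage  uint64   `json:"total_usage"`
	PercpuUsage []uint64 `json:"percpu_usage"`
}

type cpuStats struct {
	CPUUsage       cpuUsage `json:"cpu_usage"`
	SystemCPUUsage uint64   `json:"system_cpu_usage"`
}

// cpuPercent applies the delta formula between the current sample
// (cpu_stats) and the previous one (precpu_stats).
func cpuPercent(pre, cur cpuStats) float64 {
	cpuDelta := float64(cur.CPUUsage.TotalUsage) - float64(pre.CPUUsage.TotalUsage)
	sysDelta := float64(cur.SystemCPUUsage) - float64(pre.SystemCPUUsage)
	if sysDelta <= 0 || cpuDelta < 0 {
		return 0
	}
	return (cpuDelta / sysDelta) * float64(len(cur.CPUUsage.PercpuUsage)) * 100.0
}

func main() {
	// Values taken from the example response below (4 CPUs).
	pre := cpuStats{cpuUsage{100093996, make([]uint64, 4)}, 9492140000000}
	cur := cpuStats{cpuUsage{100215355, make([]uint64, 4)}, 739306590000000}
	fmt.Printf("%.6f%%\n", cpuPercent(pre, cur))
}
```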
+ +**Example request**: + + GET /v1.24/containers/redis1/stats HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "read" : "2015-01-08T22:57:31.547920715Z", + "pids_stats": { + "current": 3 + }, + "networks": { + "eth0": { + "rx_bytes": 5338, + "rx_dropped": 0, + "rx_errors": 0, + "rx_packets": 36, + "tx_bytes": 648, + "tx_dropped": 0, + "tx_errors": 0, + "tx_packets": 8 + }, + "eth5": { + "rx_bytes": 4641, + "rx_dropped": 0, + "rx_errors": 0, + "rx_packets": 26, + "tx_bytes": 690, + "tx_dropped": 0, + "tx_errors": 0, + "tx_packets": 9 + } + }, + "memory_stats" : { + "stats" : { + "total_pgmajfault" : 0, + "cache" : 0, + "mapped_file" : 0, + "total_inactive_file" : 0, + "pgpgout" : 414, + "rss" : 6537216, + "total_mapped_file" : 0, + "writeback" : 0, + "unevictable" : 0, + "pgpgin" : 477, + "total_unevictable" : 0, + "pgmajfault" : 0, + "total_rss" : 6537216, + "total_rss_huge" : 6291456, + "total_writeback" : 0, + "total_inactive_anon" : 0, + "rss_huge" : 6291456, + "hierarchical_memory_limit" : 67108864, + "total_pgfault" : 964, + "total_active_file" : 0, + "active_anon" : 6537216, + "total_active_anon" : 6537216, + "total_pgpgout" : 414, + "total_cache" : 0, + "inactive_anon" : 0, + "active_file" : 0, + "pgfault" : 964, + "inactive_file" : 0, + "total_pgpgin" : 477 + }, + "max_usage" : 6651904, + "usage" : 6537216, + "failcnt" : 0, + "limit" : 67108864 + }, + "blkio_stats" : {}, + "cpu_stats" : { + "cpu_usage" : { + "percpu_usage" : [ + 8646879, + 24472255, + 36438778, + 30657443 + ], + "usage_in_usermode" : 50000000, + "total_usage" : 100215355, + "usage_in_kernelmode" : 30000000 + }, + "system_cpu_usage" : 739306590000000, + "throttling_data" : {"periods":0,"throttled_periods":0,"throttled_time":0} + }, + "precpu_stats" : { + "cpu_usage" : { + "percpu_usage" : [ + 8646879, + 24350896, + 36438778, + 30657443 + ], + "usage_in_usermode" : 50000000, + "total_usage" : 100093996, + "usage_in_kernelmode" : 30000000 + }, + "system_cpu_usage" : 9492140000000, + "throttling_data" : {"periods":0,"throttled_periods":0,"throttled_time":0} + } + } + +The `precpu_stats` is the cpu statistic of last read, which is used for calculating the cpu usage percent. It is not the exact copy of the `cpu_stats` field. + +**Query parameters**: + +- **stream** – 1/True/true or 0/False/false, pull stats once then disconnect. Default `true`. + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Resize a container TTY + +`POST /containers/(id or name)/resize` + +Resize the TTY for container with `id`. The unit is number of characters. You must restart the container for the resize to take effect. + +**Example request**: + + POST /v1.24/containers/4fa6e0f0c678/resize?h=40&w=80 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Length: 0 + Content-Type: text/plain; charset=utf-8 + +**Query parameters**: + +- **h** – height of `tty` session +- **w** – width + +**Status codes**: + +- **200** – no error +- **404** – No such container +- **500** – Cannot resize container + +#### Start a container + +`POST /containers/(id or name)/start` + +Start the container `id` + +**Example request**: + + POST /v1.24/containers/e90e34656806/start HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **detachKeys** – Override the key sequence for detaching a + container. Format is a single character `[a-Z]` or `ctrl-` + where `` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. 
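
A client sketch for this call, branching on the status codes listed next (Go, default Unix socket assumed):

```
package main

import (
	"context"
	"fmt"
	"net"
	"net/http"
)

func main() {
	client := &http.Client{Transport: &http.Transport{
		DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
			return net.Dial("unix", "/var/run/docker.sock")
		},
	}}

	resp, err := client.Post("http://localhost/v1.24/containers/e90e34656806/start",
		"application/json", nil)
	if err != nil {
		panic(err)
	}
	resp.Body.Close()

	switch resp.StatusCode {
	case 204:
		fmt.Println("started")
	case 304:
		fmt.Println("already running") // not an error
	case 404:
		fmt.Println("no such container")
	default:
		fmt.Println("unexpected:", resp.Status)
	}
}
```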
+ +**Status codes**: + +- **204** – no error +- **304** – container already started +- **404** – no such container +- **500** – server error + +#### Stop a container + +`POST /containers/(id or name)/stop` + +Stop the container `id` + +**Example request**: + + POST /v1.24/containers/e90e34656806/stop?t=5 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **t** – number of seconds to wait before killing the container + +**Status codes**: + +- **204** – no error +- **304** – container already stopped +- **404** – no such container +- **500** – server error + +#### Restart a container + +`POST /containers/(id or name)/restart` + +Restart the container `id` + +**Example request**: + + POST /v1.24/containers/e90e34656806/restart?t=5 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **t** – number of seconds to wait before killing the container + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **500** – server error + +#### Kill a container + +`POST /containers/(id or name)/kill` + +Kill the container `id` + +**Example request**: + + POST /v1.24/containers/e90e34656806/kill HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **signal** - Signal to send to the container: integer or string like `SIGINT`. + When not set, `SIGKILL` is assumed and the call waits for the container to exit. + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **500** – server error + +#### Update a container + +`POST /containers/(id or name)/update` + +Update configuration of one or more containers. + +**Example request**: + + POST /v1.24/containers/e90e34656806/update HTTP/1.1 + Content-Type: application/json + + { + "BlkioWeight": 300, + "CpuShares": 512, + "CpuPeriod": 100000, + "CpuQuota": 50000, + "CpusetCpus": "0,1", + "CpusetMems": "0", + "Memory": 314572800, + "MemorySwap": 514288000, + "MemoryReservation": 209715200, + "KernelMemory": 52428800, + "RestartPolicy": { + "MaximumRetryCount": 4, + "Name": "on-failure" + } + } + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Warnings": [] + } + +**Status codes**: + +- **200** – no error +- **400** – bad parameter +- **404** – no such container +- **500** – server error + +#### Rename a container + +`POST /containers/(id or name)/rename` + +Rename the container `id` to a `new_name` + +**Example request**: + + POST /v1.24/containers/e90e34656806/rename?name=new_name HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **name** – new name for the container + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **409** - conflict name already assigned +- **500** – server error + +#### Pause a container + +`POST /containers/(id or name)/pause` + +Pause the container `id` + +**Example request**: + + POST /v1.24/containers/e90e34656806/pause HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **500** – server error + +#### Unpause a container + +`POST /containers/(id or name)/unpause` + +Unpause the container `id` + +**Example request**: + + POST /v1.24/containers/e90e34656806/unpause HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **500** – server error + +#### Attach to a container + +`POST /containers/(id or 
name)/attach`
+
+Attach to the container `id`
+
+**Example request**:
+
+    POST /v1.24/containers/16253994b7c4/attach?logs=1&stream=0&stdout=1 HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 101 UPGRADED
+    Content-Type: application/vnd.docker.raw-stream
+    Connection: Upgrade
+    Upgrade: tcp
+
+    {% raw %}
+    {{ STREAM }}
+    {% endraw %}
+
+**Query parameters**:
+
+- **detachKeys** – Override the key sequence for detaching a
+  container. Format is a single character `[a-Z]` or `ctrl-<value>`
+  where `<value>` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`.
+- **logs** – 1/True/true or 0/False/false, return logs. Default `false`.
+- **stream** – 1/True/true or 0/False/false, return stream.
+  Default `false`.
+- **stdin** – 1/True/true or 0/False/false, if `stream=true`, attach
+  to `stdin`. Default `false`.
+- **stdout** – 1/True/true or 0/False/false, if `logs=true`, return
+  `stdout` log, if `stream=true`, attach to `stdout`. Default `false`.
+- **stderr** – 1/True/true or 0/False/false, if `logs=true`, return
+  `stderr` log, if `stream=true`, attach to `stderr`. Default `false`.
+
+**Status codes**:
+
+- **101** – no error, hints proxy about hijacking
+- **200** – no error, no upgrade header found
+- **400** – bad parameter
+- **404** – no such container
+- **409** - container is paused
+- **500** – server error
+
+**Stream details**:
+
+When the TTY setting is enabled in
+[`POST /containers/create`](#create-a-container),
+the stream is the raw data from the process PTY and client's `stdin`.
+When the TTY is disabled, the stream is multiplexed to separate
+`stdout` and `stderr`.
+
+The format is a **Header** and a **Payload** (frame).
+
+**HEADER**
+
+The header identifies the stream the payload belongs to (`stdout` or
+`stderr`). It also contains the size of the associated frame, encoded in the
+last four bytes (`uint32`).
+
+It is encoded on the first eight bytes like this:
+
+    header := [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4}
+
+`STREAM_TYPE` can be:
+
+- 0: `stdin` (is written on `stdout`)
+- 1: `stdout`
+- 2: `stderr`
+
+`SIZE1, SIZE2, SIZE3, SIZE4` are the four bytes of
+the `uint32` size encoded as big endian.
+
+**PAYLOAD**
+
+The payload is the raw stream.
+
+**IMPLEMENTATION**
+
+The simplest way to implement the Attach protocol is the following:
+
+    1. Read eight bytes.
+    2. Choose `stdout` or `stderr` depending on the first byte.
+    3. Extract the frame size from the last four bytes.
+    4. Read the frame payload (of the extracted size) and write it to the chosen output.
+    5. Goto 1.
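+
+A minimal sketch of that loop in Python (illustrative only; `stream` stands
+for any binary file-like object wrapping the hijacked connection):
+
+```
+import struct
+import sys
+
+def demux(stream):
+    while True:
+        header = stream.read(8)          # 1. Read eight bytes.
+        if len(header) < 8:
+            break                        # Stream closed.
+        # Big endian: 1 byte STREAM_TYPE, 3 padding bytes, uint32 SIZE.
+        stream_type, size = struct.unpack(">B3xI", header)
+        # 2. Types 0 and 1 go to stdout, type 2 to stderr.
+        out = sys.stderr if stream_type == 2 else sys.stdout
+        payload = stream.read(size)      # 3.-4. Read SIZE payload bytes.
+        out.write(payload.decode("utf-8", errors="replace"))
+        # 5. Loop back for the next frame.
+```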
+
+#### Attach to a container (websocket)
+
+`GET /containers/(id or name)/attach/ws`
+
+Attach to the container `id` via websocket
+
+Implements websocket protocol handshake according to [RFC 6455](http://tools.ietf.org/html/rfc6455)
+
+**Example request**
+
+    GET /v1.24/containers/e90e34656806/attach/ws?logs=0&stream=1&stdin=1&stdout=1&stderr=1 HTTP/1.1
+
+**Example response**
+
+    {% raw %}
+    {{ STREAM }}
+    {% endraw %}
+
+**Query parameters**:
+
+- **detachKeys** – Override the key sequence for detaching a
+  container. Format is a single character `[a-Z]` or `ctrl-<value>`
+  where `<value>` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`.
+- **logs** – 1/True/true or 0/False/false, return logs. Default `false`.
+- **stream** – 1/True/true or 0/False/false, return stream.
+  Default `false`.
+- **stdin** – 1/True/true or 0/False/false, if `stream=true`, attach
+  to `stdin`. Default `false`.
+- **stdout** – 1/True/true or 0/False/false, if `logs=true`, return
+  `stdout` log, if `stream=true`, attach to `stdout`. Default `false`.
+- **stderr** – 1/True/true or 0/False/false, if `logs=true`, return
+  `stderr` log, if `stream=true`, attach to `stderr`. Default `false`.
+
+**Status codes**:
+
+- **200** – no error
+- **400** – bad parameter
+- **404** – no such container
+- **500** – server error
+
+#### Wait a container
+
+`POST /containers/(id or name)/wait`
+
+Block until container `id` stops, then return the exit code
+
+**Example request**:
+
+    POST /v1.24/containers/16253994b7c4/wait HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    {"StatusCode": 0}
+
+**Status codes**:
+
+- **200** – no error
+- **404** – no such container
+- **500** – server error
+
+#### Remove a container
+
+`DELETE /containers/(id or name)`
+
+Remove the container `id` from the filesystem
+
+**Example request**:
+
+    DELETE /v1.24/containers/16253994b7c4?v=1 HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 204 No Content
+
+**Query parameters**:
+
+- **v** – 1/True/true or 0/False/false, Remove the volumes
+  associated with the container. Default `false`.
+- **force** - 1/True/true or 0/False/false, Kill then remove the container.
+  Default `false`.
+- **link** - 1/True/true or 0/False/false, Remove the specified
+  link associated with the container. Default `false`.
+
+**Status codes**:
+
+- **204** – no error
+- **400** – bad parameter
+- **404** – no such container
+- **409** – conflict
+- **500** – server error
+
+#### Retrieving information about files and folders in a container
+
+`HEAD /containers/(id or name)/archive`
+
+See the description of the `X-Docker-Container-Path-Stat` header in the
+following section.
+
+#### Get an archive of a filesystem resource in a container
+
+`GET /containers/(id or name)/archive`
+
+Get a tar archive of a resource in the filesystem of container `id`.
+
+**Query parameters**:
+
+- **path** - resource in the container's filesystem to archive. Required.
+
+  If not an absolute path, it is relative to the container's root directory.
+  The resource specified by **path** must exist. To assert that the resource
+  is expected to be a directory, **path** should end in `/` or `/.`
+  (assuming a path separator of `/`). If **path** ends in `/.` then this
+  indicates that only the contents of the **path** directory should be
+  copied. A symlink is always resolved to its target.
+
+  > **Note**: It is not possible to copy certain system files such as resources
+  > under `/proc`, `/sys`, `/dev`, and mounts created by the user in the
+  > container.
+
+**Example request**:
+
+    GET /v1.24/containers/8cce319429b2/archive?path=/root HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/x-tar
+    X-Docker-Container-Path-Stat: eyJuYW1lIjoicm9vdCIsInNpemUiOjQwOTYsIm1vZGUiOjIxNDc0ODQwOTYsIm10aW1lIjoiMjAxNC0wMi0yN1QyMDo1MToyM1oiLCJsaW5rVGFyZ2V0IjoiIn0=
+
+    {% raw %}
+    {{ TAR STREAM }}
+    {% endraw %}
+
+On success, a response header `X-Docker-Container-Path-Stat` will be set to a
+base64-encoded JSON object containing some filesystem header information about
+the archived resource. The above example value would decode to the following
+JSON object (whitespace added for readability):
+
+```json
+{
+    "name": "root",
+    "size": 4096,
+    "mode": 2147484096,
+    "mtime": "2014-02-27T20:51:23Z",
+    "linkTarget": ""
+}
+```
+
+A `HEAD` request can also be made to this endpoint if only this information is
+desired.
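+
+Decoding the header is a one-liner in most languages; a minimal Python sketch
+(illustrative only, using the example header value above):
+
+```
+import base64
+import json
+
+# Decode the X-Docker-Container-Path-Stat response header into the
+# JSON stat object shown above.
+def decode_path_stat(header_value):
+    return json.loads(base64.b64decode(header_value))
+
+stat = decode_path_stat(
+    "eyJuYW1lIjoicm9vdCIsInNpemUiOjQwOTYsIm1vZGUiOjIxNDc0ODQwOTYs"
+    "Im10aW1lIjoiMjAxNC0wMi0yN1QyMDo1MToyM1oiLCJsaW5rVGFyZ2V0IjoiIn0="
+)
+print(stat["name"], stat["size"])  # -> root 4096
+```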
+
+**Status codes**:
+
+- **200** - success, returns archive of copied resource
+- **400** - client error, bad parameter, details in JSON response body, one of:
+    - must specify path parameter (**path** cannot be empty)
+    - not a directory (**path** was asserted to be a directory but exists as a
+      file)
+- **404** - client error, resource not found, one of:
+    - no such container (container `id` does not exist)
+    - no such file or directory (**path** does not exist)
+- **500** - server error
+
+#### Extract an archive of files or folders to a directory in a container
+
+`PUT /containers/(id or name)/archive`
+
+Upload a tar archive to be extracted to a path in the filesystem of container
+`id`.
+
+**Query parameters**:
+
+- **path** - path to a directory in the container
+  to extract the archive's contents into. Required.
+
+  If not an absolute path, it is relative to the container's root directory.
+  The **path** resource must exist.
+- **noOverwriteDirNonDir** - If "1", "true", or "True" then it will be an error
+  if unpacking the given content would cause an existing directory to be
+  replaced with a non-directory and vice versa.
+
+**Example request**:
+
+    PUT /v1.24/containers/8cce319429b2/archive?path=/vol1 HTTP/1.1
+    Content-Type: application/x-tar
+
+    {% raw %}
+    {{ TAR STREAM }}
+    {% endraw %}
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+
+**Status codes**:
+
+- **200** – the content was extracted successfully
+- **400** - client error, bad parameter, details in JSON response body, one of:
+    - must specify path parameter (**path** cannot be empty)
+    - not a directory (**path** should be a directory but exists as a file)
+    - unable to overwrite existing directory with non-directory
+      (if **noOverwriteDirNonDir**)
+    - unable to overwrite existing non-directory with directory
+      (if **noOverwriteDirNonDir**)
+- **403** - client error, permission denied, the volume
+  or container rootfs is marked as read-only.
+- **404** - client error, resource not found, one of:
+    - no such container (container `id` does not exist)
+    - no such file or directory (**path** resource does not exist)
+- **500** – server error
+
+### 3.2 Images
+
+#### List Images
+
+`GET /images/json`
+
+**Example request**:
+
+    GET /v1.24/images/json?all=0 HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    [
+        {
+            "RepoTags": [
+                "ubuntu:12.04",
+                "ubuntu:precise",
+                "ubuntu:latest"
+            ],
+            "Id": "8dbd9e392a964056420e5d58ca5cc376ef18e2de93b5cc90e868a1bbc8318c1c",
+            "Created": 1365714795,
+            "Size": 131506275,
+            "VirtualSize": 131506275,
+            "Labels": {}
+        },
+        {
+            "RepoTags": [
+                "ubuntu:12.10",
+                "ubuntu:quantal"
+            ],
+            "ParentId": "27cf784147099545",
+            "Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc",
+            "Created": 1364102658,
+            "Size": 24653,
+            "VirtualSize": 180116135,
+            "Labels": {
+                "com.example.version": "v1"
+            }
+        }
+    ]
+
+**Example request, with digest information**:
+
+    GET /v1.24/images/json?digests=1 HTTP/1.1
+
+**Example response, with digest information**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    [
+        {
+            "Created": 1420064636,
+            "Id": "4986bf8c15363d1c5d15512d5266f8777bfba4974ac56e3270e7760f6f0a8125",
+            "ParentId": "ea13149945cb6b1e746bf28032f02e9b5a793523481a0a18645fc77ad53c4ea2",
+            "RepoDigests": [
+                "localhost:5000/test/busybox@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf"
+            ],
+            "RepoTags": [
+                "localhost:5000/test/busybox:latest",
+                "playdate:latest"
+            ],
+            "Size": 0,
+            "VirtualSize": 2429728,
+            "Labels": {}
+        }
+    ]
+
+The response shows a single image `Id` associated with two repositories
+(`RepoTags`): `localhost:5000/test/busybox` and `playdate`. A caller can use
+either of the `RepoTags` values `localhost:5000/test/busybox:latest` or
+`playdate:latest` to reference the image.
+
+You can also use `RepoDigests` values to reference an image. In this response,
+the array has only one reference and that is to the
+`localhost:5000/test/busybox` repository; the `playdate` repository has no
+digest. You can reference this digest using the value:
+`localhost:5000/test/busybox@sha256:cbbf2f9a99b47fc460d...`
+
+See the `docker run` and `docker build` commands for examples of digest and tag
+references on the command line.
+
+**Query parameters**:
+
+- **all** – 1/True/true or 0/False/false, default false
+- **filters** – a JSON encoded value of the filters (a map[string][]string) to process on the images list. Available filters:
+  - `dangling=true`
+  - `label=key` or `label="key=value"` of an image label
+  - `before`=(`<image-name>[:<tag>]`, `<image id>` or `<image@digest>`)
+  - `since`=(`<image-name>[:<tag>]`, `<image id>` or `<image@digest>`)
+- **filter** - only return images with the specified name
+
+#### Build image from a Dockerfile
+
+`POST /build`
+
+Build an image from a Dockerfile
+
+**Example request**:
+
+    POST /v1.24/build HTTP/1.1
+    Content-Type: application/x-tar
+
+    {% raw %}
+    {{ TAR STREAM }}
+    {% endraw %}
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    {"stream": "Step 1/5..."}
+    {"stream": "..."}
+    {"error": "Error...", "errorDetail": {"code": 123, "message": "Error..."}}
+
+The input stream must be a `tar` archive compressed with one of the
+following algorithms: `identity` (no compression), `gzip`, `bzip2`, `xz`.
+
+The archive must include a build instructions file, typically called
+`Dockerfile` at the archive's root. The `dockerfile` parameter may be
+used to specify a different build instructions file.
To do this, its value must be +the path to the alternate build instructions file to use. + +The archive may include any number of other files, +which are accessible in the build context (See the [*ADD build +command*](../reference/builder.md#add)). + +The Docker daemon performs a preliminary validation of the `Dockerfile` before +starting the build, and returns an error if the syntax is incorrect. After that, +each instruction is run one-by-one until the ID of the new image is output. + +The build is canceled if the client drops the connection by quitting +or being killed. + +**Query parameters**: + +- **dockerfile** - Path within the build context to the `Dockerfile`. This is + ignored if `remote` is specified and points to an external `Dockerfile`. +- **t** – A name and optional tag to apply to the image in the `name:tag` format. + If you omit the `tag` the default `latest` value is assumed. + You can provide one or more `t` parameters. +- **remote** – A Git repository URI or HTTP/HTTPS context URI. If the + URI points to a single text file, the file's contents are placed into + a file called `Dockerfile` and the image is built from that file. If + the URI points to a tarball, the file is downloaded by the daemon and + the contents therein used as the context for the build. If the URI + points to a tarball and the `dockerfile` parameter is also specified, + there must be a file with the corresponding path inside the tarball. +- **q** – Suppress verbose build output. +- **nocache** – Do not use the cache when building the image. +- **pull** - Attempt to pull the image even if an older image exists locally. +- **rm** - Remove intermediate containers after a successful build (default behavior). +- **forcerm** - Always remove intermediate containers (includes `rm`). +- **memory** - Set memory limit for build. +- **memswap** - Total memory (memory + swap), `-1` to enable unlimited swap. +- **cpushares** - CPU shares (relative weight). +- **cpusetcpus** - CPUs in which to allow execution (e.g., `0-3`, `0,1`). +- **cpuperiod** - The length of a CPU period in microseconds. +- **cpuquota** - Microseconds of CPU time that the container can get in a CPU period. +- **buildargs** – JSON map of string pairs for build-time variables. Users pass + these values at build-time. Docker uses the `buildargs` as the environment + context for command(s) run via the Dockerfile's `RUN` instruction or for + variable expansion in other Dockerfile instructions. This is not meant for + passing secret values. [Read more about the buildargs instruction](../reference/builder.md#arg) +- **shmsize** - Size of `/dev/shm` in bytes. The size must be greater than 0. If omitted the system uses 64MB. +- **labels** – JSON map of string pairs for labels to set on the image. + +**Request Headers**: + +- **Content-type** – Set to `"application/x-tar"`. +- **X-Registry-Config** – A base64-url-safe-encoded Registry Auth Config JSON + object with the following structure: + + { + "docker.example.com": { + "username": "janedoe", + "password": "hunter2" + }, + "https://index.docker.io/v1/": { + "username": "mobydock", + "password": "conta1n3rize14" + } + } + + This object maps the hostname of a registry to an object containing the + "username" and "password" for that registry. Multiple registries may + be specified as the build may be based on an image requiring + authentication to pull from any arbitrary registry. Only the registry + domain name (and port if not the default "443") are required. 
However + (for legacy reasons) the "official" Docker, Inc. hosted registry must + be specified with both a "https://" prefix and a "/v1/" suffix even + though Docker will prefer to use the v2 registry API. + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Create an image + +`POST /images/create` + +Create an image either by pulling it from the registry or by importing it + +**Example request**: + + POST /v1.24/images/create?fromImage=busybox&tag=latest HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status": "Pulling..."} + {"status": "Pulling", "progress": "1 B/ 100 B", "progressDetail": {"current": 1, "total": 100}} + {"error": "Invalid..."} + ... + +When using this endpoint to pull an image from the registry, the +`X-Registry-Auth` header can be used to include +a base64-encoded AuthConfig object. + +**Query parameters**: + +- **fromImage** – Name of the image to pull. The name may include a tag or + digest. This parameter may only be used when pulling an image. + The pull is cancelled if the HTTP connection is closed. +- **fromSrc** – Source to import. The value may be a URL from which the image + can be retrieved or `-` to read the image from the request body. + This parameter may only be used when importing an image. +- **repo** – Repository name given to an image when it is imported. + The repo may include a tag. This parameter may only be used when importing + an image. +- **tag** – Tag or digest. If empty when pulling an image, this causes all tags + for the given image to be pulled. + +**Request Headers**: + +- **X-Registry-Auth** – base64-encoded AuthConfig object, containing either login information, or a token + - Credential based login: + + ``` + { + "username": "jdoe", + "password": "secret", + "email": "jdoe@acme.com" + } + ``` + + - Token based login: + + ``` + { + "identitytoken": "9cbaf023786cd7..." 
+ } + ``` + +**Status codes**: + +- **200** – no error +- **404** - repository does not exist or no read access +- **500** – server error + + + +#### Inspect an image + +`GET /images/(name)/json` + +Return low-level information on the image `name` + +**Example request**: + + GET /v1.24/images/example/json HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Id" : "sha256:85f05633ddc1c50679be2b16a0479ab6f7637f8884e0cfe0f4d20e1ebb3d6e7c", + "Container" : "cb91e48a60d01f1e27028b4fc6819f4f290b3cf12496c8176ec714d0d390984a", + "Comment" : "", + "Os" : "linux", + "Architecture" : "amd64", + "Parent" : "sha256:91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c", + "ContainerConfig" : { + "Tty" : false, + "Hostname" : "e611e15f9c9d", + "Volumes" : null, + "Domainname" : "", + "AttachStdout" : false, + "PublishService" : "", + "AttachStdin" : false, + "OpenStdin" : false, + "StdinOnce" : false, + "NetworkDisabled" : false, + "OnBuild" : [], + "Image" : "91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c", + "User" : "", + "WorkingDir" : "", + "Entrypoint" : null, + "MacAddress" : "", + "AttachStderr" : false, + "Labels" : { + "com.example.license" : "GPL", + "com.example.version" : "1.0", + "com.example.vendor" : "Acme" + }, + "Env" : [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + ], + "ExposedPorts" : null, + "Cmd" : [ + "/bin/sh", + "-c", + "#(nop) LABEL com.example.vendor=Acme com.example.license=GPL com.example.version=1.0" + ] + }, + "DockerVersion" : "1.9.0-dev", + "VirtualSize" : 188359297, + "Size" : 0, + "Author" : "", + "Created" : "2015-09-10T08:30:53.26995814Z", + "GraphDriver" : { + "Name" : "aufs", + "Data" : null + }, + "RepoDigests" : [ + "localhost:5000/test/busybox/example@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf" + ], + "RepoTags" : [ + "example:1.0", + "example:latest", + "example:stable" + ], + "Config" : { + "Image" : "91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c", + "NetworkDisabled" : false, + "OnBuild" : [], + "StdinOnce" : false, + "PublishService" : "", + "AttachStdin" : false, + "OpenStdin" : false, + "Domainname" : "", + "AttachStdout" : false, + "Tty" : false, + "Hostname" : "e611e15f9c9d", + "Volumes" : null, + "Cmd" : [ + "/bin/bash" + ], + "ExposedPorts" : null, + "Env" : [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + ], + "Labels" : { + "com.example.vendor" : "Acme", + "com.example.version" : "1.0", + "com.example.license" : "GPL" + }, + "Entrypoint" : null, + "MacAddress" : "", + "AttachStderr" : false, + "WorkingDir" : "", + "User" : "" + }, + "RootFS": { + "Type": "layers", + "Layers": [ + "sha256:1834950e52ce4d5a88a1bbd131c537f4d0e56d10ff0dd69e66be3b7dfa9df7e6", + "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef" + ] + } + } + +**Status codes**: + +- **200** – no error +- **404** – no such image +- **500** – server error + +#### Get the history of an image + +`GET /images/(name)/history` + +Return the history of the image `name` + +**Example request**: + + GET /v1.24/images/ubuntu/history HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Id": "3db9c44f45209632d6050b35958829c3a2aa256d81b9a7be45b362ff85c54710", + "Created": 1398108230, + "CreatedBy": "/bin/sh -c #(nop) ADD file:eb15dbd63394e063b805a3c32ca7bf0266ef64676d5a6fab4801f2e81e2a5148 in /", + "Tags": [ + "ubuntu:lucid", + "ubuntu:10.04" + ], + "Size": 
182964289,
+            "Comment": ""
+        },
+        {
+            "Id": "6cfa4d1f33fb861d4d114f43b25abd0ac737509268065cdfd69d544a59c85ab8",
+            "Created": 1398108222,
+            "CreatedBy": "/bin/sh -c #(nop) MAINTAINER Tianon Gravi - mkimage-debootstrap.sh -i iproute,iputils-ping,ubuntu-minimal -t lucid.tar.xz lucid http://archive.ubuntu.com/ubuntu/",
+            "Tags": null,
+            "Size": 0,
+            "Comment": ""
+        },
+        {
+            "Id": "511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158",
+            "Created": 1371157430,
+            "CreatedBy": "",
+            "Tags": [
+                "scratch12:latest",
+                "scratch:latest"
+            ],
+            "Size": 0,
+            "Comment": "Imported from -"
+        }
+    ]
+
+**Status codes**:
+
+- **200** – no error
+- **404** – no such image
+- **500** – server error
+
+#### Push an image on the registry
+
+`POST /images/(name)/push`
+
+Push the image `name` on the registry
+
+**Example request**:
+
+    POST /v1.24/images/test/push HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    {"status": "Pushing..."}
+    {"status": "Pushing", "progress": "1/? (n/a)", "progressDetail": {"current": 1}}
+    {"error": "Invalid..."}
+    ...
+
+To push an image to a private registry, the image must already be tagged
+into a repository which references that registry `hostname` and `port`. This
+repository name should then be used in the URL. This mirrors the command
+line's flow.
+
+The push is cancelled if the HTTP connection is closed.
+
+**Example request**:
+
+    POST /v1.24/images/registry.acme.com:5000/test/push HTTP/1.1
+
+**Query parameters**:
+
+- **tag** – The tag to associate with the image on the registry. This is optional.
+
+**Request Headers**:
+
+- **X-Registry-Auth** – base64-encoded AuthConfig object, containing either login information, or a token
+    - Credential based login:
+
+        ```
+        {
+            "username": "jdoe",
+            "password": "secret",
+            "email": "jdoe@acme.com"
+        }
+        ```
+
+    - Identity token based login:
+
+        ```
+        {
+            "identitytoken": "9cbaf023786cd7..."
+        }
+        ```
+
+**Status codes**:
+
+- **200** – no error
+- **404** – no such image
+- **500** – server error
+
+#### Tag an image into a repository
+
+`POST /images/(name)/tag`
+
+Tag the image `name` into a repository
+
+**Example request**:
+
+    POST /v1.24/images/test/tag?repo=myrepo&tag=v42 HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 201 Created
+
+**Query parameters**:
+
+- **repo** – The repository to tag in
+- **tag** - The new tag name
+
+**Status codes**:
+
+- **201** – no error
+- **400** – bad parameter
+- **404** – no such image
+- **409** – conflict
+- **500** – server error
+
+#### Remove an image
+
+`DELETE /images/(name)`
+
+Remove the image `name` from the filesystem
+
+**Example request**:
+
+    DELETE /v1.24/images/test HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-type: application/json
+
+    [
+        {"Untagged": "3e2f21a89f"},
+        {"Deleted": "3e2f21a89f"},
+        {"Deleted": "53b4f83ac9"}
+    ]
+
+**Query parameters**:
+
+- **force** – 1/True/true or 0/False/false, default false
+- **noprune** – 1/True/true or 0/False/false, default false
+
+**Status codes**:
+
+- **200** – no error
+- **404** – no such image
+- **409** – conflict
+- **500** – server error
+
+#### Search images
+
+`GET /images/search`
+
+Search for an image on [Docker Hub](https://hub.docker.com).
+
+> **Note**:
+> The response keys have changed from API v1.6 to reflect the JSON
+> sent by the registry server to the docker daemon's request.
+
+**Example request**:
+
+    GET /v1.24/images/search?term=sshd HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    [
+        {
+            "description": "",
+            "is_official": false,
+            "is_automated": false,
+            "name": "wma55/u1210sshd",
+            "star_count": 0
+        },
+        {
+            "description": "",
+            "is_official": false,
+            "is_automated": false,
+            "name": "jdswinbank/sshd",
+            "star_count": 0
+        },
+        {
+            "description": "",
+            "is_official": false,
+            "is_automated": false,
+            "name": "vgauthier/sshd",
+            "star_count": 0
+        }
+    ...
+    ]
+
+**Query parameters**:
+
+- **term** – term to search
+- **limit** – maximum returned search results
+- **filters** – a JSON encoded value of the filters (a map[string][]string) to process on the images list. Available filters:
+  - `stars=<number>`
+  - `is-automated=(true|false)`
+  - `is-official=(true|false)`
+
+**Status codes**:
+
+- **200** – no error
+- **500** – server error
+
+### 3.3 Misc
+
+#### Check auth configuration
+
+`POST /auth`
+
+Validate credentials for a registry and, if available, get an identity token
+for accessing the registry without a password.
+
+**Example request**:
+
+    POST /v1.24/auth HTTP/1.1
+    Content-Type: application/json
+
+    {
+        "username": "hannibal",
+        "password": "xxxx",
+        "serveraddress": "https://index.docker.io/v1/"
+    }
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+
+    {
+        "Status": "Login Succeeded",
+        "IdentityToken": "9cbaf023786cd7..."
+    }
+
+**Status codes**:
+
+- **200** – no error
+- **204** – no error
+- **500** – server error
+
+#### Display system-wide information
+
+`GET /info`
+
+Display system-wide information
+
+**Example request**:
+
+    GET /v1.24/info HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    {
+        "Architecture": "x86_64",
+        "ClusterStore": "etcd://localhost:2379",
+        "CgroupDriver": "cgroupfs",
+        "Containers": 11,
+        "ContainersRunning": 7,
+        "ContainersStopped": 3,
+        "ContainersPaused": 1,
+        "CpuCfsPeriod": true,
+        "CpuCfsQuota": true,
+        "Debug": false,
+        "DockerRootDir": "/var/lib/docker",
+        "Driver": "btrfs",
+        "DriverStatus": [[""]],
+        "ExperimentalBuild": false,
+        "HttpProxy": "http://test:test@localhost:8080",
+        "HttpsProxy": "https://test:test@localhost:8080",
+        "ID": "7TRN:IPZB:QYBB:VPBQ:UMPP:KARE:6ZNR:XE6T:7EWV:PKF4:ZOJD:TPYS",
+        "IPv4Forwarding": true,
+        "Images": 16,
+        "IndexServerAddress": "https://index.docker.io/v1/",
+        "InitPath": "/usr/bin/docker",
+        "InitSha1": "",
+        "KernelMemory": true,
+        "KernelVersion": "3.12.0-1-amd64",
+        "Labels": [
+            "storage=ssd"
+        ],
+        "MemTotal": 2099236864,
+        "MemoryLimit": true,
+        "NCPU": 1,
+        "NEventsListener": 0,
+        "NFd": 11,
+        "NGoroutines": 21,
+        "Name": "prod-server-42",
+        "NoProxy": "9.81.1.160",
+        "OomKillDisable": true,
+        "OSType": "linux",
+        "OperatingSystem": "Boot2Docker",
+        "Plugins": {
+            "Volume": [
+                "local"
+            ],
+            "Network": [
+                "null",
+                "host",
+                "bridge"
+            ]
+        },
+        "RegistryConfig": {
+            "IndexConfigs": {
+                "docker.io": {
+                    "Mirrors": null,
+                    "Name": "docker.io",
+                    "Official": true,
+                    "Secure": true
+                }
+            },
+            "InsecureRegistryCIDRs": [
+                "127.0.0.0/8"
+            ]
+        },
+        "SecurityOptions": [
+            "apparmor",
+            "seccomp",
+            "selinux"
+        ],
+        "ServerVersion": "1.9.0",
+        "SwapLimit": false,
+        "SystemStatus": [["State", "Healthy"]],
+        "SystemTime": "2015-03-10T11:11:23.730591467-07:00"
+    }
+
+**Status codes**:
+
+- **200** – no error
+- **500** – server error
+
+#### Show the docker version information
+
+`GET /version`
+
+Show the docker version information
+
+**Example request**:
+
+    GET /v1.24/version HTTP/1.1
+**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Version": "1.12.0", + "Os": "linux", + "KernelVersion": "3.19.0-23-generic", + "GoVersion": "go1.6.3", + "GitCommit": "deadbee", + "Arch": "amd64", + "ApiVersion": "1.24", + "BuildTime": "2016-06-14T07:09:13.444803460+00:00", + "Experimental": true + } + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Ping the docker server + +`GET /_ping` + +Ping the docker server + +**Example request**: + + GET /v1.24/_ping HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: text/plain + + OK + +**Status codes**: + +- **200** - no error +- **500** - server error + +#### Create a new image from a container's changes + +`POST /commit` + +Create a new image from a container's changes + +**Example request**: + + POST /v1.24/commit?container=44c004db4b17&comment=message&repo=myrepo HTTP/1.1 + Content-Type: application/json + + { + "Hostname": "", + "Domainname": "", + "User": "", + "AttachStdin": false, + "AttachStdout": true, + "AttachStderr": true, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": null, + "Cmd": [ + "date" + ], + "Mounts": [ + { + "Source": "/data", + "Destination": "/data", + "Mode": "ro,Z", + "RW": false + } + ], + "Labels": { + "key1": "value1", + "key2": "value2" + }, + "WorkingDir": "", + "NetworkDisabled": false, + "ExposedPorts": { + "22/tcp": {} + } + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + {"Id": "596069db4bf5"} + +**JSON parameters**: + +- **config** - the container's configuration + +**Query parameters**: + +- **container** – source container +- **repo** – repository +- **tag** – tag +- **comment** – commit message +- **author** – author (e.g., "John Hannibal Smith + <[hannibal@a-team.com](mailto:hannibal%40a-team.com)>") +- **pause** – 1/True/true or 0/False/false, whether to pause the container before committing +- **changes** – Dockerfile instructions to apply while committing + +**Status codes**: + +- **201** – no error +- **404** – no such container +- **500** – server error + +#### Monitor Docker's events + +`GET /events` + +Get container events from docker, in real time via streaming. 
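+
+Events arrive as a stream of JSON objects over a long-lived HTTP connection,
+so clients typically decode them incrementally. A minimal Python consumer
+sketch (illustrative only; it assumes a daemon listening on
+`tcp://localhost:2375`, whereas the default is a Unix socket):
+
+```
+import http.client
+import json
+
+conn = http.client.HTTPConnection("localhost", 2375)
+conn.request("GET", "/v1.24/events?since=1374067924")
+resp = conn.getresponse()
+
+decoder = json.JSONDecoder()
+buf = ""
+while True:
+    chunk = resp.read(4096).decode("utf-8")
+    if not chunk:
+        break
+    buf += chunk
+    # Pull complete JSON objects off the front of the buffer as they arrive.
+    while buf.lstrip():
+        try:
+            event, end = decoder.raw_decode(buf.lstrip())
+        except json.JSONDecodeError:
+            break  # Incomplete object; wait for more data.
+        print(event.get("Type"), event.get("Action"))
+        buf = buf.lstrip()[end:]
+```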
+
+Docker containers report the following events:
+
+    attach, commit, copy, create, destroy, detach, die, exec_create, exec_detach, exec_start, export, health_status, kill, oom, pause, rename, resize, restart, start, stop, top, unpause, update
+
+Docker images report the following events:
+
+    delete, import, load, pull, push, save, tag, untag
+
+Docker volumes report the following events:
+
+    create, mount, unmount, destroy
+
+Docker networks report the following events:
+
+    create, connect, disconnect, destroy
+
+The Docker daemon reports the following event:
+
+    reload
+
+**Example request**:
+
+    GET /v1.24/events?since=1374067924
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+    Server: Docker/1.12.0 (linux)
+    Date: Fri, 29 Apr 2016 15:18:06 GMT
+    Transfer-Encoding: chunked
+
+    {
+        "status": "pull",
+        "id": "alpine:latest",
+        "Type": "image",
+        "Action": "pull",
+        "Actor": {
+            "ID": "alpine:latest",
+            "Attributes": {
+                "name": "alpine"
+            }
+        },
+        "time": 1461943101,
+        "timeNano": 1461943101301854122
+    }
+    {
+        "status": "create",
+        "id": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743",
+        "from": "alpine",
+        "Type": "container",
+        "Action": "create",
+        "Actor": {
+            "ID": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743",
+            "Attributes": {
+                "com.example.some-label": "some-label-value",
+                "image": "alpine",
+                "name": "my-container"
+            }
+        },
+        "time": 1461943101,
+        "timeNano": 1461943101381709551
+    }
+    {
+        "status": "attach",
+        "id": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743",
+        "from": "alpine",
+        "Type": "container",
+        "Action": "attach",
+        "Actor": {
+            "ID": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743",
+            "Attributes": {
+                "com.example.some-label": "some-label-value",
+                "image": "alpine",
+                "name": "my-container"
+            }
+        },
+        "time": 1461943101,
+        "timeNano": 1461943101383858412
+    }
+    {
+        "Type": "network",
+        "Action": "connect",
+        "Actor": {
+            "ID": "7dc8ac97d5d29ef6c31b6052f3938c1e8f2749abbd17d1bd1febf2608db1b474",
+            "Attributes": {
+                "container": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743",
+                "name": "bridge",
+                "type": "bridge"
+            }
+        },
+        "time": 1461943101,
+        "timeNano": 1461943101394865557
+    }
+    {
+        "status": "start",
+        "id": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743",
+        "from": "alpine",
+        "Type": "container",
+        "Action": "start",
+        "Actor": {
+            "ID": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743",
+            "Attributes": {
+                "com.example.some-label": "some-label-value",
+                "image": "alpine",
+                "name": "my-container"
+            }
+        },
+        "time": 1461943101,
+        "timeNano": 1461943101607533796
+    }
+    {
+        "status": "resize",
+        "id": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743",
+        "from": "alpine",
+        "Type": "container",
+        "Action": "resize",
+        "Actor": {
+            "ID": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743",
+            "Attributes": {
+                "com.example.some-label": "some-label-value",
+                "height": "46",
+                "image": "alpine",
+                "name": "my-container",
+                "width": "204"
+            }
+        },
+        "time": 1461943101,
+        "timeNano": 1461943101610269268
+    }
+    {
+        "status": "die",
+        "id": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743",
+        "from": "alpine",
+        "Type": "container",
+        "Action": "die",
+        "Actor": {
+            "ID": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743",
+            "Attributes": {
+                "com.example.some-label": "some-label-value",
+                "exitCode": "0",
+                "image": "alpine",
+                "name": "my-container"
+            }
+        },
+        "time": 1461943105,
+        "timeNano": 1461943105079144137
+    }
+    {
+        "Type": "network",
+        "Action": "disconnect",
+        "Actor": {
+            "ID": "7dc8ac97d5d29ef6c31b6052f3938c1e8f2749abbd17d1bd1febf2608db1b474",
+            "Attributes": {
+                "container": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743",
+                "name": "bridge",
+                "type": "bridge"
+            }
+        },
+        "time": 1461943105,
+        "timeNano": 1461943105230860245
+    }
+    {
+        "status": "destroy",
+        "id": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743",
+        "from": "alpine",
+        "Type": "container",
+        "Action": "destroy",
+        "Actor": {
+            "ID": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743",
+            "Attributes": {
+                "com.example.some-label": "some-label-value",
+                "image": "alpine",
+                "name": "my-container"
+            }
+        },
+        "time": 1461943105,
+        "timeNano": 1461943105338056026
+    }
+
+**Query parameters**:
+
+- **since** – Timestamp. Show all events created since timestamp and then stream
+- **until** – Timestamp. Show events created until given timestamp and stop streaming
+- **filters** – A json encoded value of the filters (a map[string][]string) to process on the event list. Available filters:
+  - `container=<string>`; -- container to filter
+  - `event=<string>`; -- event to filter
+  - `image=<string>`; -- image to filter
+  - `label=<string>`; -- image and container label to filter
+  - `type=<string>`; -- either `container` or `image` or `volume` or `network` or `daemon`
+  - `volume=<string>`; -- volume to filter
+  - `network=<string>`; -- network to filter
+  - `daemon=<string>`; -- daemon name or id to filter
+
+**Status codes**:
+
+- **200** – no error
+- **400** - bad parameter
+- **500** – server error
+
+#### Get a tarball containing all images in a repository
+
+`GET /images/(name)/get`
+
+Get a tarball containing all images and metadata for the repository specified
+by `name`.
+
+If `name` is a specific name and tag (e.g. `ubuntu:latest`), then only that image
+(and its parents) are returned. If `name` is an image ID, similarly only that
+image (and its parents) are returned, but with the exclusion of the
+'repositories' file in the tarball, as there were no image names referenced.
+
+See the [image tarball format](#image-tarball-format) for more details.
+
+**Example request**
+
+    GET /v1.24/images/ubuntu/get
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/x-tar
+
+    Binary data stream
+
+**Status codes**:
+
+- **200** – no error
+- **500** – server error
+
+#### Get a tarball containing all images
+
+`GET /images/get`
+
+Get a tarball containing all images and metadata for one or more repositories.
+
+For each value of the `names` parameter: if it is a specific name and tag (e.g.
+`ubuntu:latest`), then only that image (and its parents) are returned; if it is
+an image ID, similarly only that image (and its parents) are returned and there
+would be no names referenced in the 'repositories' file for this image ID.
+
+See the [image tarball format](#image-tarball-format) for more details.
+
+**Example request**
+
+    GET /v1.24/images/get?names=myname%2Fmyapp%3Alatest&names=busybox
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/x-tar
+
+    Binary data stream
+
+**Status codes**:
+
+- **200** – no error
+- **500** – server error
+
+#### Load a tarball with a set of images and tags into docker
+
+`POST /images/load`
+
+Load a set of images and tags into a Docker repository.
+See the [image tarball format](#image-tarball-format) for more details.
+
+**Example request**
+
+    POST /v1.24/images/load
+    Content-Type: application/x-tar
+
+    Tarball in body
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+    Transfer-Encoding: chunked
+
+    {"status":"Loading layer","progressDetail":{"current":32768,"total":1292800},"progress":"[= ] 32.77 kB/1.293 MB","id":"8ac8bfaff55a"}
+    {"status":"Loading layer","progressDetail":{"current":65536,"total":1292800},"progress":"[== ] 65.54 kB/1.293 MB","id":"8ac8bfaff55a"}
+    {"status":"Loading layer","progressDetail":{"current":98304,"total":1292800},"progress":"[=== ] 98.3 kB/1.293 MB","id":"8ac8bfaff55a"}
+    {"status":"Loading layer","progressDetail":{"current":131072,"total":1292800},"progress":"[===== ] 131.1 kB/1.293 MB","id":"8ac8bfaff55a"}
+    ...
+    {"stream":"Loaded image: busybox:latest\n"}
+
+**Example response**:
+
+If the "quiet" query parameter is set to `true` / `1` (`?quiet=1`), progress
+details are suppressed, and only a confirmation message is returned once the
+action completes.
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+    Transfer-Encoding: chunked
+
+    {"stream":"Loaded image: busybox:latest\n"}
+
+**Query parameters**:
+
+- **quiet** – Boolean value, suppress progress details during load. Defaults
+  to `0` / `false` if omitted.
+
+**Status codes**:
+
+- **200** – no error
+- **500** – server error
+
+#### Image tarball format
+
+An image tarball contains one directory per image layer (named using its long ID),
+each containing these files:
+
+- `VERSION`: currently `1.0` - the file format version
+- `json`: detailed layer information, similar to `docker inspect layer_id`
+- `layer.tar`: A tarfile containing the filesystem changes in this layer
+
+The `layer.tar` file contains `aufs` style `.wh..wh.aufs` files and directories
+for storing attribute changes and deletions.
+
+If the tarball defines a repository, the tarball should also include a `repositories` file at
+the root that contains a list of repository and tag names mapped to layer IDs.
+
+```
+{"hello-world":
+    {"latest": "565a9d68a73f6706862bfe8409a7f659776d4d60a8d096eb4a3cbce6999cc2a1"}
+}
+```
+
+#### Exec Create
+
+`POST /containers/(id or name)/exec`
+
+Sets up an exec instance in a running container `id`
+
+**Example request**:
+
+    POST /v1.24/containers/e90e34656806/exec HTTP/1.1
+    Content-Type: application/json
+
+    {
+        "AttachStdin": true,
+        "AttachStdout": true,
+        "AttachStderr": true,
+        "Cmd": ["sh"],
+        "DetachKeys": "ctrl-p,ctrl-q",
+        "Privileged": true,
+        "Tty": true,
+        "User": "123:456"
+    }
+
+**Example response**:
+
+    HTTP/1.1 201 Created
+    Content-Type: application/json
+
+    {
+        "Id": "f90e34656806",
+        "Warnings":[]
+    }
+
+**JSON parameters**:
+
+- **AttachStdin** - Boolean value, attaches to `stdin` of the `exec` command.
+- **AttachStdout** - Boolean value, attaches to `stdout` of the `exec` command.
+- **AttachStderr** - Boolean value, attaches to `stderr` of the `exec` command.
+- **DetachKeys** – Override the key sequence for detaching a
+  container. Format is a single character `[a-Z]` or `ctrl-<value>`
+  where `<value>` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`.
+- **Tty** - Boolean value to allocate a pseudo-TTY.
+- **Cmd** - Command to run specified as a string or an array of strings.
+- **Privileged** - Boolean value, runs the exec process with extended privileges.
+- **User** - A string value specifying the user, and optionally, group to run
+  the exec process inside the container. Format is one of: `"user"`,
+  `"user:group"`, `"uid"`, or `"uid:gid"`.
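+
+Creating an exec instance only registers it; the returned `Id` must then be
+passed to the [`POST /exec/(id)/start`](#exec-start) endpoint described below.
+A minimal round-trip sketch in Python (illustrative only; it assumes a daemon
+on `tcp://localhost:2375` and a running container `e90e34656806`):
+
+```
+import http.client
+import json
+
+conn = http.client.HTTPConnection("localhost", 2375)
+
+# Create the exec instance and capture its Id.
+conn.request("POST", "/v1.24/containers/e90e34656806/exec",
+             json.dumps({"AttachStdout": True, "AttachStderr": True,
+                         "Cmd": ["sh", "-c", "echo hello"]}),
+             {"Content-Type": "application/json"})
+exec_id = json.loads(conn.getresponse().read())["Id"]
+
+# Start it; with Detach=false the response body carries the multiplexed
+# output stream described under "Attach to a container".
+conn.request("POST", "/v1.24/exec/" + exec_id + "/start",
+             json.dumps({"Detach": False, "Tty": False}),
+             {"Content-Type": "application/json"})
+print(conn.getresponse().read())
+```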
+
+**Status codes**:
+
+- **201** – no error
+- **404** – no such container
+- **409** - container is paused
+- **500** - server error
+
+#### Exec Start
+
+`POST /exec/(id)/start`
+
+Starts a previously set up `exec` instance `id`. If `detach` is true, this API
+returns after starting the `exec` command. Otherwise, this API sets up an
+interactive session with the `exec` command.
+
+**Example request**:
+
+    POST /v1.24/exec/e90e34656806/start HTTP/1.1
+    Content-Type: application/json
+
+    {
+        "Detach": false,
+        "Tty": false
+    }
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/vnd.docker.raw-stream
+
+    {% raw %}
+    {{ STREAM }}
+    {% endraw %}
+
+**JSON parameters**:
+
+- **Detach** - Detach from the `exec` command.
+- **Tty** - Boolean value to allocate a pseudo-TTY.
+
+**Status codes**:
+
+- **200** – no error
+- **404** – no such exec instance
+- **409** - container is paused
+
+**Stream details**:
+
+Similar to the stream behavior of the `POST /containers/(id or name)/attach` API
+
+#### Exec Resize
+
+`POST /exec/(id)/resize`
+
+Resizes the `tty` session used by the `exec` command `id`. The unit is number of characters.
+This API is valid only if `tty` was specified as part of creating and starting the `exec` command.
+
+**Example request**:
+
+    POST /v1.24/exec/e90e34656806/resize?h=40&w=80 HTTP/1.1
+    Content-Type: text/plain
+
+**Example response**:
+
+    HTTP/1.1 201 Created
+    Content-Type: text/plain
+
+**Query parameters**:
+
+- **h** – height of `tty` session
+- **w** – width of `tty` session
+
+**Status codes**:
+
+- **201** – no error
+- **404** – no such exec instance
+
+#### Exec Inspect
+
+`GET /exec/(id)/json`
+
+Return low-level information about the `exec` command `id`.
+
+**Example request**:
+
+    GET /v1.24/exec/11fb006128e8ceb3942e7c58d77750f24210e35f879dd204ac975c184b820b39/json HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    {
+        "CanRemove": false,
+        "ContainerID": "b53ee82b53a40c7dca428523e34f741f3abc51d9f297a14ff874bf761b995126",
+        "DetachKeys": "",
+        "ExitCode": 2,
+        "ID": "f33bbfb39f5b142420f4759b2348913bd4a8d1a6d7fd56499cb41a1bb91d7b3b",
+        "OpenStderr": true,
+        "OpenStdin": true,
+        "OpenStdout": true,
+        "ProcessConfig": {
+            "arguments": [
+                "-c",
+                "exit 2"
+            ],
+            "entrypoint": "sh",
+            "privileged": false,
+            "tty": true,
+            "user": "1000"
+        },
+        "Running": false
+    }
+
+**Status codes**:
+
+- **200** – no error
+- **404** – no such exec instance
+- **500** - server error
+
+### 3.4 Volumes
+
+#### List volumes
+
+`GET /volumes`
+
+**Example request**:
+
+    GET /v1.24/volumes HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    {
+        "Volumes": [
+            {
+                "Name": "tardis",
+                "Driver": "local",
+                "Mountpoint": "/var/lib/docker/volumes/tardis",
+                "Labels": null,
+                "Scope": "local"
+            }
+        ],
+        "Warnings": []
+    }
+
+**Query parameters**:
+
+- **filters** - JSON encoded value of the filters (a `map[string][]string`) to process on the volumes list (see the encoding sketch after this list). Available filters:
+  - `name=<volume-name>` Matches all or part of a volume name.
+  - `dangling=<boolean>` When set to `true` (or `1`), returns all volumes that are "dangling" (not in use by a container). When set to `false` (or `0`), only volumes that are in use by one or more containers are returned.
+  - `driver=<volume-driver-name>` Matches all or part of a volume driver name.
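+
+The `filters` value is the JSON text itself, URL-encoded into the query
+string. A minimal sketch of building such a request path in Python
+(illustrative only):
+
+```
+import json
+import urllib.parse
+
+# Build the query string for GET /volumes with a JSON-encoded `filters`
+# map (a map[string][]string, as described above).
+filters = {"dangling": ["true"], "driver": ["local"]}
+query = urllib.parse.urlencode({"filters": json.dumps(filters)})
+print("/v1.24/volumes?" + query)
+# -> /v1.24/volumes?filters=%7B%22dangling%22%3A+%5B%22true%22%5D%2C+%22driver%22%3A+%5B%22local%22%5D%7D
+```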
+
+**Status codes**:
+
+- **200** - no error
+- **500** - server error
+
+#### Create a volume
+
+`POST /volumes/create`
+
+Create a volume
+
+**Example request**:
+
+    POST /v1.24/volumes/create HTTP/1.1
+    Content-Type: application/json
+
+    {
+        "Name": "tardis",
+        "Labels": {
+            "com.example.some-label": "some-value",
+            "com.example.some-other-label": "some-other-value"
+        },
+        "Driver": "custom"
+    }
+
+**Example response**:
+
+    HTTP/1.1 201 Created
+    Content-Type: application/json
+
+    {
+        "Name": "tardis",
+        "Driver": "custom",
+        "Mountpoint": "/var/lib/docker/volumes/tardis",
+        "Status": {
+            "hello": "world"
+        },
+        "Labels": {
+            "com.example.some-label": "some-value",
+            "com.example.some-other-label": "some-other-value"
+        },
+        "Scope": "local"
+    }
+
+**Status codes**:
+
+- **201** - no error
+- **500** - server error
+
+**JSON parameters**:
+
+- **Name** - The new volume's name. If not specified, Docker generates a name.
+- **Driver** - Name of the volume driver to use. Defaults to `local`.
+- **DriverOpts** - A mapping of driver options and values. These options are
+  passed directly to the driver and are driver specific.
+- **Labels** - Labels to set on the volume, specified as a map: `{"key":"value","key2":"value2"}`
+
+**JSON fields in response**:
+
+Refer to the [inspect a volume](#inspect-a-volume) section for details about the
+JSON fields returned in the response.
+
+#### Inspect a volume
+
+`GET /volumes/(name)`
+
+Return low-level information on the volume `name`
+
+**Example request**:
+
+    GET /v1.24/volumes/tardis
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    {
+        "Name": "tardis",
+        "Driver": "custom",
+        "Mountpoint": "/var/lib/docker/volumes/tardis/_data",
+        "Status": {
+            "hello": "world"
+        },
+        "Labels": {
+            "com.example.some-label": "some-value",
+            "com.example.some-other-label": "some-other-value"
+        },
+        "Scope": "local"
+    }
+
+**Status codes**:
+
+- **200** - no error
+- **404** - no such volume
+- **500** - server error
+
+**JSON fields in response**:
+
+The following fields can be returned in the API response. Empty fields, or
+fields that are not supported by the volume's driver, may be omitted in the
+response.
+
+- **Name** - Name of the volume.
+- **Driver** - Name of the volume driver used by the volume.
+- **Mountpoint** - Mount path of the volume on the host.
+- **Status** - Low-level details about the volume, provided by the volume driver.
+  Details are returned as a map with key/value pairs: `{"key":"value","key2":"value2"}`.
+  The `Status` field is optional, and is omitted if the volume driver does not
+  support this feature.
+- **Labels** - Labels set on the volume, specified as a map: `{"key":"value","key2":"value2"}`.
+- **Scope** - Scope describes the level at which the volume exists; it can be
+  `global` (cluster-wide) or `local` (machine level). The default is `local`.
+
+#### Remove a volume
+
+`DELETE /volumes/(name)`
+
+Instruct the driver to remove the volume (`name`).
+
+**Example request**:
+
+    DELETE /v1.24/volumes/tardis HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 204 No Content
+
+**Status codes**:
+
+- **204** - no error
+- **404** - no such volume or volume driver
+- **409** - volume is in use and cannot be removed
+- **500** - server error
+
+### 3.5 Networks
+
+#### List networks
+
+`GET /networks`
+
+**Example request**:
+
+    GET /v1.24/networks?filters={"type":{"custom":true}} HTTP/1.1
+
+**Example response**:
+
+```
+HTTP/1.1 200 OK
+Content-Type: application/json
+
+[
+  {
+    "Name": "bridge",
+    "Id": "f2de39df4171b0dc801e8002d1d999b77256983dfc63041c0f34030aa3977566",
+    "Scope": "local",
+    "Driver": "bridge",
+    "EnableIPv6": false,
+    "Internal": false,
+    "IPAM": {
+      "Driver": "default",
+      "Config": [
+        {
+          "Subnet": "172.17.0.0/16"
+        }
+      ]
+    },
+    "Containers": {
+      "39b69226f9d79f5634485fb236a23b2fe4e96a0a94128390a7fbbcc167065867": {
+        "EndpointID": "ed2419a97c1d9954d05b46e462e7002ea552f216e9b136b80a7db8d98b442eda",
+        "MacAddress": "02:42:ac:11:00:02",
+        "IPv4Address": "172.17.0.2/16",
+        "IPv6Address": ""
+      }
+    },
+    "Options": {
+      "com.docker.network.bridge.default_bridge": "true",
+      "com.docker.network.bridge.enable_icc": "true",
+      "com.docker.network.bridge.enable_ip_masquerade": "true",
+      "com.docker.network.bridge.host_binding_ipv4": "0.0.0.0",
+      "com.docker.network.bridge.name": "docker0",
+      "com.docker.network.driver.mtu": "1500"
+    }
+  },
+  {
+    "Name": "none",
+    "Id": "e086a3893b05ab69242d3c44e49483a3bbbd3a26b46baa8f61ab797c1088d794",
+    "Scope": "local",
+    "Driver": "null",
+    "EnableIPv6": false,
+    "Internal": false,
+    "IPAM": {
+      "Driver": "default",
+      "Config": []
+    },
+    "Containers": {},
+    "Options": {}
+  },
+  {
+    "Name": "host",
+    "Id": "13e871235c677f196c4e1ecebb9dc733b9b2d2ab589e30c539efeda84a24215e",
+    "Scope": "local",
+    "Driver": "host",
+    "EnableIPv6": false,
+    "Internal": false,
+    "IPAM": {
+      "Driver": "default",
+      "Config": []
+    },
+    "Containers": {},
+    "Options": {}
+  }
+]
+```
+
+**Query parameters**:
+
+- **filters** - JSON encoded network list filter. The filter value is one of:
+  - `driver=<driver-name>` Matches a network's driver.
+  - `id=<network-id>` Matches all or part of a network id.
+  - `label=<key>` or `label=<key>=<value>` of a network label.
+  - `name=<network-name>` Matches all or part of a network name.
+  - `type=["custom"|"builtin"]` Filters networks by type. The `custom` keyword returns all user-defined networks.
+ +**Status codes**: + +- **200** - no error +- **500** - server error + +#### Inspect network + +`GET /networks/(id or name)` + +Return low-level information on the network `id` + +**Example request**: + + GET /v1.24/networks/7d86d31b1478e7cca9ebed7e73aa0fdeec46c5ca29497431d3007d2d9e15ed99 HTTP/1.1 + +**Example response**: + +``` +HTTP/1.1 200 OK +Content-Type: application/json + +{ + "Name": "net01", + "Id": "7d86d31b1478e7cca9ebed7e73aa0fdeec46c5ca29497431d3007d2d9e15ed99", + "Scope": "local", + "Driver": "bridge", + "EnableIPv6": false, + "IPAM": { + "Driver": "default", + "Config": [ + { + "Subnet": "172.19.0.0/16", + "Gateway": "172.19.0.1" + } + ], + "Options": { + "foo": "bar" + } + }, + "Internal": false, + "Containers": { + "19a4d5d687db25203351ed79d478946f861258f018fe384f229f2efa4b23513c": { + "Name": "test", + "EndpointID": "628cadb8bcb92de107b2a1e516cbffe463e321f548feb37697cce00ad694f21a", + "MacAddress": "02:42:ac:13:00:02", + "IPv4Address": "172.19.0.2/16", + "IPv6Address": "" + } + }, + "Options": { + "com.docker.network.bridge.default_bridge": "true", + "com.docker.network.bridge.enable_icc": "true", + "com.docker.network.bridge.enable_ip_masquerade": "true", + "com.docker.network.bridge.host_binding_ipv4": "0.0.0.0", + "com.docker.network.bridge.name": "docker0", + "com.docker.network.driver.mtu": "1500" + }, + "Labels": { + "com.example.some-label": "some-value", + "com.example.some-other-label": "some-other-value" + } +} +``` + +**Status codes**: + +- **200** - no error +- **404** - network not found +- **500** - server error + +#### Create a network + +`POST /networks/create` + +Create a network + +**Example request**: + +``` +POST /v1.24/networks/create HTTP/1.1 +Content-Type: application/json + +{ + "Name":"isolated_nw", + "CheckDuplicate":true, + "Driver":"bridge", + "EnableIPv6": true, + "IPAM":{ + "Driver": "default", + "Config":[ + { + "Subnet":"172.20.0.0/16", + "IPRange":"172.20.10.0/24", + "Gateway":"172.20.10.11" + }, + { + "Subnet":"2001:db8:abcd::/64", + "Gateway":"2001:db8:abcd::1011" + } + ], + "Options": { + "foo": "bar" + } + }, + "Internal":true, + "Options": { + "com.docker.network.bridge.default_bridge": "true", + "com.docker.network.bridge.enable_icc": "true", + "com.docker.network.bridge.enable_ip_masquerade": "true", + "com.docker.network.bridge.host_binding_ipv4": "0.0.0.0", + "com.docker.network.bridge.name": "docker0", + "com.docker.network.driver.mtu": "1500" + }, + "Labels": { + "com.example.some-label": "some-value", + "com.example.some-other-label": "some-other-value" + } +} +``` + +**Example response**: + +``` +HTTP/1.1 201 Created +Content-Type: application/json + +{ + "Id": "22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30", + "Warning": "" +} +``` + +**Status codes**: + +- **201** - no error +- **403** - operation not supported for pre-defined networks +- **404** - plugin not found +- **500** - server error + +**JSON parameters**: + +- **Name** - The new network's name. this is a mandatory field +- **CheckDuplicate** - Requests daemon to check for networks with same name. Defaults to `false`. + Since Network is primarily keyed based on a random ID and not on the name, + and network name is strictly a user-friendly alias to the network + which is uniquely identified using ID, there is no guaranteed way to check for duplicates. + This parameter CheckDuplicate is there to provide a best effort checking of any networks + which has the same name but it is not guaranteed to catch all name collisions. 
+- **Driver** - Name of the network driver plugin to use. Defaults to `bridge` driver
+- **Internal** - Restrict external access to the network
+- **IPAM** - Optional custom IP scheme for the network
+  - **Driver** - Name of the IPAM driver to use. Defaults to `default` driver
+  - **Config** - List of IPAM configuration options, specified as a map:
+    `{"Subnet": <CIDR>, "IPRange": <CIDR>, "Gateway": <IP address>, "AuxAddress": <device_name:IP address>}`
+  - **Options** - Driver-specific options, specified as a map: `{"option":"value" [,"option2":"value2"]}`
+- **EnableIPv6** - Enable IPv6 on the network
+- **Options** - Network specific options to be used by the drivers
+- **Labels** - Labels to set on the network, specified as a map: `{"key":"value" [,"key2":"value2"]}`
+
+#### Connect a container to a network
+
+`POST /networks/(id or name)/connect`
+
+Connect a container to a network
+
+**Example request**:
+
+```
+POST /v1.24/networks/22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30/connect HTTP/1.1
+Content-Type: application/json
+
+{
+  "Container":"3613f73ba0e4",
+  "EndpointConfig": {
+    "IPAMConfig": {
+      "IPv4Address":"172.24.56.89",
+      "IPv6Address":"2001:db8::5689"
+    }
+  }
+}
+```
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+
+**Status codes**:
+
+- **200** - no error
+- **403** - operation not supported for swarm scoped networks
+- **404** - network or container not found
+- **500** - Internal Server Error
+
+**JSON parameters**:
+
+- **Container** - container-id/name to be connected to the network
+
+#### Disconnect a container from a network
+
+`POST /networks/(id or name)/disconnect`
+
+Disconnect a container from a network
+
+**Example request**:
+
+```
+POST /v1.24/networks/22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30/disconnect HTTP/1.1
+Content-Type: application/json
+
+{
+  "Container":"3613f73ba0e4",
+  "Force":false
+}
+```
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+
+**Status codes**:
+
+- **200** - no error
+- **403** - operation not supported for swarm scoped networks
+- **404** - network or container not found
+- **500** - Internal Server Error
+
+**JSON parameters**:
+
+- **Container** - container-id/name to be disconnected from a network
+- **Force** - Force the container to disconnect from a network
+
+#### Remove a network
+
+`DELETE /networks/(id or name)`
+
+Instruct the driver to remove the network (`id`).
+
+**Example request**:
+
+    DELETE /v1.24/networks/22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30 HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 204 No Content
+
+**Status codes**:
+
+- **204** - no error
+- **403** - operation not supported for pre-defined networks
+- **404** - no such network
+- **500** - server error
+
+### 3.6 Plugins (experimental)
+
+#### List plugins
+
+`GET /plugins`
+
+Returns information about installed plugins.
+ +**Example request**: + + GET /v1.24/plugins HTTP/1.1 + +**Example response**: + +``` +HTTP/1.1 200 OK +Content-Type: application/json + +[ + { + "Id": "5724e2c8652da337ab2eedd19fc6fc0ec908e4bd907c7421bf6a8dfc70c4c078", + "Name": "tiborvass/no-remove", + "Tag": "latest", + "Active": true, + "Config": { + "Mounts": [ + { + "Name": "", + "Description": "", + "Settable": null, + "Source": "/data", + "Destination": "/data", + "Type": "bind", + "Options": [ + "shared", + "rbind" + ] + }, + { + "Name": "", + "Description": "", + "Settable": null, + "Source": null, + "Destination": "/foobar", + "Type": "tmpfs", + "Options": null + } + ], + "Env": [ + "DEBUG=1" + ], + "Args": null, + "Devices": null + }, + "Manifest": { + "ManifestVersion": "v0", + "Description": "A test plugin for Docker", + "Documentation": "https://docs.docker.com/engine/extend/plugins/", + "Interface": { + "Types": [ + "docker.volumedriver/1.0" + ], + "Socket": "plugins.sock" + }, + "Entrypoint": [ + "plugin-no-remove", + "/data" + ], + "Workdir": "", + "User": { + }, + "Network": { + "Type": "host" + }, + "Capabilities": null, + "Mounts": [ + { + "Name": "", + "Description": "", + "Settable": null, + "Source": "/data", + "Destination": "/data", + "Type": "bind", + "Options": [ + "shared", + "rbind" + ] + }, + { + "Name": "", + "Description": "", + "Settable": null, + "Source": null, + "Destination": "/foobar", + "Type": "tmpfs", + "Options": null + } + ], + "Devices": [ + { + "Name": "device", + "Description": "a host device to mount", + "Settable": null, + "Path": "/dev/cpu_dma_latency" + } + ], + "Env": [ + { + "Name": "DEBUG", + "Description": "If set, prints debug messages", + "Settable": null, + "Value": "1" + } + ], + "Args": { + "Name": "args", + "Description": "command line arguments", + "Settable": null, + "Value": [ + + ] + } + } + } +] +``` + +**Status codes**: + +- **200** - no error +- **500** - server error + +#### Install a plugin + +`POST /plugins/pull?name=` + +Pulls and installs a plugin. After the plugin is installed, it can be enabled +using the [`POST /plugins/(plugin name)/enable` endpoint](#enable-a-plugin). + +**Example request**: + +``` +POST /v1.24/plugins/pull?name=tiborvass/no-remove:latest HTTP/1.1 +``` + +The `:latest` tag is optional, and is used as default if omitted. When using +this endpoint to pull a plugin from the registry, the `X-Registry-Auth` header +can be used to include a base64-encoded AuthConfig object. Refer to the [create +an image](#create-an-image) section for more details. + +**Example response**: + +``` +HTTP/1.1 200 OK +Content-Type: application/json +Content-Length: 175 + +[ + { + "Name": "network", + "Description": "", + "Value": [ + "host" + ] + }, + { + "Name": "mount", + "Description": "", + "Value": [ + "/data" + ] + }, + { + "Name": "device", + "Description": "", + "Value": [ + "/dev/cpu_dma_latency" + ] + } +] +``` + +**Query parameters**: + +- **name** - Name of the plugin to pull. The name may include a tag or digest. + This parameter is required. + +**Status codes**: + +- **200** - no error +- **500** - error parsing reference / not a valid repository/tag: repository + name must have at least one component +- **500** - plugin already exists + +#### Inspect a plugin + +`GET /plugins/(plugin name)` + +Returns detailed information about an installed plugin. + +**Example request**: + +``` +GET /v1.24/plugins/tiborvass/no-remove:latest HTTP/1.1 +``` + +The `:latest` tag is optional, and is used as default if omitted. 
+ + +**Example response**: + +``` +HTTP/1.1 200 OK +Content-Type: application/json + +{ + "Id": "5724e2c8652da337ab2eedd19fc6fc0ec908e4bd907c7421bf6a8dfc70c4c078", + "Name": "tiborvass/no-remove", + "Tag": "latest", + "Active": false, + "Config": { + "Mounts": [ + { + "Name": "", + "Description": "", + "Settable": null, + "Source": "/data", + "Destination": "/data", + "Type": "bind", + "Options": [ + "shared", + "rbind" + ] + }, + { + "Name": "", + "Description": "", + "Settable": null, + "Source": null, + "Destination": "/foobar", + "Type": "tmpfs", + "Options": null + } + ], + "Env": [ + "DEBUG=1" + ], + "Args": null, + "Devices": null + }, + "Manifest": { + "ManifestVersion": "v0", + "Description": "A test plugin for Docker", + "Documentation": "https://docs.docker.com/engine/extend/plugins/", + "Interface": { + "Types": [ + "docker.volumedriver/1.0" + ], + "Socket": "plugins.sock" + }, + "Entrypoint": [ + "plugin-no-remove", + "/data" + ], + "Workdir": "", + "User": { + }, + "Network": { + "Type": "host" + }, + "Capabilities": null, + "Mounts": [ + { + "Name": "", + "Description": "", + "Settable": null, + "Source": "/data", + "Destination": "/data", + "Type": "bind", + "Options": [ + "shared", + "rbind" + ] + }, + { + "Name": "", + "Description": "", + "Settable": null, + "Source": null, + "Destination": "/foobar", + "Type": "tmpfs", + "Options": null + } + ], + "Devices": [ + { + "Name": "device", + "Description": "a host device to mount", + "Settable": null, + "Path": "/dev/cpu_dma_latency" + } + ], + "Env": [ + { + "Name": "DEBUG", + "Description": "If set, prints debug messages", + "Settable": null, + "Value": "1" + } + ], + "Args": { + "Name": "args", + "Description": "command line arguments", + "Settable": null, + "Value": [ + + ] + } + } +} +``` + +**Status codes**: + +- **200** - no error +- **404** - plugin not installed + +#### Enable a plugin + +`POST /plugins/(plugin name)/enable` + +Enables a plugin + +**Example request**: + +``` +POST /v1.24/plugins/tiborvass/no-remove:latest/enable HTTP/1.1 +``` + +The `:latest` tag is optional, and is used as default if omitted. + + +**Example response**: + +``` +HTTP/1.1 200 OK +Content-Length: 0 +Content-Type: text/plain; charset=utf-8 +``` + +**Status codes**: + +- **200** - no error +- **404** - plugin not installed +- **500** - plugin is already enabled + +#### Disable a plugin + +`POST /plugins/(plugin name)/disable` + +Disables a plugin + +**Example request**: + +``` +POST /v1.24/plugins/tiborvass/no-remove:latest/disable HTTP/1.1 +``` + +The `:latest` tag is optional, and is used as default if omitted. + + +**Example response**: + +``` +HTTP/1.1 200 OK +Content-Length: 0 +Content-Type: text/plain; charset=utf-8 +``` + +**Status codes**: + +- **200** - no error +- **404** - plugin not installed +- **500** - plugin is already disabled + +#### Remove a plugin + +`DELETE /plugins/(plugin name)` + +Removes a plugin + +**Example request**: + +``` +DELETE /v1.24/plugins/tiborvass/no-remove:latest HTTP/1.1 +``` + +The `:latest` tag is optional, and is used as default if omitted. + +**Example response**: + +``` +HTTP/1.1 200 OK +Content-Length: 0 +Content-Type: text/plain; charset=utf-8 +``` + +**Status codes**: + +- **200** - no error +- **404** - plugin not installed +- **500** - plugin is active + + + +### 3.7 Nodes + +**Note**: Node operations require the engine to be part of a swarm. 
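+
+As a quick preflight, swarm membership can be checked via `GET /info` before
+calling any of the endpoints in this section. A minimal Python sketch (assuming
+a daemon listening on `localhost:2375`, and that `Swarm.LocalNodeState` in the
+`/info` payload reports the node's swarm state):
+
+```
+import http.client
+import json
+
+# Sketch: refuse to continue unless this engine is an active swarm member.
+conn = http.client.HTTPConnection("localhost", 2375)
+conn.request("GET", "/v1.24/info")
+info = json.loads(conn.getresponse().read())
+if info.get("Swarm", {}).get("LocalNodeState") != "active":
+    raise SystemExit("this engine is not part of a swarm")
+```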
+
+#### List nodes
+
+
+`GET /nodes`
+
+List nodes
+
+**Example request**:
+
+    GET /v1.24/nodes HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    [
+      {
+        "ID": "24ifsmvkjbyhk",
+        "Version": {
+          "Index": 8
+        },
+        "CreatedAt": "2016-06-07T20:31:11.853781916Z",
+        "UpdatedAt": "2016-06-07T20:31:11.999868824Z",
+        "Spec": {
+          "Name": "my-node",
+          "Role": "manager",
+          "Availability": "active",
+          "Labels": {
+            "foo": "bar"
+          }
+        },
+        "Description": {
+          "Hostname": "bf3067039e47",
+          "Platform": {
+            "Architecture": "x86_64",
+            "OS": "linux"
+          },
+          "Resources": {
+            "NanoCPUs": 4000000000,
+            "MemoryBytes": 8272408576
+          },
+          "Engine": {
+            "EngineVersion": "1.12.0",
+            "Labels": {
+              "foo": "bar"
+            },
+            "Plugins": [
+              {
+                "Type": "Volume",
+                "Name": "local"
+              },
+              {
+                "Type": "Network",
+                "Name": "bridge"
+              },
+              {
+                "Type": "Network",
+                "Name": "null"
+              },
+              {
+                "Type": "Network",
+                "Name": "overlay"
+              }
+            ]
+          }
+        },
+        "Status": {
+          "State": "ready"
+        },
+        "ManagerStatus": {
+          "Leader": true,
+          "Reachability": "reachable",
+          "Addr": "172.17.0.2:2377"
+        }
+      }
+    ]
+
+**Query parameters**:
+
+- **filters** – a JSON encoded value of the filters (a `map[string][]string`) to process on the
+  nodes list. Available filters:
+  - `id=<node id>`
+  - `label=<engine label>`
+  - `membership=(accepted|pending)`
+  - `name=<node name>`
+  - `role=(manager|worker)`
+
+**Status codes**:
+
+- **200** – no error
+- **406** - node is not part of a swarm
+- **500** – server error
+
+#### Inspect a node
+
+
+`GET /nodes/(id or name)`
+
+Return low-level information on the node `id`
+
+**Example request**:
+
+    GET /v1.24/nodes/24ifsmvkjbyhk HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    {
+      "ID": "24ifsmvkjbyhk",
+      "Version": {
+        "Index": 8
+      },
+      "CreatedAt": "2016-06-07T20:31:11.853781916Z",
+      "UpdatedAt": "2016-06-07T20:31:11.999868824Z",
+      "Spec": {
+        "Name": "my-node",
+        "Role": "manager",
+        "Availability": "active",
+        "Labels": {
+          "foo": "bar"
+        }
+      },
+      "Description": {
+        "Hostname": "bf3067039e47",
+        "Platform": {
+          "Architecture": "x86_64",
+          "OS": "linux"
+        },
+        "Resources": {
+          "NanoCPUs": 4000000000,
+          "MemoryBytes": 8272408576
+        },
+        "Engine": {
+          "EngineVersion": "1.12.0",
+          "Labels": {
+            "foo": "bar"
+          },
+          "Plugins": [
+            {
+              "Type": "Volume",
+              "Name": "local"
+            },
+            {
+              "Type": "Network",
+              "Name": "bridge"
+            },
+            {
+              "Type": "Network",
+              "Name": "null"
+            },
+            {
+              "Type": "Network",
+              "Name": "overlay"
+            }
+          ]
+        }
+      },
+      "Status": {
+        "State": "ready"
+      },
+      "ManagerStatus": {
+        "Leader": true,
+        "Reachability": "reachable",
+        "Addr": "172.17.0.2:2377"
+      }
+    }
+
+**Status codes**:
+
+- **200** – no error
+- **404** – no such node
+- **406** – node is not part of a swarm
+- **500** – server error
+
+#### Remove a node
+
+
+`DELETE /nodes/(id or name)`
+
+Remove a node from the swarm.
+
+**Example request**:
+
+    DELETE /v1.24/nodes/24ifsmvkjbyhk HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Length: 0
+    Content-Type: text/plain; charset=utf-8
+
+**Query parameters**:
+
+- **force** - 1/True/true or 0/False/false. Force remove a node from the swarm.
+  Default `false`.
+
+**Status codes**:
+
+- **200** – no error
+- **404** – no such node
+- **406** – node is not part of a swarm
+- **500** – server error
+
+#### Update a node
+
+
+`POST /nodes/(id)/update`
+
+Update a node.
+
+The payload of the `POST` request is the new `NodeSpec` and
+overrides the current `NodeSpec` for the specified node.
+
+If `Availability` or `Role` are omitted, this returns an
+error. Any other field omitted resets the current value to either
+an empty value or the default cluster-wide value.
+
+**Example request**:
+
+    POST /v1.24/nodes/24ifsmvkjbyhk/update?version=8 HTTP/1.1
+    Content-Type: application/json
+
+    {
+      "Availability": "active",
+      "Name": "node-name",
+      "Role": "manager",
+      "Labels": {
+        "foo": "bar"
+      }
+    }
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Length: 0
+    Content-Type: text/plain; charset=utf-8
+
+**Query parameters**:
+
+- **version** – The version number of the node object being updated. This is
+  required to avoid conflicting writes.
+
+JSON Parameters:
+
+- **Annotations** – Optional metadata to associate with the node.
+    - **Name** – User-defined name for the node.
+    - **Labels** – A map of labels to associate with the node (e.g.,
+      `{"key":"value", "key2":"value2"}`).
+- **Role** - Role of the node (worker|manager).
+- **Availability** - Availability of the node (active|pause|drain).
+
+
+**Status codes**:
+
+- **200** – no error
+- **404** – no such node
+- **406** – node is not part of a swarm
+- **500** – server error
+
+### 3.8 Swarm
+
+#### Inspect swarm
+
+
+`GET /swarm`
+
+Inspect swarm
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    {
+      "CreatedAt" : "2016-08-15T16:00:20.349727406Z",
+      "Spec" : {
+        "Dispatcher" : {
+          "HeartbeatPeriod" : 5000000000
+        },
+        "Orchestration" : {
+          "TaskHistoryRetentionLimit" : 10
+        },
+        "CAConfig" : {
+          "NodeCertExpiry" : 7776000000000000
+        },
+        "Raft" : {
+          "LogEntriesForSlowFollowers" : 500,
+          "HeartbeatTick" : 1,
+          "SnapshotInterval" : 10000,
+          "ElectionTick" : 3
+        },
+        "TaskDefaults" : {},
+        "Name" : "default"
+      },
+      "JoinTokens" : {
+        "Worker" : "SWMTKN-1-1h8aps2yszaiqmz2l3oc5392pgk8e49qhx2aj3nyv0ui0hez2a-6qmn92w6bu3jdvnglku58u11a",
+        "Manager" : "SWMTKN-1-1h8aps2yszaiqmz2l3oc5392pgk8e49qhx2aj3nyv0ui0hez2a-8llk83c4wm9lwioey2s316r9l"
+      },
+      "ID" : "70ilmkj2f6sp2137c753w2nmt",
+      "UpdatedAt" : "2016-08-15T16:32:09.623207604Z",
+      "Version" : {
+        "Index" : 51
+      }
+    }
+
+**Status codes**:
+
+- **200** - no error
+- **406** – node is not part of a swarm
+- **500** - server error
+
+#### Initialize a new swarm
+
+
+`POST /swarm/init`
+
+Initialize a new swarm. The body of the HTTP response includes the node ID.
+
+**Example request**:
+
+    POST /v1.24/swarm/init HTTP/1.1
+    Content-Type: application/json
+
+    {
+      "ListenAddr": "0.0.0.0:2377",
+      "AdvertiseAddr": "192.168.1.1:2377",
+      "ForceNewCluster": false,
+      "Spec": {
+        "Orchestration": {},
+        "Raft": {},
+        "Dispatcher": {},
+        "CAConfig": {}
+      }
+    }
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Length: 28
+    Content-Type: application/json
+    Date: Thu, 01 Sep 2016 21:49:13 GMT
+    Server: Docker/1.12.0 (linux)
+
+    "7v2t30z9blmxuhnyo6s4cpenp"
+
+**Status codes**:
+
+- **200** – no error
+- **400** – bad parameter
+- **406** – node is already part of a swarm
+- **500** - server error
+
+JSON Parameters:
+
+- **ListenAddr** – Listen address used for inter-manager communication, as well as determining
+  the networking interface used for the VXLAN Tunnel Endpoint (VTEP). This can either be an
+  address/port combination in the form `192.168.1.1:4567`, or an interface followed by a port
+  number, like `eth0:4567`. If the port number is omitted, the default swarm listening port is
+  used.
+- **AdvertiseAddr** – Externally reachable address advertised to other nodes. This can either be
+  an address/port combination in the form `192.168.1.1:4567`, or an interface followed by a port
+  number, like `eth0:4567`. If the port number is omitted, the port number from the listen
+  address is used. If `AdvertiseAddr` is not specified, it will be automatically detected when
+  possible.
+- **ForceNewCluster** – Force creation of a new swarm.
+- **Spec** – Configuration settings for the new swarm.
+    - **Orchestration** – Configuration settings for the orchestration aspects of the swarm.
+        - **TaskHistoryRetentionLimit** – Maximum number of task history entries stored.
+    - **Raft** – Raft related configuration.
+        - **SnapshotInterval** – Number of log entries between snapshots.
+        - **KeepOldSnapshots** – Number of snapshots to keep beyond the current snapshot.
+        - **LogEntriesForSlowFollowers** – Number of log entries to keep around to sync up slow
+          followers after a snapshot is created.
+        - **HeartbeatTick** – Number of ticks (in seconds) between each heartbeat.
+        - **ElectionTick** – Number of ticks (in seconds) needed without a leader to trigger a new
+          election.
+    - **Dispatcher** – Configuration settings for the task dispatcher.
+        - **HeartbeatPeriod** – The delay for an agent to send a heartbeat to the dispatcher.
+    - **CAConfig** – Certificate authority configuration.
+        - **NodeCertExpiry** – Automatic expiry for node certificates.
+        - **ExternalCA** - Configuration for forwarding signing requests to an external
+          certificate authority.
+            - **Protocol** - Protocol for communication with the external CA
+              (currently only "cfssl" is supported).
+            - **URL** - URL where certificate signing requests should be sent.
+            - **Options** - An object with key/value pairs that are interpreted
+              as protocol-specific options for the external CA driver.
+
+#### Join an existing swarm
+
+`POST /swarm/join`
+
+Join an existing swarm
+
+**Example request**:
+
+    POST /v1.24/swarm/join HTTP/1.1
+    Content-Type: application/json
+
+    {
+      "ListenAddr": "0.0.0.0:2377",
+      "AdvertiseAddr": "192.168.1.1:2377",
+      "RemoteAddrs": ["node1:2377"],
+      "JoinToken": "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2"
+    }
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Length: 0
+    Content-Type: text/plain; charset=utf-8
+
+**Status codes**:
+
+- **200** – no error
+- **400** – bad parameter
+- **406** – node is already part of a swarm
+- **500** - server error
+
+JSON Parameters:
+
+- **ListenAddr** – Listen address used for inter-manager communication if the node gets promoted to
+  manager, as well as determining the networking interface used for the VXLAN Tunnel Endpoint (VTEP).
+- **AdvertiseAddr** – Externally reachable address advertised to other nodes. This can either be
+  an address/port combination in the form `192.168.1.1:4567`, or an interface followed by a port
+  number, like `eth0:4567`. If the port number is omitted, the port number from the listen
+  address is used. If `AdvertiseAddr` is not specified, it will be automatically detected when
+  possible.
+- **RemoteAddrs** – Addresses of one or more manager nodes already participating in the swarm.
+- **JoinToken** – Secret token for joining this swarm.
+
+#### Leave a swarm
+
+
+`POST /swarm/leave`
+
+Leave a swarm
+
+**Example request**:
+
+    POST /v1.24/swarm/leave HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Length: 0
+    Content-Type: text/plain; charset=utf-8
+
+**Query parameters**:
+
+- **force** - Boolean (0/1, false/true). Force leave the swarm, even if this is
+  the last manager or leaving will break the cluster.
+
+**Status codes**:
+
+- **200** – no error
+- **406** – node is not part of a swarm
+- **500** - server error
+
+#### Update a swarm
+
+
+`POST /swarm/update`
+
+Update a swarm
+
+**Example request**:
+
+    POST /v1.24/swarm/update HTTP/1.1
+
+    {
+      "Name": "default",
+      "Orchestration": {
+        "TaskHistoryRetentionLimit": 10
+      },
+      "Raft": {
+        "SnapshotInterval": 10000,
+        "LogEntriesForSlowFollowers": 500,
+        "HeartbeatTick": 1,
+        "ElectionTick": 3
+      },
+      "Dispatcher": {
+        "HeartbeatPeriod": 5000000000
+      },
+      "CAConfig": {
+        "NodeCertExpiry": 7776000000000000
+      },
+      "JoinTokens": {
+        "Worker": "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-1awxwuwd3z9j1z3puu7rcgdbx",
+        "Manager": "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2"
+      }
+    }
+
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Length: 0
+    Content-Type: text/plain; charset=utf-8
+
+**Query parameters**:
+
+- **version** – The version number of the swarm object being updated. This is
+  required to avoid conflicting writes.
+- **rotateWorkerToken** - Set to `true` (or `1`) to rotate the worker join token.
+- **rotateManagerToken** - Set to `true` (or `1`) to rotate the manager join token.
+
+**Status codes**:
+
+- **200** – no error
+- **400** – bad parameter
+- **406** – node is not part of a swarm
+- **500** - server error
+
+JSON Parameters:
+
+- **Orchestration** – Configuration settings for the orchestration aspects of the swarm.
+    - **TaskHistoryRetentionLimit** – Maximum number of task history entries stored.
+- **Raft** – Raft related configuration.
+    - **SnapshotInterval** – Number of log entries between snapshots.
+    - **KeepOldSnapshots** – Number of snapshots to keep beyond the current snapshot.
+    - **LogEntriesForSlowFollowers** – Number of log entries to keep around to sync up slow
+      followers after a snapshot is created.
+    - **HeartbeatTick** – Number of ticks (in seconds) between each heartbeat.
+    - **ElectionTick** – Number of ticks (in seconds) needed without a leader to trigger a new
+      election.
+- **Dispatcher** – Configuration settings for the task dispatcher.
+    - **HeartbeatPeriod** – The delay for an agent to send a heartbeat to the dispatcher.
+- **CAConfig** – CA configuration.
+    - **NodeCertExpiry** – Automatic expiry for node certificates.
+    - **ExternalCA** - Configuration for forwarding signing requests to an external
+      certificate authority.
+        - **Protocol** - Protocol for communication with the external CA
+          (currently only "cfssl" is supported).
+        - **URL** - URL where certificate signing requests should be sent.
+        - **Options** - An object with key/value pairs that are interpreted
+          as protocol-specific options for the external CA driver.
+- **JoinTokens** - Tokens that can be used by other nodes to join the swarm.
+    - **Worker** - Token to use for joining as a worker.
+    - **Manager** - Token to use for joining as a manager.
+
+### 3.9 Services
+
+**Note**: Service operations require the engine to be part of a swarm.
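+
+The update endpoints above (`/nodes/(id)/update` and `/swarm/update`, as well as
+the service update endpoint below) all take a `version` query parameter for
+optimistic concurrency. A hedged Python sketch of the read-modify-write cycle,
+using the swarm object and assuming a manager daemon on `localhost:2375`:
+
+```
+import http.client
+import json
+
+# Sketch: fetch the current object, mutate the spec, and write it back
+# with the version number that was read, to avoid conflicting writes.
+conn = http.client.HTTPConnection("localhost", 2375)
+conn.request("GET", "/v1.24/swarm")
+swarm = json.loads(conn.getresponse().read())
+
+spec = swarm["Spec"]
+spec["Orchestration"]["TaskHistoryRetentionLimit"] = 5
+
+conn.request(
+    "POST",
+    "/v1.24/swarm/update?version=%d" % swarm["Version"]["Index"],
+    body=json.dumps(spec),
+    headers={"Content-Type": "application/json"},
+)
+print(conn.getresponse().status)  # 200 on success; re-read and retry on conflict
+```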
+ +#### List services + + +`GET /services` + +List services + +**Example request**: + + GET /v1.24/services HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "ID": "9mnpnzenvg8p8tdbtq4wvbkcz", + "Version": { + "Index": 19 + }, + "CreatedAt": "2016-06-07T21:05:51.880065305Z", + "UpdatedAt": "2016-06-07T21:07:29.962229872Z", + "Spec": { + "Name": "hopeful_cori", + "TaskTemplate": { + "ContainerSpec": { + "Image": "redis" + }, + "Resources": { + "Limits": {}, + "Reservations": {} + }, + "RestartPolicy": { + "Condition": "any", + "MaxAttempts": 0 + }, + "Placement": { + "Constraints": [ + "node.role == worker" + ] + } + }, + "Mode": { + "Replicated": { + "Replicas": 1 + } + }, + "UpdateConfig": { + "Parallelism": 1, + "FailureAction": "pause" + }, + "EndpointSpec": { + "Mode": "vip", + "Ports": [ + { + "Protocol": "tcp", + "TargetPort": 6379, + "PublishedPort": 30001 + } + ] + } + }, + "Endpoint": { + "Spec": { + "Mode": "vip", + "Ports": [ + { + "Protocol": "tcp", + "TargetPort": 6379, + "PublishedPort": 30001 + } + ] + }, + "Ports": [ + { + "Protocol": "tcp", + "TargetPort": 6379, + "PublishedPort": 30001 + } + ], + "VirtualIPs": [ + { + "NetworkID": "4qvuz4ko70xaltuqbt8956gd1", + "Addr": "10.255.0.2/16" + }, + { + "NetworkID": "4qvuz4ko70xaltuqbt8956gd1", + "Addr": "10.255.0.3/16" + } + ] + } + } + ] + +**Query parameters**: + +- **filters** – a JSON encoded value of the filters (a `map[string][]string`) to process on the + services list. Available filters: + - `id=` + - `label=` + - `name=` + +**Status codes**: + +- **200** – no error +- **406** – node is not part of a swarm +- **500** – server error + +#### Create a service + +`POST /services/create` + +Create a service. When using this endpoint to create a service using a private +repository from the registry, the `X-Registry-Auth` header must be used to +include a base64-encoded AuthConfig object. Refer to the [create an +image](#create-an-image) section for more details. 
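+
+For illustration, a minimal sketch of building that header value; the
+credentials below are placeholders, and the field names follow the AuthConfig
+object described in the create-an-image section:
+
+```
+import base64
+import json
+
+# Sketch: X-Registry-Auth is the base64 encoding of a JSON AuthConfig object.
+auth_config = {
+    "username": "jdoe",           # placeholder
+    "password": "secret",         # placeholder
+    "email": "jdoe@example.com",  # placeholder
+    "serveraddress": "https://index.docker.io/v1/",
+}
+x_registry_auth = base64.b64encode(
+    json.dumps(auth_config).encode("utf-8")
+).decode("ascii")
+print(x_registry_auth)
+```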
+ +**Example request**: + + POST /v1.24/services/create HTTP/1.1 + Content-Type: application/json + + { + "Name": "web", + "TaskTemplate": { + "ContainerSpec": { + "Image": "nginx:alpine", + "Mounts": [ + { + "ReadOnly": true, + "Source": "web-data", + "Target": "/usr/share/nginx/html", + "Type": "volume", + "VolumeOptions": { + "DriverConfig": { + }, + "Labels": { + "com.example.something": "something-value" + } + } + } + ], + "User": "33" + }, + "Networks": [ + { + "Target": "overlay1" + } + ], + "LogDriver": { + "Name": "json-file", + "Options": { + "max-file": "3", + "max-size": "10M" + } + }, + "Placement": { + "Constraints": [ + "node.role == worker" + ] + }, + "Resources": { + "Limits": { + "MemoryBytes": 104857600 + }, + "Reservations": { + } + }, + "RestartPolicy": { + "Condition": "on-failure", + "Delay": 10000000000, + "MaxAttempts": 10 + } + }, + "Mode": { + "Replicated": { + "Replicas": 4 + } + }, + "UpdateConfig": { + "Delay": 30000000000, + "Parallelism": 2, + "FailureAction": "pause" + }, + "EndpointSpec": { + "Ports": [ + { + "Protocol": "tcp", + "PublishedPort": 8080, + "TargetPort": 80 + } + ] + }, + "Labels": { + "foo": "bar" + } + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + { + "ID":"ak7w3gjqoa3kuz8xcpnyy0pvl" + } + +**Status codes**: + +- **201** – no error +- **403** - network is not eligible for services +- **406** – node is not part of a swarm +- **409** – name conflicts with an existing object +- **500** - server error + +**JSON Parameters**: + +- **Name** – User-defined name for the service. +- **Labels** – A map of labels to associate with the service (e.g., `{"key":"value", "key2":"value2"}`). +- **TaskTemplate** – Specification of the tasks to start as part of the new service. + - **ContainerSpec** - Container settings for containers started as part of this task. + - **Image** – A string specifying the image name to use for the container. + - **Command** – The command to be run in the image. + - **Args** – Arguments to the command. + - **Env** – A list of environment variables in the form of `["VAR=value"[,"VAR2=value2"]]`. + - **Dir** – A string specifying the working directory for commands to run in. + - **User** – A string value specifying the user inside the container. + - **Labels** – A map of labels to associate with the service (e.g., + `{"key":"value", "key2":"value2"}`). + - **Mounts** – Specification for mounts to be added to containers + created as part of the service. + - **Target** – Container path. + - **Source** – Mount source (e.g. a volume name, a host path). + - **Type** – The mount type (`bind`, or `volume`). + - **ReadOnly** – A boolean indicating whether the mount should be read-only. + - **BindOptions** - Optional configuration for the `bind` type. + - **Propagation** – A propagation mode with the value `[r]private`, `[r]shared`, or `[r]slave`. + - **VolumeOptions** – Optional configuration for the `volume` type. + - **NoCopy** – A boolean indicating if volume should be + populated with the data from the target. (Default false) + - **Labels** – User-defined name and labels for the volume. + - **DriverConfig** – Map of driver-specific options. + - **Name** - Name of the driver to use to create the volume. + - **Options** - key/value map of driver specific options. + - **StopGracePeriod** – Amount of time to wait for the container to terminate before + forcefully killing it. + - **LogDriver** - Log configuration for containers created as part of the + service. 
+      - **Name** - Name of the logging driver to use (`json-file`, `syslog`,
+        `journald`, `gelf`, `fluentd`, `awslogs`, `splunk`, `etwlogs`, `none`).
+      - **Options** - Driver-specific options.
+  - **Resources** – Resource requirements which apply to each individual container created as part
+    of the service.
+    - **Limits** – Define resources limits.
+      - **NanoCPUs** – CPU limit in units of 10^-9 CPU shares.
+      - **MemoryBytes** – Memory limit in Bytes.
+    - **Reservations** – Define resources reservation.
+      - **NanoCPUs** – CPU reservation in units of 10^-9 CPU shares.
+      - **MemoryBytes** – Memory reservation in Bytes.
+  - **RestartPolicy** – Specification for the restart policy which applies to containers created
+    as part of this service.
+    - **Condition** – Condition for restart (`none`, `on-failure`, or `any`).
+    - **Delay** – Delay between restart attempts.
+    - **MaxAttempts** – Maximum attempts to restart a given container before giving up (default value
+      is 0, which is ignored).
+    - **Window** – Time window used to evaluate the restart policy (default value is
+      0, which is unbounded).
+  - **Placement** – Restrictions on where a service can run.
+    - **Constraints** – An array of constraints, e.g. `[ "node.role == manager" ]`.
+- **Mode** – Scheduling mode for the service (`replicated` or `global`, defaults to `replicated`).
+- **UpdateConfig** – Specification for the update strategy of the service.
+  - **Parallelism** – Maximum number of tasks to be updated in one iteration (0 means unlimited
+    parallelism).
+  - **Delay** – Amount of time between updates.
+  - **FailureAction** - Action to take if an updated task fails to run, or stops running during the
+    update. Values are `continue` and `pause`.
+- **Networks** – Array of network names or IDs to attach the service to.
+- **EndpointSpec** – Properties that can be configured to access and load balance a service.
+  - **Mode** – The mode of resolution to use for internal load balancing
+    between tasks (`vip` or `dnsrr`). Defaults to `vip` if not provided.
+  - **Ports** – List of exposed ports that this service is accessible on from
+    the outside, in the form of:
+    `{"Protocol": <"tcp"|"udp">, "PublishedPort": <port>, "TargetPort": <port>}`.
+    Ports can only be provided if `vip` resolution mode is used.
+
+**Request Headers**:
+
+- **Content-type** – Set to `"application/json"`.
+- **X-Registry-Auth** – base64-encoded AuthConfig object, containing either
+  login information, or a token. Refer to the [create an image](#create-an-image)
+  section for more details.
+
+
+#### Remove a service
+
+
+`DELETE /services/(id or name)`
+
+Stop and remove the service `id`
+
+**Example request**:
+
+    DELETE /v1.24/services/16253994b7c4 HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Length: 0
+    Content-Type: text/plain; charset=utf-8
+
+**Status codes**:
+
+- **200** – no error
+- **404** – no such service
+- **406** - node is not part of a swarm
+- **500** – server error
+
+#### Inspect one or more services
+
+
+`GET /services/(id or name)`
+
+Return information on the service `id`.
+
+**Example request**:
+
+    GET /v1.24/services/1cb4dnqcyx6m66g2t538x3rxha HTTP/1.1
+
+**Example response**:
+
+    {
+      "ID": "ak7w3gjqoa3kuz8xcpnyy0pvl",
+      "Version": {
+        "Index": 95
+      },
+      "CreatedAt": "2016-06-07T21:10:20.269723157Z",
+      "UpdatedAt": "2016-06-07T21:10:20.276301259Z",
+      "Spec": {
+        "Name": "redis",
+        "TaskTemplate": {
+          "ContainerSpec": {
+            "Image": "redis"
+          },
+          "Resources": {
+            "Limits": {},
+            "Reservations": {}
+          },
+          "RestartPolicy": {
+            "Condition": "any",
+            "MaxAttempts": 0
+          },
+          "Placement": {}
+        },
+        "Mode": {
+          "Replicated": {
+            "Replicas": 1
+          }
+        },
+        "UpdateConfig": {
+          "Parallelism": 1,
+          "FailureAction": "pause"
+        },
+        "EndpointSpec": {
+          "Mode": "vip",
+          "Ports": [
+            {
+              "Protocol": "tcp",
+              "TargetPort": 6379,
+              "PublishedPort": 30001
+            }
+          ]
+        }
+      },
+      "Endpoint": {
+        "Spec": {
+          "Mode": "vip",
+          "Ports": [
+            {
+              "Protocol": "tcp",
+              "TargetPort": 6379,
+              "PublishedPort": 30001
+            }
+          ]
+        },
+        "Ports": [
+          {
+            "Protocol": "tcp",
+            "TargetPort": 6379,
+            "PublishedPort": 30001
+          }
+        ],
+        "VirtualIPs": [
+          {
+            "NetworkID": "4qvuz4ko70xaltuqbt8956gd1",
+            "Addr": "10.255.0.4/16"
+          }
+        ]
+      }
+    }
+
+**Status codes**:
+
+- **200** – no error
+- **404** – no such service
+- **406** - node is not part of a swarm
+- **500** – server error
+
+#### Update a service
+
+`POST /services/(id)/update`
+
+Update a service. When using this endpoint to update a service that uses a
+private repository from the registry, the `X-Registry-Auth` header can be used
+to update the authentication information that is stored for the service.
+The header contains a base64-encoded AuthConfig object. Refer to the [create an
+image](#create-an-image) section for more details.
+
+**Example request**:
+
+    POST /v1.24/services/1cb4dnqcyx6m66g2t538x3rxha/update?version=23 HTTP/1.1
+    Content-Type: application/json
+
+    {
+      "Name": "top",
+      "TaskTemplate": {
+        "ContainerSpec": {
+          "Image": "busybox",
+          "Args": [
+            "top"
+          ]
+        },
+        "Resources": {
+          "Limits": {},
+          "Reservations": {}
+        },
+        "RestartPolicy": {
+          "Condition": "any",
+          "MaxAttempts": 0
+        },
+        "Placement": {}
+      },
+      "Mode": {
+        "Replicated": {
+          "Replicas": 1
+        }
+      },
+      "UpdateConfig": {
+        "Parallelism": 1
+      },
+      "EndpointSpec": {
+        "Mode": "vip"
+      }
+    }
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Length: 0
+    Content-Type: text/plain; charset=utf-8
+
+**JSON Parameters**:
+
+- **Name** – User-defined name for the service. Note that renaming services is not supported.
+- **Labels** – A map of labels to associate with the service (e.g., `{"key":"value", "key2":"value2"}`).
+- **TaskTemplate** – Specification of the tasks to start as part of the new service.
+  - **ContainerSpec** - Container settings for containers started as part of this task.
+    - **Image** – A string specifying the image name to use for the container.
+    - **Command** – The command to be run in the image.
+    - **Args** – Arguments to the command.
+    - **Env** – A list of environment variables in the form of `["VAR=value"[,"VAR2=value2"]]`.
+    - **Dir** – A string specifying the working directory for commands to run in.
+    - **User** – A string value specifying the user inside the container.
+    - **Labels** – A map of labels to associate with the service (e.g.,
+      `{"key":"value", "key2":"value2"}`).
+    - **Mounts** – Specification for mounts to be added to containers created as part of the new
+      service.
+      - **Target** – Container path.
+      - **Source** – Mount source (e.g. a volume name, a host path).
+      - **Type** – The mount type (`bind`, or `volume`).
+      - **ReadOnly** – A boolean indicating whether the mount should be read-only.
+      - **BindOptions** - Optional configuration for the `bind` type.
+        - **Propagation** – A propagation mode with the value `[r]private`, `[r]shared`, or `[r]slave`.
+      - **VolumeOptions** – Optional configuration for the `volume` type.
+        - **NoCopy** – A boolean indicating if volume should be
+          populated with the data from the target. (Default false)
+        - **Labels** – User-defined name and labels for the volume.
+        - **DriverConfig** – Map of driver-specific options.
+          - **Name** - Name of the driver to use to create the volume.
+          - **Options** - key/value map of driver specific options.
+    - **StopGracePeriod** – Amount of time to wait for the container to terminate before
+      forcefully killing it.
+  - **Resources** – Resource requirements which apply to each individual container created as part
+    of the service.
+    - **Limits** – Define resources limits.
+      - **CPU** – CPU limit
+      - **Memory** – Memory limit
+    - **Reservations** – Define resources reservation.
+      - **CPU** – CPU reservation
+      - **Memory** – Memory reservation
+  - **RestartPolicy** – Specification for the restart policy which applies to containers created
+    as part of this service.
+    - **Condition** – Condition for restart (`none`, `on-failure`, or `any`).
+    - **Delay** – Delay between restart attempts.
+    - **MaxAttempts** – Maximum attempts to restart a given container before giving up (default value
+      is 0, which is ignored).
+    - **Window** – Time window used to evaluate the restart policy (default value is
+      0, which is unbounded).
+  - **Placement** – Restrictions on where a service can run.
+    - **Constraints** – An array of constraints, e.g. `[ "node.role == manager" ]`.
+- **Mode** – Scheduling mode for the service (`replicated` or `global`, defaults to `replicated`).
+- **UpdateConfig** – Specification for the update strategy of the service.
+  - **Parallelism** – Maximum number of tasks to be updated in one iteration (0 means unlimited
+    parallelism).
+  - **Delay** – Amount of time between updates.
+- **Networks** – Array of network names or IDs to attach the service to.
+- **EndpointSpec** – Properties that can be configured to access and load balance a service.
+  - **Mode** – The mode of resolution to use for internal load balancing
+    between tasks (`vip` or `dnsrr`). Defaults to `vip` if not provided.
+  - **Ports** – List of exposed ports that this service is accessible on from
+    the outside, in the form of:
+    `{"Protocol": <"tcp"|"udp">, "PublishedPort": <port>, "TargetPort": <port>}`.
+    Ports can only be provided if `vip` resolution mode is used.
+
+**Query parameters**:
+
+- **version** – The version number of the service object being updated. This is
+  required to avoid conflicting writes.
+
+**Request Headers**:
+
+- **Content-type** – Set to `"application/json"`.
+- **X-Registry-Auth** – base64-encoded AuthConfig object, containing either
+  login information, or a token. Refer to the [create an image](#create-an-image)
+  section for more details.
+
+**Status codes**:
+
+- **200** – no error
+- **404** – no such service
+- **406** - node is not part of a swarm
+- **500** – server error
+
+### 3.10 Tasks
+
+**Note**: Task operations require the engine to be part of a swarm.
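+
+As with the other list endpoints, the task list (below) is usually narrowed with
+a `filters` query parameter. A hedged Python sketch, assuming a manager daemon
+on `localhost:2375` and filtering on desired state:
+
+```
+import http.client
+import json
+import urllib.parse
+
+# Sketch: list only tasks whose desired state is "running".
+filters = urllib.parse.quote(json.dumps({"desired-state": ["running"]}))
+conn = http.client.HTTPConnection("localhost", 2375)
+conn.request("GET", "/v1.24/tasks?filters=" + filters)
+tasks = json.loads(conn.getresponse().read())
+print([t["ID"] for t in tasks])
+```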
+
+#### List tasks
+
+
+`GET /tasks`
+
+List tasks
+
+**Example request**:
+
+    GET /v1.24/tasks HTTP/1.1
+
+**Example response**:
+
+    [
+      {
+        "ID": "0kzzo1i0y4jz6027t0k7aezc7",
+        "Version": {
+          "Index": 71
+        },
+        "CreatedAt": "2016-06-07T21:07:31.171892745Z",
+        "UpdatedAt": "2016-06-07T21:07:31.376370513Z",
+        "Spec": {
+          "ContainerSpec": {
+            "Image": "redis"
+          },
+          "Resources": {
+            "Limits": {},
+            "Reservations": {}
+          },
+          "RestartPolicy": {
+            "Condition": "any",
+            "MaxAttempts": 0
+          },
+          "Placement": {}
+        },
+        "ServiceID": "9mnpnzenvg8p8tdbtq4wvbkcz",
+        "Slot": 1,
+        "NodeID": "60gvrl6tm78dmak4yl7srz94v",
+        "Status": {
+          "Timestamp": "2016-06-07T21:07:31.290032978Z",
+          "State": "running",
+          "Message": "started",
+          "ContainerStatus": {
+            "ContainerID": "e5d62702a1b48d01c3e02ca1e0212a250801fa8d67caca0b6f35919ebc12f035",
+            "PID": 677
+          }
+        },
+        "DesiredState": "running",
+        "NetworksAttachments": [
+          {
+            "Network": {
+              "ID": "4qvuz4ko70xaltuqbt8956gd1",
+              "Version": {
+                "Index": 18
+              },
+              "CreatedAt": "2016-06-07T20:31:11.912919752Z",
+              "UpdatedAt": "2016-06-07T21:07:29.955277358Z",
+              "Spec": {
+                "Name": "ingress",
+                "Labels": {
+                  "com.docker.swarm.internal": "true"
+                },
+                "DriverConfiguration": {},
+                "IPAMOptions": {
+                  "Driver": {},
+                  "Configs": [
+                    {
+                      "Subnet": "10.255.0.0/16",
+                      "Gateway": "10.255.0.1"
+                    }
+                  ]
+                }
+              },
+              "DriverState": {
+                "Name": "overlay",
+                "Options": {
+                  "com.docker.network.driver.overlay.vxlanid_list": "256"
+                }
+              },
+              "IPAMOptions": {
+                "Driver": {
+                  "Name": "default"
+                },
+                "Configs": [
+                  {
+                    "Subnet": "10.255.0.0/16",
+                    "Gateway": "10.255.0.1"
+                  }
+                ]
+              }
+            },
+            "Addresses": [
+              "10.255.0.10/16"
+            ]
+          }
+        ]
+      },
+      {
+        "ID": "1yljwbmlr8er2waf8orvqpwms",
+        "Version": {
+          "Index": 30
+        },
+        "CreatedAt": "2016-06-07T21:07:30.019104782Z",
+        "UpdatedAt": "2016-06-07T21:07:30.231958098Z",
+        "Name": "hopeful_cori",
+        "Spec": {
+          "ContainerSpec": {
+            "Image": "redis"
+          },
+          "Resources": {
+            "Limits": {},
+            "Reservations": {}
+          },
+          "RestartPolicy": {
+            "Condition": "any",
+            "MaxAttempts": 0
+          },
+          "Placement": {}
+        },
+        "ServiceID": "9mnpnzenvg8p8tdbtq4wvbkcz",
+        "Slot": 1,
+        "NodeID": "60gvrl6tm78dmak4yl7srz94v",
+        "Status": {
+          "Timestamp": "2016-06-07T21:07:30.202183143Z",
+          "State": "shutdown",
+          "Message": "shutdown",
+          "ContainerStatus": {
+            "ContainerID": "1cf8d63d18e79668b0004a4be4c6ee58cddfad2dae29506d8781581d0688a213"
+          }
+        },
+        "DesiredState": "shutdown",
+        "NetworksAttachments": [
+          {
+            "Network": {
+              "ID": "4qvuz4ko70xaltuqbt8956gd1",
+              "Version": {
+                "Index": 18
+              },
+              "CreatedAt": "2016-06-07T20:31:11.912919752Z",
+              "UpdatedAt": "2016-06-07T21:07:29.955277358Z",
+              "Spec": {
+                "Name": "ingress",
+                "Labels": {
+                  "com.docker.swarm.internal": "true"
+                },
+                "DriverConfiguration": {},
+                "IPAMOptions": {
+                  "Driver": {},
+                  "Configs": [
+                    {
+                      "Subnet": "10.255.0.0/16",
+                      "Gateway": "10.255.0.1"
+                    }
+                  ]
+                }
+              },
+              "DriverState": {
+                "Name": "overlay",
+                "Options": {
+                  "com.docker.network.driver.overlay.vxlanid_list": "256"
+                }
+              },
+              "IPAMOptions": {
+                "Driver": {
+                  "Name": "default"
+                },
+                "Configs": [
+                  {
+                    "Subnet": "10.255.0.0/16",
+                    "Gateway": "10.255.0.1"
+                  }
+                ]
+              }
+            },
+            "Addresses": [
+              "10.255.0.5/16"
+            ]
+          }
+        ]
+      }
+    ]
+
+**Query parameters**:
+
+- **filters** – a JSON encoded value of the filters (a `map[string][]string`) to process on the
+  tasks list. Available filters:
+  - `id=<task id>`
+  - `name=<task name>`
+  - `service=<service name>`
+  - `node=<node id or name>`
+  - `label=key` or `label="key=value"`
+  - `desired-state=(running | shutdown | accepted)`
+
+**Status codes**:
+
+- **200** – no error
+- **406** - node is not part of a swarm
+- **500** – server error
+
+#### Inspect a task
+
+
+`GET /tasks/(id)`
+
+Get details on the task `id`
+
+**Example request**:
+
+    GET /v1.24/tasks/0kzzo1i0y4jz6027t0k7aezc7 HTTP/1.1
+
+**Example response**:
+
+    {
+      "ID": "0kzzo1i0y4jz6027t0k7aezc7",
+      "Version": {
+        "Index": 71
+      },
+      "CreatedAt": "2016-06-07T21:07:31.171892745Z",
+      "UpdatedAt": "2016-06-07T21:07:31.376370513Z",
+      "Spec": {
+        "ContainerSpec": {
+          "Image": "redis"
+        },
+        "Resources": {
+          "Limits": {},
+          "Reservations": {}
+        },
+        "RestartPolicy": {
+          "Condition": "any",
+          "MaxAttempts": 0
+        },
+        "Placement": {}
+      },
+      "ServiceID": "9mnpnzenvg8p8tdbtq4wvbkcz",
+      "Slot": 1,
+      "NodeID": "60gvrl6tm78dmak4yl7srz94v",
+      "Status": {
+        "Timestamp": "2016-06-07T21:07:31.290032978Z",
+        "State": "running",
+        "Message": "started",
+        "ContainerStatus": {
+          "ContainerID": "e5d62702a1b48d01c3e02ca1e0212a250801fa8d67caca0b6f35919ebc12f035",
+          "PID": 677
+        }
+      },
+      "DesiredState": "running",
+      "NetworksAttachments": [
+        {
+          "Network": {
+            "ID": "4qvuz4ko70xaltuqbt8956gd1",
+            "Version": {
+              "Index": 18
+            },
+            "CreatedAt": "2016-06-07T20:31:11.912919752Z",
+            "UpdatedAt": "2016-06-07T21:07:29.955277358Z",
+            "Spec": {
+              "Name": "ingress",
+              "Labels": {
+                "com.docker.swarm.internal": "true"
+              },
+              "DriverConfiguration": {},
+              "IPAMOptions": {
+                "Driver": {},
+                "Configs": [
+                  {
+                    "Subnet": "10.255.0.0/16",
+                    "Gateway": "10.255.0.1"
+                  }
+                ]
+              }
+            },
+            "DriverState": {
+              "Name": "overlay",
+              "Options": {
+                "com.docker.network.driver.overlay.vxlanid_list": "256"
+              }
+            },
+            "IPAMOptions": {
+              "Driver": {
+                "Name": "default"
+              },
+              "Configs": [
+                {
+                  "Subnet": "10.255.0.0/16",
+                  "Gateway": "10.255.0.1"
+                }
+              ]
+            }
+          },
+          "Addresses": [
+            "10.255.0.10/16"
+          ]
+        }
+      ]
+    }
+
+**Status codes**:
+
+- **200** – no error
+- **404** – unknown task
+- **406** - node is not part of a swarm
+- **500** – server error
+
+## 4. Going further
+
+### 4.1 Inside `docker run`
+
+As an example, the `docker run` command line makes the following API calls:
+
+- Create the container
+
+- If the status code is 404, it means the image doesn't exist:
+  - Try to pull it.
+  - Then, retry to create the container.
+
+- Start the container.
+
+- If you are not in detached mode:
+  - Attach to the container, using `logs=1` (to have `stdout` and
+    `stderr` from the container's start) and `stream=1`
+
+- If in detached mode or only `stdin` is attached, display the container's id.
+
+### 4.2 Hijacking
+
+In this version of the API, `/attach` uses hijacking to transport `stdin`,
+`stdout`, and `stderr` on the same socket.
+
+To hint potential proxies about connection hijacking, the Docker client sends
+connection upgrade headers, similarly to WebSocket.
+
+    Upgrade: tcp
+    Connection: Upgrade
+
+When the Docker daemon detects the `Upgrade` header, it switches its status code
+from **200 OK** to **101 UPGRADED** and resends the same headers.
+
+
+### 4.3 CORS Requests
+
+To enable cross-origin requests to the Engine API, set the value of
+`--api-cors-header` when running Docker in daemon mode. Setting `*` (asterisk)
+allows all origins; the default (blank) means CORS is disabled.
+
+    $ dockerd -H="192.168.1.9:2375" --api-cors-header="http://foo.bar"
diff --git a/components/engine/docs/api/version-history.md b/components/engine/docs/api/version-history.md
new file mode 100644
index 0000000000..3891d35c04
--- /dev/null
+++ b/components/engine/docs/api/version-history.md
@@ -0,0 +1,304 @@
+---
+title: "Engine API version history"
+description: "Documentation of changes that have been made to Engine API."
+keywords: "API, Docker, rcli, REST, documentation"
+---
+
+
+## v1.30 API changes
+
+[Docker Engine API v1.30](https://docs.docker.com/engine/api/v1.30/) documentation
+
+* `GET /info` now returns the list of supported logging drivers, including plugins.
+* `GET /info` and `GET /swarm` now return the cluster-wide swarm CA info if the node is in a swarm: the cluster root CA certificate, and the cluster TLS
+  leaf certificate issuer's subject and public key. It also displays the desired CA signing certificate, if any was provided as part of the spec.
+* `POST /build/` now (when not silent) produces an `Aux` message in the JSON output stream with payload `types.BuildResult` for each image produced. The final such message will reference the image resulting from the build.
+* `GET /nodes` and `GET /nodes/{id}` now return additional information about swarm TLS info if the node is part of a swarm: the trusted root CA, and the
+  issuer's subject and public key.
+* `GET /distribution/(name)/json` is a new endpoint that returns a JSON output stream with payload `types.DistributionInspect` for an image name. It includes a descriptor with the digest, and supported platforms retrieved from directly contacting the registry.
+* `POST /swarm/update` now accepts 3 additional parameters as part of the swarm spec's CA configuration; the desired CA certificate for
+  the swarm, the desired CA key for the swarm (if not using an external certificate), and an optional parameter to force swarm to
+  generate and rotate to a new CA certificate/key pair.
+* `POST /services/create` and `POST /services/(id or name)/update` now take the field `Platforms` as part of the service `Placement`, allowing you to specify the platforms supported by the service.
+* `POST /containers/(name)/wait` now accepts a `condition` query parameter to indicate which state change condition to wait for. Also, response headers are now returned immediately to acknowledge that the server has registered a wait callback for the client.
+
+## v1.29 API changes
+
+[Docker Engine API v1.29](https://docs.docker.com/engine/api/v1.29/) documentation
+
+* `DELETE /networks/(name)` now allows removing the ingress network, the one used to provide the routing-mesh.
+* `POST /networks/create` now supports creating the ingress network, by specifying an `Ingress` boolean field. As of now this is supported only when using the overlay network driver.
+* `GET /networks/(name)` now returns an `Ingress` field showing whether the network is the ingress one.
+* `GET /networks/` now supports a `scope` filter to filter networks based on the network mode (`swarm`, `global`, or `local`).
+* `POST /containers/create`, `POST /services/create` and `POST /services/(id or name)/update` now take the field `StartPeriod` as a part of the `HealthConfig`, allowing for specification of a period during which the container should not be considered unhealthy even if health checks do not pass.
+
+* `GET /services/(id)` now accepts an `insertDefaults` query-parameter to merge default values into the service inspect output.
+* `POST /containers/prune`, `POST /images/prune`, `POST /volumes/prune`, and `POST /networks/prune` now support a `label` filter to filter containers, images, volumes, or networks based on the label. The format of the label filter could be `label=<key>`/`label=<key>=<value>` to remove those with the specified labels, or `label!=<key>`/`label!=<key>=<value>` to remove those without the specified labels.
+
+## v1.28 API changes
+
+[Docker Engine API v1.28](https://docs.docker.com/engine/api/v1.28/) documentation
+
+* `POST /containers/create` now includes a `Consistency` field to specify the consistency level for each `Mount`, with possible values `default`, `consistent`, `cached`, or `delegated`.
+* `POST /containers/create` now takes a `DeviceCgroupRules` field in `HostConfig`, allowing you to set custom device cgroup rules for the created container.
+* Optional query parameter `verbose` for `GET /networks/(id or name)` will now list all services with all the tasks, including the non-local tasks on the given network.
+* `GET /containers/(id or name)/attach/ws` now returns WebSocket in binary frame format for API version >= v1.28, and returns WebSocket in text frame format for API version < v1.28, for the purpose of backward-compatibility.
+* `GET /networks` is optimised to return only the list of all networks and network-specific information. The list of all containers attached to a specific network is removed from this API and is only available using the network-specific `GET /networks/{network-id}`.
+* `GET /containers/json` now supports `publish` and `expose` filters to filter containers that expose or publish certain ports.
+* `POST /services/create` and `POST /services/(id or name)/update` now accept the `ReadOnly` parameter, which mounts the container's root filesystem as read only.
+* `POST /build` now accepts an `extrahosts` parameter to specify a host-to-IP mapping to use during the build.
+* `POST /services/create` and `POST /services/(id or name)/update` now accept a `rollback` value for `FailureAction`.
+* `POST /services/create` and `POST /services/(id or name)/update` now accept an optional `RollbackConfig` object which specifies rollback options.
+* `GET /services` now supports a `mode` filter to filter services based on the service mode (either `global` or `replicated`).
+* `POST /containers/(name)/update` now supports updating `NanoCPUs` that represents CPU quota in units of 10^-9 CPUs.
+
+## v1.27 API changes
+
+[Docker Engine API v1.27](https://docs.docker.com/engine/api/v1.27/) documentation
+
+* `GET /containers/(id or name)/stats` now includes an `online_cpus` field in both `precpu_stats` and `cpu_stats`. If this field is `nil` then for compatibility with older daemons the length of the corresponding `cpu_usage.percpu_usage` array should be used.
+
+## v1.26 API changes
+
+[Docker Engine API v1.26](https://docs.docker.com/engine/api/v1.26/) documentation
+
+* `POST /plugins/(plugin name)/upgrade` upgrades a plugin.
+
+## v1.25 API changes
+
+[Docker Engine API v1.25](https://docs.docker.com/engine/api/v1.25/) documentation
+
+* The API version is now required in all API calls. Instead of just requesting, for example, the URL `/containers/json`, you must now request `/v1.25/containers/json`.
+* `GET /version` now returns `MinAPIVersion`.
+* `POST /build` accepts a `networkmode` parameter to specify the network used during the build.
+
+* `GET /images/(name)/json` now returns `OsVersion` if populated.
+* `GET /info` now returns `Isolation`.
+* `POST /containers/create` now takes `AutoRemove` in HostConfig, to enable auto-removal of the container on the daemon side when the container's process exits.
+* `GET /containers/json` and `GET /containers/(id or name)/json` now return `"removing"` as a value for the `State.Status` field if the container is being removed. Previously, "exited" was returned as status.
+* `GET /containers/json` now accepts `removing` as a valid value for the `status` filter.
+* `GET /containers/json` now supports filtering containers by `health` status.
+* `DELETE /volumes/(name)` now accepts a `force` query parameter to force removal of volumes that were already removed out of band by the volume driver plugin.
+* `POST /containers/create/` and `POST /containers/(name)/update` now validate restart policies.
+* `POST /containers/create` now validates IPAMConfig in NetworkingConfig, and returns an error for invalid IPv4 and IPv6 addresses (`--ip` and `--ip6` in `docker create/run`).
+* `POST /containers/create` now takes a `Mounts` field in `HostConfig` which replaces `Binds`, `Volumes`, and `Tmpfs`. *note*: `Binds`, `Volumes`, and `Tmpfs` are still available and can be combined with `Mounts`.
+* `POST /build` now performs a preliminary validation of the `Dockerfile` before starting the build, and returns an error if the syntax is incorrect. Note that this change is _unversioned_ and applied to all API versions.
+* `POST /build` accepts a `cachefrom` parameter to specify images used for build cache.
+* `GET /networks/` endpoint now correctly returns a list of *all* networks,
+  instead of the default network if a trailing slash is provided, but no `name`
+  or `id`.
+* `DELETE /containers/(name)` endpoint now returns an error of `removal of container name is already in progress` with a status code of 400, when the container name is in a state of removal in progress.
+* `GET /containers/json` now supports an `is-task` filter to filter
+  containers that are tasks (part of a service in swarm mode).
+* `POST /containers/create` now takes a `StopTimeout` field.
+* `POST /services/create` and `POST /services/(id or name)/update` now accept `Monitor` and `MaxFailureRatio` parameters, which control the response to failures during service updates.
+* `POST /services/(id or name)/update` now accepts a `ForceUpdate` parameter inside the `TaskTemplate`, which causes the service to be updated even if there are no changes which would ordinarily trigger an update.
+* `POST /services/create` and `POST /services/(id or name)/update` now return a `Warnings` array.
+* `GET /networks/(name)` now returns the field `Created` in the response, showing the network's creation time.
+* `POST /containers/(id or name)/exec` now accepts an `Env` field, which holds a list of environment variables to be set in the context of the command execution.
+* `GET /volumes`, `GET /volumes/(name)`, and `POST /volumes/create` now return the `Options` field, which holds the driver-specific options to use when creating the volume.
+* `GET /exec/(id)/json` now returns `Pid`, which is the system pid for the exec'd process.
+* `POST /containers/prune` prunes stopped containers.
+* `POST /images/prune` prunes unused images.
+* `POST /volumes/prune` prunes unused volumes.
+* `POST /networks/prune` prunes unused networks.
+* Every API response now includes a `Docker-Experimental` header specifying if experimental features are enabled (value can be `true` or `false`).
+
+* Every API response now includes an `API-Version` header specifying the default API version of the server.
+* The `hostConfig` option now accepts the fields `CpuRealtimePeriod` and `CpuRtRuntime` to allocate CPU runtime to real-time (rt) tasks when `CONFIG_RT_GROUP_SCHED` is enabled in the kernel.
+* The `SecurityOptions` field within the `GET /info` response now includes `userns` if user namespaces are enabled in the daemon.
+* `GET /nodes` and `GET /nodes/(id or name)` now return `Addr` as part of a node's `Status`, which is the address that the node connects to the manager from.
+* The `HostConfig` field now includes `NanoCPUs` that represents CPU quota in units of 10^-9 CPUs.
+* `GET /info` now returns more structured information about security options.
+* The `HostConfig` field now includes `CpuCount` that represents the number of CPUs available for execution by the container. Windows daemon only.
+* `POST /services/create` and `POST /services/(id or name)/update` now accept the `TTY` parameter, which allocates a pseudo-TTY in the container.
+* `POST /services/create` and `POST /services/(id or name)/update` now accept the `DNSConfig` parameter, which specifies DNS related configurations in the resolver configuration file (`resolv.conf`) through `Nameservers`, `Search`, and `Options`.
+* `GET /networks/(id or name)` now includes the IP and name of all peer nodes for swarm mode overlay networks.
+* `GET /plugins` lists plugins.
+* `POST /plugins/pull?name=<plugin name>` pulls a plugin.
+* `GET /plugins/(plugin name)` inspects a plugin.
+* `POST /plugins/(plugin name)/set` configures a plugin.
+* `POST /plugins/(plugin name)/enable` enables a plugin.
+* `POST /plugins/(plugin name)/disable` disables a plugin.
+* `POST /plugins/(plugin name)/push` pushes a plugin.
+* `POST /plugins/create?name=(plugin name)` creates a plugin.
+* `DELETE /plugins/(plugin name)` deletes a plugin.
+* `POST /nodes/(id or name)/update` now accepts both `id` or `name` to identify the node to update.
+* `GET /images/json` now supports a `reference` filter.
+* `GET /secrets` returns information on the secrets.
+* `POST /secrets/create` creates a secret.
+* `DELETE /secrets/{id}` removes the secret `id`.
+* `GET /secrets/{id}` returns information on the secret `id`.
+* `POST /secrets/{id}/update` updates the secret `id`.
+* `POST /services/(id or name)/update` now accepts a service name or a prefix of the service id as a parameter.
+* `POST /containers/create` added 2 built-in log-opts that work on all logging drivers:
+  `mode` (`blocking`|`non-blocking`), and `max-buffer-size` (e.g. `2m`), which enables a non-blocking log buffer.
+
+## v1.24 API changes
+
+[Docker Engine API v1.24](v1.24.md) documentation
+
+* `POST /containers/create` now takes a `StorageOpt` field.
+* `GET /info` now returns a `SecurityOptions` field, showing if `apparmor`, `seccomp`, or `selinux` is supported.
+* `GET /info` no longer returns the `ExecutionDriver` property. This property was no longer used after integration
+  with ContainerD in Docker 1.11.
+* `GET /networks` now supports filtering by `label` and `driver`.
+* `GET /containers/json` now supports filtering containers by `network` name or id.
+* `POST /containers/create` now takes `IOMaximumBandwidth` and `IOMaximumIOps` fields. Windows daemon only.
+* `POST /containers/create` now returns an HTTP 400 "bad parameter" message
+  if no command is specified (instead of an HTTP 500 "server error").
+* `GET /images/search` now takes a `filters` query parameter.
+* `GET /events` now supports a `reload` event that is emitted when the daemon configuration is reloaded.
+* `GET /events` now supports filtering by daemon name or ID.
+* `GET /events` now supports a `detach` event that is emitted on detaching from a container process.
+* `GET /events` now supports an `exec_detach` event that is emitted on detaching from an exec process.
+* `GET /images/json` now supports filters `since` and `before`.
+* `POST /containers/(id or name)/start` no longer accepts a `HostConfig`.
+* `POST /images/(name)/tag` no longer has a `force` query parameter.
+* `GET /images/search` now supports a `limit` parameter that caps the number of returned search results.
+* `POST /containers/{name:.*}/copy` is now removed and errors out starting from this API version.
+* API errors are now returned as JSON instead of plain text.
+* `POST /containers/create` and `POST /containers/(id)/start` allow you to configure kernel parameters (sysctls) for use in the container.
+* `POST /containers/(name)/exec` and `POST /exec/(id)/start`
+  no longer expect a "Container" field to be present. This property was not used
+  and is no longer sent by the docker client.
+* `POST /containers/create/` now validates the hostname (it should be a valid RFC 1123 hostname).
+* The `POST /containers/create/` `HostConfig.PidMode` field now accepts `container:<name|id>`,
+  to have the container join the PID namespace of an existing container.
+
+## v1.23 API changes
+
+[Docker Engine API v1.23](v1.23.md) documentation
+
+* `GET /containers/json` returns the state of the container, one of `created`, `restarting`, `running`, `paused`, `exited` or `dead`.
+* `GET /containers/json` returns the mount points for the container.
+* `GET /networks/(name)` now returns an `Internal` field showing whether the network is internal or not.
+* `GET /networks/(name)` now returns an `EnableIPv6` field showing whether the network has IPv6 enabled or not.
+* `POST /containers/(name)/update` now supports updating the container's restart policy.
+* `POST /networks/create` now supports enabling IPv6 on the network by setting the `EnableIPv6` field (doing this with a label will no longer work).
+* `GET /info` now returns a `CgroupDriver` field showing what cgroup driver the daemon is using: `cgroupfs` or `systemd`.
+* `GET /info` now returns a `KernelMemory` field, showing if "kernel memory limit" is supported.
+* `POST /containers/create` now takes a `PidsLimit` field, if the kernel is >= 4.3 and the pids cgroup is supported.
+* `GET /containers/(id or name)/stats` now returns `pids_stats`, if the kernel is >= 4.3 and the pids cgroup is supported.
+* `POST /containers/create` now allows you to override user namespace remapping and use privileged options for the container.
+* `POST /containers/create` now allows specifying `nocopy` for named volumes, which disables automatic copying from the container path to the volume.
+* `POST /auth` now returns an `IdentityToken` when supported by a registry.
+* `POST /containers/create` with both `Hostname` and `Domainname` fields specified will result in the container's hostname being set to `Hostname`, rather than `Hostname.Domainname`.
+* `GET /volumes` now supports more filters; the newly added filters are `name` and `driver`.
+* `GET /containers/(id or name)/logs` now accepts a `details` query parameter to stream the extra attributes that were provided to the container's `LogOpts`, such as environment variables and labels, with the logs.
+* `POST /images/load` now returns progress information as a JSON stream, and has a `quiet` query parameter to suppress progress details.
+
+## v1.22 API changes
+
+[Docker Engine API v1.22](v1.22.md) documentation
+
+* `POST /containers/(name)/update` updates the resources of a container.
+* `GET /containers/json` supports the `isolation` filter on Windows.
+* `GET /containers/json` now returns the list of networks of containers.
+* `GET /info` now returns `Architecture` and `OSType` fields, providing information
+  about the host architecture and operating system type that the daemon runs on.
+* `GET /networks/(name)` now returns a `Name` field for each container attached to the network.
+* `GET /version` now returns the `BuildTime` field in RFC3339Nano format to make it
+  consistent with other date/time values returned by the API.
+* `AuthConfig` now supports a `registrytoken` for token-based authentication.
+* `POST /containers/create` now has a 4M minimum value limit for `HostConfig.KernelMemory`.
+* Pushes initiated with `POST /images/(name)/push` and pulls initiated with `POST /images/create`
+  will be cancelled if the HTTP connection making the API request is closed before
+  the push or pull completes.
+* `POST /containers/create` now allows you to set a read/write rate limit for a
+  device (in bytes per second or IO per second).
+* `GET /networks` now supports filtering by `name`, `id` and `type`.
+* `POST /containers/create` now allows you to set the static IPv4 and/or IPv6 address for the container.
+* `POST /networks/(id)/connect` now allows you to set the static IPv4 and/or IPv6 address for the container.
+* `GET /info` now includes the number of containers running, stopped, and paused.
+* `POST /networks/create` now supports restricting external access to the network by setting the `Internal` field.
+* `POST /networks/(id)/disconnect` now includes a `Force` option to forcefully disconnect a container from a network.
+* `GET /containers/(id)/json` now returns the `NetworkID` of containers.
+* `POST /networks/create` now supports an options field in the IPAM config that provides options
+  for custom IPAM plugins.
+* `GET /networks/{network-id}` now returns IPAM config options for custom IPAM plugins if any
+  are available.
+* `GET /networks/` now returns subnet info for user-defined networks.
+* `GET /info` can now return a `SystemStatus` field useful for returning additional information about applications
+  that are built on top of the engine.
+
+## v1.21 API changes
+
+[Docker Engine API v1.21](v1.21.md) documentation
+
+* `GET /volumes` lists volumes from all volume drivers.
+* `POST /volumes/create` creates a volume.
+* `GET /volumes/(name)` gets low-level information about a volume.
+* `DELETE /volumes/(name)` removes a volume with the specified name.
+* `VolumeDriver` was moved from `config` to `HostConfig` to make the configuration portable.
+* `GET /images/(name)/json` now returns information about an image's `RepoTags` and `RepoDigests`.
+* The `config` option now accepts the field `StopSignal`, which specifies the signal to use to kill a container.
+* `GET /containers/(id)/stats` will return networking information for each interface.
+* The `HostConfig` option now includes the `DnsOptions` field to configure the container's DNS options.
+* `POST /build` now optionally takes a serialized map of build-time variables.
+* `GET /events` now includes a `timenano` field, in addition to the existing `time` field.
+* `GET /events` now supports filtering by image and container labels.
+* `GET /info` now lists engine version information and returns `CPUShares` and `Cpuset` information.
+* `GET /containers/json` will return the `ImageID` of the image used by the container.
+* `POST /exec/(name)/start` will now return an HTTP 409 when the container is either stopped or paused.
+* `POST /containers/create` now takes `KernelMemory` in HostConfig to specify the kernel memory limit.
+* `GET /containers/(name)/json` now accepts a `size` parameter. Setting this parameter to `1` returns container size information in the `SizeRw` and `SizeRootFs` fields.
+* `GET /containers/(name)/json` now returns a `NetworkSettings.Networks` field,
+  detailing network settings per network. This field deprecates the
+  `NetworkSettings.Gateway`, `NetworkSettings.IPAddress`,
+  `NetworkSettings.IPPrefixLen`, and `NetworkSettings.MacAddress` fields, which
+  are still returned for backward-compatibility, but will be removed in a future version.
+* `GET /exec/(id)/json` now returns a `NetworkSettings.Networks` field,
+  detailing network settings per network. This field deprecates the
+  `NetworkSettings.Gateway`, `NetworkSettings.IPAddress`,
+  `NetworkSettings.IPPrefixLen`, and `NetworkSettings.MacAddress` fields, which
+  are still returned for backward-compatibility, but will be removed in a future version.
+* The `HostConfig` option now includes the `OomScoreAdj` field for adjusting the
+  badness heuristic. This heuristic selects which processes the OOM killer kills
+  under out-of-memory conditions.
+
+## v1.20 API changes
+
+[Docker Engine API v1.20](v1.20.md) documentation
+
+* `GET /containers/(id)/archive` gets an archive of filesystem content from a container.
+* `PUT /containers/(id)/archive` uploads an archive of content to be extracted to
+an existing directory inside a container's filesystem.
+* `POST /containers/(id)/copy` is deprecated in favor of the above `archive`
+endpoint, which can be used to download files and directories from a container.
+* The `hostConfig` option now accepts the field `GroupAdd`, which specifies a
+list of additional groups that the container process will run as.
+
+## v1.19 API changes
+
+[Docker Engine API v1.19](v1.19.md) documentation
+
+* When the daemon detects a version mismatch with the client, usually when
+the client is newer than the daemon, an HTTP 400 is now returned instead
+of a 404.
+* `GET /containers/(id)/stats` now accepts a `stream` boolean parameter to get only one set of stats and disconnect.
+* `GET /containers/(id)/logs` now accepts a `since` timestamp parameter.
+* `GET /info` The fields `Debug`, `IPv4Forwarding`, `MemoryLimit`, and
+`SwapLimit` are now returned as booleans instead of ints. In addition, the
+endpoint now returns the new boolean fields `CpuCfsPeriod`, `CpuCfsQuota`, and
+`OomKillDisable`.
+* The `hostConfig` option now accepts the fields `CpuPeriod` and `CpuQuota`.
+* `POST /build` accepts `cpuperiod` and `cpuquota` options.
+
+## v1.18 API changes
+
+[Docker Engine API v1.18](v1.18.md) documentation
+
+* `GET /version` now returns `Os`, `Arch` and `KernelVersion`.
+* `POST /containers/create` and `POST /containers/(id)/start` allow you to set ulimit settings for use in the container.
+* `GET /info` now returns `SystemTime`, `HttpProxy`, `HttpsProxy` and `NoProxy`.
+* `GET /images/json` added a `RepoDigests` field to include image digest information.
+* `POST /build` can now set resource constraints for all containers created for the build.
+* `CgroupParent` can be passed in the host config to set up container cgroups under a specific cgroup.
+* Closing the HTTP request for a `POST /build` cancels the build.
+* `POST /containers/(id)/exec` now includes a `Warnings` field in the response.
diff --git a/components/engine/docs/deprecated.md b/components/engine/docs/deprecated.md
new file mode 100644
index 0000000000..7e0bfc0a60
--- /dev/null
+++ b/components/engine/docs/deprecated.md
@@ -0,0 +1,321 @@
+---
+aliases: ["/engine/misc/deprecated/"]
+title: "Deprecated Engine Features"
+description: "Deprecated Features."
+keywords: "docker, documentation, about, technology, deprecate"
+---
+
+
+
+# Deprecated Engine Features
+
+The following features are deprecated in Engine.
+To learn more about Docker Engine's deprecation policy,
+see [Feature Deprecation Policy](https://docs.docker.com/engine/#feature-deprecation-policy).
+
+### Asynchronous `service create` and `service update`
+
+**Deprecated In Release: v17.05.0**
+
+**Disabled by default in release: v17.09**
+
+Docker 17.05.0 added an optional `--detach=false` option to make
+`docker service create` and `docker service update` work synchronously. This
+option will be enabled by default in Docker 17.09, at which point the `--detach`
+flag can be used to restore the previous (asynchronous) behavior.
+
+### `-g` and `--graph` flags on `dockerd`
+
+**Deprecated In Release: v17.05.0**
+
+The `-g` or `--graph` flag for the `dockerd` or `docker daemon` command was
+used to indicate the directory in which to store persistent data and resource
+configuration, and has been replaced with the more descriptive `--data-root`
+flag.
+
+These flags were added before Docker 1.0, so will not be _removed_, only
+_hidden_, to discourage their use.
+
+### Top-level network properties in NetworkSettings
+
+**Deprecated In Release: [v1.13.0](https://github.com/docker/docker/releases/tag/v1.13.0)**
+
+**Target For Removal In Release: v17.12**
+
+When inspecting a container, `NetworkSettings` contains top-level information
+about the default ("bridge") network:
+
+`EndpointID`, `Gateway`, `GlobalIPv6Address`, `GlobalIPv6PrefixLen`, `IPAddress`,
+`IPPrefixLen`, `IPv6Gateway`, and `MacAddress`.
+
+These properties are deprecated in favor of per-network properties in
+`NetworkSettings.Networks`. These properties were already "deprecated" in
+Docker 1.9, but kept around for backward compatibility.
+
+Refer to [#17538](https://github.com/docker/docker/pull/17538) for further
+information.
+
+### `filter` param for `/images/json` endpoint
+**Deprecated In Release: [v1.13.0](https://github.com/docker/docker/releases/tag/v1.13.0)**
+
+**Target For Removal In Release: v17.12**
+
+The `filter` param to filter the list of images by reference (name or name:tag) is now implemented as a regular filter, named `reference`.
+
+### `repository:shortid` image references
+**Deprecated In Release: [v1.13.0](https://github.com/docker/docker/releases/tag/v1.13.0)**
+
+**Target For Removal In Release: v17.12**
+
+The `repository:shortid` syntax for referencing images is rarely used, collides with tag references, and can be confused with digest references.
+
+### `docker daemon` subcommand
+**Deprecated In Release: [v1.13.0](https://github.com/docker/docker/releases/tag/v1.13.0)**
+
+**Target For Removal In Release: v17.12**
+
+The daemon has been moved to a separate binary (`dockerd`), which should be used instead.
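+
+For example (the `-H` socket flag shown is only illustrative), the same daemon
+invocation moves from the deprecated subcommand to the standalone binary:
+
+```bash
+# Before (deprecated): daemon run as a subcommand of the docker CLI
+docker daemon -H unix:///var/run/docker.sock
+
+# After: the standalone dockerd binary accepts the same flags
+dockerd -H unix:///var/run/docker.sock
+```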
+
+### Duplicate keys with conflicting values in engine labels
+**Deprecated In Release: [v1.13.0](https://github.com/docker/docker/releases/tag/v1.13.0)**
+
+**Target For Removal In Release: v17.12**
+
+Duplicate keys with conflicting values have been deprecated. A warning is displayed
+in the output, and an error will be returned in the future.
+
+### `MAINTAINER` in Dockerfile
+**Deprecated In Release: [v1.13.0](https://github.com/docker/docker/releases/tag/v1.13.0)**
+
+`MAINTAINER` was an early, very limited form of `LABEL`, which should be used instead.
+
+### API calls without a version
+**Deprecated In Release: [v1.13.0](https://github.com/docker/docker/releases/tag/v1.13.0)**
+
+**Target For Removal In Release: v17.12**
+
+API versions should be supplied to all API calls to ensure compatibility with
+future Engine versions. Instead of just requesting, for example, the URL
+`/containers/json`, you must now request `/v1.25/containers/json`.
+
+### Backing filesystem without `d_type` support for overlay/overlay2
+**Deprecated In Release: [v1.13.0](https://github.com/docker/docker/releases/tag/v1.13.0)**
+
+**Target For Removal In Release: v17.12**
+
+The overlay and overlay2 storage drivers do not work as expected if the backing
+filesystem does not support `d_type`. For example, XFS does not support `d_type`
+if it is formatted with the `ftype=0` option.
+
+Please also refer to [#27358](https://github.com/docker/docker/issues/27358) for
+further information.
+
+### Three arguments form in `docker import`
+**Deprecated In Release: [v0.6.7](https://github.com/docker/docker/releases/tag/v0.6.7)**
+
+**Removed In Release: [v1.12.0](https://github.com/docker/docker/releases/tag/v1.12.0)**
+
+The `docker import` command format `file|URL|- [REPOSITORY [TAG]]` has been deprecated since November 2013. It is no longer supported.
+
+### `-h` shorthand for `--help`
+
+**Deprecated In Release: [v1.12.0](https://github.com/docker/docker/releases/tag/v1.12.0)**
+
+**Target For Removal In Release: v17.09**
+
+The shorthand (`-h`) is less common than `--help` on Linux and cannot be used
+on all subcommands (because it conflicts with, e.g., `-h` / `--hostname` on
+`docker create`). For this reason, the `-h` shorthand was not printed in the
+"usage" output of subcommands, nor documented, and is now marked "deprecated".
+
+### `-e` and `--email` flags on `docker login`
+**Deprecated In Release: [v1.11.0](https://github.com/docker/docker/releases/tag/v1.11.0)**
+
+**Target For Removal In Release: v17.06**
+
+The `docker login` command is removing the ability to automatically register for an account with the target registry if the given username doesn't exist. Due to this change, the email flag is no longer required, and is deprecated.
+
+### Separator (`:`) of `--security-opt` flag on `docker run`
+**Deprecated In Release: [v1.11.0](https://github.com/docker/docker/releases/tag/v1.11.0)**
+
+**Target For Removal In Release: v17.06**
+
+The `--security-opt` flag no longer uses the colon separator (`:`) to divide keys and values; it uses the equals sign (`=`) for consistency with other similar flags, like `--storage-opt`.
+
+### `/containers/(id or name)/copy` endpoint
+
+**Deprecated In Release: [v1.8.0](https://github.com/docker/docker/releases/tag/v1.8.0)**
+
+**Removed In Release: [v1.12.0](https://github.com/docker/docker/releases/tag/v1.12.0)**
+
+The endpoint `/containers/(id or name)/copy` is deprecated in favor of `/containers/(id or name)/archive`.
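+
+For example, a file can be retrieved through the replacement endpoint with a
+plain HTTP call (the container name `web` and the path are placeholders, and
+this assumes a curl build with `--unix-socket` support):
+
+```bash
+# Download /etc/hostname from container "web" as a tar stream
+curl --unix-socket /var/run/docker.sock \
+  "http://localhost/containers/web/archive?path=/etc/hostname" \
+  -o hostname.tar
+```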
+
+### Ambiguous event fields in API
+**Deprecated In Release: [v1.10.0](https://github.com/docker/docker/releases/tag/v1.10.0)**
+
+The fields `ID`, `Status` and `From` in the events API have been deprecated in favor of a richer structure.
+See the events API documentation for the new format.
+
+### `-f` flag on `docker tag`
+**Deprecated In Release: [v1.10.0](https://github.com/docker/docker/releases/tag/v1.10.0)**
+
+**Removed In Release: [v1.12.0](https://github.com/docker/docker/releases/tag/v1.12.0)**
+
+To make tagging consistent across the various `docker` commands, the `-f` flag on the `docker tag` command is deprecated. It is no longer necessary to specify `-f` to move a tag from one image to another. Nor will `docker` generate an error if the `-f` flag is missing and the specified tag is already in use.
+
+### HostConfig at API container start
+**Deprecated In Release: [v1.10.0](https://github.com/docker/docker/releases/tag/v1.10.0)**
+
+**Removed In Release: [v1.12.0](https://github.com/docker/docker/releases/tag/v1.12.0)**
+
+Passing a `HostConfig` to `POST /containers/{name}/start` is deprecated in favor of
+defining it at container creation (`POST /containers/create`).
+
+### `--before` and `--since` flags on `docker ps`
+
+**Deprecated In Release: [v1.10.0](https://github.com/docker/docker/releases/tag/v1.10.0)**
+
+**Removed In Release: [v1.12.0](https://github.com/docker/docker/releases/tag/v1.12.0)**
+
+The `docker ps --before` and `docker ps --since` options are deprecated.
+Use `docker ps --filter=before=...` and `docker ps --filter=since=...` instead.
+
+### `--automated` and `--stars` flags on `docker search`
+
+**Deprecated in Release: [v1.12.0](https://github.com/docker/docker/releases/tag/v1.12.0)**
+
+**Target For Removal In Release: v17.09**
+
+The `docker search --automated` and `docker search --stars` options are deprecated.
+Use `docker search --filter=is-automated=...` and `docker search --filter=stars=...` instead.
+
+### Driver Specific Log Tags
+**Deprecated In Release: [v1.9.0](https://github.com/docker/docker/releases/tag/v1.9.0)**
+
+**Removed In Release: [v1.12.0](https://github.com/docker/docker/releases/tag/v1.12.0)**
+
+Log tags are now generated in a standard way across different logging drivers.
+As a result, the driver-specific log tag options `syslog-tag`, `gelf-tag` and
+`fluentd-tag` have been deprecated in favor of the generic `tag` option.
+
+```bash
+{% raw %}
+docker --log-driver=syslog --log-opt tag="{{.ImageName}}/{{.Name}}/{{.ID}}"
+{% endraw %}
+```
+
+### LXC built-in exec driver
+**Deprecated In Release: [v1.8.0](https://github.com/docker/docker/releases/tag/v1.8.0)**
+
+**Removed In Release: [v1.10.0](https://github.com/docker/docker/releases/tag/v1.10.0)**
+
+The built-in LXC execution driver, the lxc-conf flag, and API fields have been removed.
+
+### Old Command Line Options
+**Deprecated In Release: [v1.8.0](https://github.com/docker/docker/releases/tag/v1.8.0)**
+
+**Removed In Release: [v1.10.0](https://github.com/docker/docker/releases/tag/v1.10.0)**
+
+The flags `-d` and `--daemon` are deprecated in favor of the `daemon` subcommand:
+
+    docker daemon -H ...
+
+The following single-dash (`-opt`) variants of certain command line options
+are deprecated and replaced with double-dash options (`--opt`):
+
+    docker attach -nostdin
+    docker attach -sig-proxy
+    docker build -no-cache
+    docker build -rm
+    docker commit -author
+    docker commit -run
+    docker events -since
+    docker history -notrunc
+    docker images -notrunc
+    docker inspect -format
+    docker ps -beforeId
+    docker ps -notrunc
+    docker ps -sinceId
+    docker rm -link
+    docker run -cidfile
+    docker run -dns
+    docker run -entrypoint
+    docker run -expose
+    docker run -link
+    docker run -lxc-conf
+    docker run -n
+    docker run -privileged
+    docker run -volumes-from
+    docker search -notrunc
+    docker search -stars
+    docker search -t
+    docker search -trusted
+    docker tag -force
+
+The following double-dash options are deprecated and have no replacement:
+
+    docker run --cpuset
+    docker run --networking
+    docker ps --since-id
+    docker ps --before-id
+    docker search --trusted
+
+**Deprecated In Release: [v1.5.0](https://github.com/docker/docker/releases/tag/v1.5.0)**
+
+**Removed In Release: [v1.12.0](https://github.com/docker/docker/releases/tag/v1.12.0)**
+
+The single-dash form (`-help`) was removed in favor of the double-dash `--help`:
+
+    docker -help
+    docker [COMMAND] -help
+
+### `--run` flag on `docker commit`
+
+**Deprecated In Release: [v0.10.0](https://github.com/docker/docker/releases/tag/v0.10.0)**
+
+**Removed In Release: [v1.13.0](https://github.com/docker/docker/releases/tag/v1.13.0)**
+
+The `--run` flag of `docker commit` (and its short form `-run`) was deprecated in favor
+of the `--changes` flag, which allows passing `Dockerfile` commands.
+
+
+### Interacting with V1 registries
+
+**Disabled By Default In Release: v17.06**
+
+**Target For Removal In Release: v17.12**
+
+Version 1.9 adds a flag (`--disable-legacy-registry=false`) which prevents the
+docker daemon from performing `pull`, `push`, and `login` operations against v1
+registries. Though enabled by default, this signals the intent to deprecate
+the v1 protocol.
+
+Support for the v1 protocol to the public registry was removed in 1.13. Any
+mirror configurations using v1 should be updated to use a
+[v2 registry mirror](https://docs.docker.com/registry/recipes/mirror/).
+
+### Docker Content Trust ENV passphrase variables name change
+**Deprecated In Release: [v1.9.0](https://github.com/docker/docker/releases/tag/v1.9.0)**
+
+**Removed In Release: [v1.12.0](https://github.com/docker/docker/releases/tag/v1.12.0)**
+
+Since 1.9, the Docker Content Trust Offline key has been renamed to the Root key and the Tagging key has been renamed to the Repository key. Due to this renaming, we're also changing the corresponding environment variables:
+
+- `DOCKER_CONTENT_TRUST_OFFLINE_PASSPHRASE` is now named `DOCKER_CONTENT_TRUST_ROOT_PASSPHRASE`
+- `DOCKER_CONTENT_TRUST_TAGGING_PASSPHRASE` is now named `DOCKER_CONTENT_TRUST_REPOSITORY_PASSPHRASE`
+
+### `--api-enable-cors` flag on dockerd
+
+**Deprecated In Release: [v1.6.0](https://github.com/docker/docker/releases/tag/v1.6.0)**
+
+**Target For Removal In Release: v17.09**
+
+The flag `--api-enable-cors` has been deprecated since v1.6.0. Use the flag
+`--api-cors-header` instead.
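+
+For example (the origin shown is a placeholder):
+
+```bash
+# Before (deprecated): enables CORS on the API for all origins
+dockerd --api-enable-cors
+
+# After: state the allowed origin explicitly
+dockerd --api-cors-header="https://dashboard.example.com"
+```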
diff --git a/components/engine/docs/extend/EBS_volume.md b/components/engine/docs/extend/EBS_volume.md new file mode 100644 index 0000000000..8c64efa164 --- /dev/null +++ b/components/engine/docs/extend/EBS_volume.md @@ -0,0 +1,164 @@ +--- +description: Volume plugin for Amazon EBS +keywords: "API, Usage, plugins, documentation, developer, amazon, ebs, rexray, volume" +title: Volume plugin for Amazon EBS +--- + + + +# A proof-of-concept Rexray plugin + +In this example, a simple Rexray plugin will be created for the purposes of using +it on an Amazon EC2 instance with EBS. It is not meant to be a complete Rexray plugin. + +The example source is available at [https://github.com/tiborvass/rexray-plugin](https://github.com/tiborvass/rexray-plugin). + +To learn more about Rexray: [https://github.com/codedellemc/rexray](https://github.com/codedellemc/rexray) + +## 1. Make a Docker image + +The following is the Dockerfile used to containerize rexray. + +```Dockerfile +FROM debian:jessie +RUN apt-get update && apt-get install -y --no-install-recommends wget ca-certificates +RUN wget https://dl.bintray.com/emccode/rexray/stable/0.6.4/rexray-Linux-x86_64-0.6.4.tar.gz -O rexray.tar.gz && tar -xvzf rexray.tar.gz -C /usr/bin && rm rexray.tar.gz +RUN mkdir -p /run/docker/plugins /var/lib/libstorage/volumes +ENTRYPOINT ["rexray"] +CMD ["--help"] +``` + +To build it you can run `image=$(cat Dockerfile | docker build -q -)` and `$image` +will reference the containerized rexray image. + +## 2. Extract rootfs + +```sh +$ TMPDIR=/tmp/rexray # for the purpose of this example +$ # create container without running it, to extract the rootfs from image +$ docker create --name rexray "$image" +$ # save the rootfs to a tar archive +$ docker export -o $TMPDIR/rexray.tar rexray +$ # extract rootfs from tar archive to a rootfs folder +$ ( mkdir -p $TMPDIR/rootfs; cd $TMPDIR/rootfs; tar xf ../rexray.tar ) +``` + +## 3. Add plugin configuration + +We have to put the following JSON to `$TMPDIR/config.json`: + +```json +{ + "Args": { + "Description": "", + "Name": "", + "Settable": null, + "Value": null + }, + "Description": "A proof-of-concept EBS plugin (using rexray) for Docker", + "Documentation": "https://github.com/tiborvass/rexray-plugin", + "Entrypoint": [ + "/usr/bin/rexray", "service", "start", "-f" + ], + "Env": [ + { + "Description": "", + "Name": "REXRAY_SERVICE", + "Settable": [ + "value" + ], + "Value": "ebs" + }, + { + "Description": "", + "Name": "EBS_ACCESSKEY", + "Settable": [ + "value" + ], + "Value": "" + }, + { + "Description": "", + "Name": "EBS_SECRETKEY", + "Settable": [ + "value" + ], + "Value": "" + } + ], + "Interface": { + "Socket": "rexray.sock", + "Types": [ + "docker.volumedriver/1.0" + ] + }, + "Linux": { + "AllowAllDevices": true, + "Capabilities": ["CAP_SYS_ADMIN"], + "Devices": null + }, + "Mounts": [ + { + "Source": "/dev", + "Destination": "/dev", + "Type": "bind", + "Options": ["rbind"] + } + ], + "Network": { + "Type": "host" + }, + "PropagatedMount": "/var/lib/libstorage/volumes", + "User": {}, + "WorkDir": "" +} +``` + +Please note a couple of points: +- `PropagatedMount` is needed so that the docker daemon can see mounts done by the +rexray plugin from within the container, otherwise the docker daemon is not able +to mount a docker volume. +- The rexray plugin needs dynamic access to host devices. For that reason, we +have to give it access to all devices under `/dev` and set `AllowAllDevices` to +true for proper access. 
+
+- The user of this simple plugin can change only three settings: `REXRAY_SERVICE`,
+`EBS_ACCESSKEY` and `EBS_SECRETKEY`. This is because of the reduced scope of this
+plugin. Ideally, other rexray parameters could also be settable.
+
+## 4. Create plugin
+
+`docker plugin create tiborvass/rexray-plugin "$TMPDIR"` will create the plugin.
+
+```sh
+$ docker plugin ls
+ID                  NAME                             DESCRIPTION                                 ENABLED
+2475a4bd0ca5        tiborvass/rexray-plugin:latest   A rexray volume plugin for Docker           false
+```
+
+## 5. Test plugin
+
+```sh
+$ docker plugin set tiborvass/rexray-plugin EBS_ACCESSKEY=$AWS_ACCESSKEY EBS_SECRETKEY=$AWS_SECRETKEY
+$ docker plugin enable tiborvass/rexray-plugin
+$ docker volume create -d tiborvass/rexray-plugin my-ebs-volume
+$ docker volume ls
+DRIVER                           VOLUME NAME
+tiborvass/rexray-plugin:latest   my-ebs-volume
+$ docker run --rm -v my-ebs-volume:/volume busybox sh -c 'echo bye > /volume/hi'
+$ docker run --rm -v my-ebs-volume:/volume busybox cat /volume/hi
+bye
+```
+
+## 6. Push plugin
+
+First, ensure you are logged in with `docker login`. Then run
+`docker plugin push tiborvass/rexray-plugin` to push the plugin to a registry
+like a regular docker image, making it available for others to install via
+`docker plugin install tiborvass/rexray-plugin EBS_ACCESSKEY=$AWS_ACCESSKEY EBS_SECRETKEY=$AWS_SECRETKEY`.
diff --git a/components/engine/docs/extend/config.md b/components/engine/docs/extend/config.md
new file mode 100644
index 0000000000..bb6c7f2ceb
--- /dev/null
+++ b/components/engine/docs/extend/config.md
@@ -0,0 +1,238 @@
+---
+title: "Plugin config"
+description: "How to develop and use a plugin with the managed plugin system"
+keywords: "API, Usage, plugins, documentation, developer"
+---
+
+
+
+# Plugin Config Version 1 of Plugin V2
+
+This document outlines the format of the V0 plugin configuration. The plugin
+config described herein was introduced in the Docker daemon in the [v1.12.0
+release](https://github.com/docker/docker/commit/f37117045c5398fd3dca8016ea8ca0cb47e7312b).
+
+Plugin configs describe the various constituents of a docker plugin. Plugin
+configs can be serialized to JSON format with the following media types:
+
+Config Type  | Media Type
+------------- | -------------
+config  | "application/vnd.docker.plugin.v1+json"
+
+
+## *Config* Field Descriptions
+
+Config provides the base accessible fields for working with the V0 plugin format
+in the registry.
+
+- **`description`** *string*
+
+  description of the plugin
+
+- **`documentation`** *string*
+
+  link to the documentation about the plugin
+
+- **`interface`** *PluginInterface*
+
+  interface implemented by the plugins, struct consisting of the following fields
+
+  - **`types`** *string array*
+
+    types indicate what interface(s) the plugin currently implements.
+
+    currently supported:
+
+    - **docker.volumedriver/1.0**
+
+    - **docker.networkdriver/1.0**
+
+    - **docker.ipamdriver/1.0**
+
+    - **docker.authz/1.0**
+
+    - **docker.logdriver/1.0**
+
+    - **docker.metricscollector/1.0**
+
+  - **`socket`** *string*
+
+    socket is the name of the socket the engine should use to communicate with the plugins.
+    the socket will be created in `/run/docker/plugins`.
+
+
+- **`entrypoint`** *string array*
+
+  entrypoint of the plugin, see [`ENTRYPOINT`](../reference/builder.md#entrypoint)
+
+- **`workdir`** *string*
+
+  workdir of the plugin, see [`WORKDIR`](../reference/builder.md#workdir)
+
+- **`network`** *PluginNetwork*
+
+  network of the plugin, struct consisting of the following fields
+
+  - **`type`** *string*
+
+    network type.
+
+    currently supported:
+
+    - **bridge**
+    - **host**
+    - **none**
+
+- **`mounts`** *PluginMount array*
+
+  mount of the plugin, struct consisting of the following fields, see [`MOUNTS`](https://github.com/opencontainers/runtime-spec/blob/master/config.md#mounts)
+
+  - **`name`** *string*
+
+    name of the mount.
+
+  - **`description`** *string*
+
+    description of the mount.
+
+  - **`source`** *string*
+
+    source of the mount.
+
+  - **`destination`** *string*
+
+    destination of the mount.
+
+  - **`type`** *string*
+
+    mount type.
+
+  - **`options`** *string array*
+
+    options of the mount.
+
+- **`ipchost`** *boolean*
+
+  Access to the host IPC namespace.
+
+- **`pidhost`** *boolean*
+
+  Access to the host PID namespace.
+
+- **`propagatedMount`** *string*
+
+  path to be mounted as rshared, so that mounts under that path are visible to docker. This is useful for volume plugins.
+  This path will be bind-mounted outside of the plugin rootfs so its contents
+  are preserved on upgrade.
+
+- **`env`** *PluginEnv array*
+
+  env of the plugin, struct consisting of the following fields
+
+  - **`name`** *string*
+
+    name of the env.
+
+  - **`description`** *string*
+
+    description of the env.
+
+  - **`value`** *string*
+
+    value of the env.
+
+- **`args`** *PluginArgs*
+
+  args of the plugin, struct consisting of the following fields
+
+  - **`name`** *string*
+
+    name of the args.
+
+  - **`description`** *string*
+
+    description of the args.
+
+  - **`value`** *string array*
+
+    values of the args.
+
+- **`linux`** *PluginLinux*
+
+  - **`capabilities`** *string array*
+
+    capabilities of the plugin (*Linux only*), see list [`here`](https://github.com/opencontainers/runc/blob/master/libcontainer/SPEC.md#security)
+
+  - **`allowAllDevices`** *boolean*
+
+    If `/dev` is bind mounted from the host, and `allowAllDevices` is set to true, the plugin will have `rwm` access to all devices on the host.
+
+  - **`devices`** *PluginDevice array*
+
+    device of the plugin (*Linux only*), struct consisting of the following fields, see [`DEVICES`](https://github.com/opencontainers/runtime-spec/blob/master/config-linux.md#devices)
+
+    - **`name`** *string*
+
+      name of the device.
+
+    - **`description`** *string*
+
+      description of the device.
+
+    - **`path`** *string*
+
+      path of the device.
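+
+Only fields whose config entry lists them in `Settable` can be changed after the
+plugin is installed. As a sketch (the plugin name is hypothetical, and the
+plugin must be disabled while its settings are changed):
+
+```bash
+# Change the settable "value" of the DEBUG env declared in config.json
+docker plugin set sample/sample-plugin DEBUG=1
+```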
+ +## Example Config + +*Example showing the 'tiborvass/sample-volume-plugin' plugin config.* + +```json +{ + "Args": { + "Description": "", + "Name": "", + "Settable": null, + "Value": null + }, + "Description": "A sample volume plugin for Docker", + "Documentation": "https://docs.docker.com/engine/extend/plugins/", + "Entrypoint": [ + "/usr/bin/sample-volume-plugin", + "/data" + ], + "Env": [ + { + "Description": "", + "Name": "DEBUG", + "Settable": [ + "value" + ], + "Value": "0" + } + ], + "Interface": { + "Socket": "plugin.sock", + "Types": [ + "docker.volumedriver/1.0" + ] + }, + "Linux": { + "Capabilities": null, + "AllowAllDevices": false, + "Devices": null + }, + "Mounts": null, + "Network": { + "Type": "" + }, + "PropagatedMount": "/data", + "User": {}, + "Workdir": "" +} +``` diff --git a/components/engine/docs/extend/images/authz_additional_info.png b/components/engine/docs/extend/images/authz_additional_info.png new file mode 100644 index 0000000000..1a6a6d01d2 Binary files /dev/null and b/components/engine/docs/extend/images/authz_additional_info.png differ diff --git a/components/engine/docs/extend/images/authz_allow.png b/components/engine/docs/extend/images/authz_allow.png new file mode 100644 index 0000000000..f42108040b Binary files /dev/null and b/components/engine/docs/extend/images/authz_allow.png differ diff --git a/components/engine/docs/extend/images/authz_chunked.png b/components/engine/docs/extend/images/authz_chunked.png new file mode 100644 index 0000000000..5bde2c71f9 Binary files /dev/null and b/components/engine/docs/extend/images/authz_chunked.png differ diff --git a/components/engine/docs/extend/images/authz_connection_hijack.png b/components/engine/docs/extend/images/authz_connection_hijack.png new file mode 100644 index 0000000000..f13a2987b2 Binary files /dev/null and b/components/engine/docs/extend/images/authz_connection_hijack.png differ diff --git a/components/engine/docs/extend/images/authz_deny.png b/components/engine/docs/extend/images/authz_deny.png new file mode 100644 index 0000000000..fa4a48584a Binary files /dev/null and b/components/engine/docs/extend/images/authz_deny.png differ diff --git a/components/engine/docs/extend/index.md b/components/engine/docs/extend/index.md new file mode 100644 index 0000000000..2fe2d83687 --- /dev/null +++ b/components/engine/docs/extend/index.md @@ -0,0 +1,262 @@ +--- +description: Develop and use a plugin with the managed plugin system +keywords: "API, Usage, plugins, documentation, developer" +title: Managed plugin system +--- + + + +# Docker Engine managed plugin system + +* [Installing and using a plugin](index.md#installing-and-using-a-plugin) +* [Developing a plugin](index.md#developing-a-plugin) +* [Debugging plugins](index.md#debugging-plugins) + +Docker Engine's plugin system allows you to install, start, stop, and remove +plugins using Docker Engine. + +For information about the legacy plugin system available in Docker Engine 1.12 +and earlier, see [Understand legacy Docker Engine plugins](legacy_plugins.md). + +> **Note**: Docker Engine managed plugins are currently not supported +on Windows daemons. + +## Installing and using a plugin + +Plugins are distributed as Docker images and can be hosted on Docker Hub or on +a private registry. + +To install a plugin, use the `docker plugin install` command, which pulls the +plugin from Docker Hub or your private registry, prompts you to grant +permissions or capabilities if necessary, and enables the plugin. 
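+
+In scripted setups you may want to skip the interactive privilege prompt; the
+install command accepts a flag for that (the plugin shown is the one used in
+the walkthrough below):
+
+```bash
+# Install and grant every privilege the plugin requests, without prompting
+docker plugin install --grant-all-permissions vieux/sshfs
+```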
+ +To check the status of installed plugins, use the `docker plugin ls` command. +Plugins that start successfully are listed as enabled in the output. + +After a plugin is installed, you can use it as an option for another Docker +operation, such as creating a volume. + +In the following example, you install the `sshfs` plugin, verify that it is +enabled, and use it to create a volume. + +> **Note**: This example is intended for instructional purposes only. Once the volume is created, your SSH password to the remote host will be exposed as plaintext when inspecting the volume. You should delete the volume as soon as you are done with the example. + +1. Install the `sshfs` plugin. + + ```bash + $ docker plugin install vieux/sshfs + + Plugin "vieux/sshfs" is requesting the following privileges: + - network: [host] + - capabilities: [CAP_SYS_ADMIN] + Do you grant the above permissions? [y/N] y + + vieux/sshfs + ``` + + The plugin requests 2 privileges: + + - It needs access to the `host` network. + - It needs the `CAP_SYS_ADMIN` capability, which allows the plugin to run + the `mount` command. + +2. Check that the plugin is enabled in the output of `docker plugin ls`. + + ```bash + $ docker plugin ls + + ID NAME TAG DESCRIPTION ENABLED + 69553ca1d789 vieux/sshfs latest the `sshfs` plugin true + ``` + +3. Create a volume using the plugin. + This example mounts the `/remote` directory on host `1.2.3.4` into a + volume named `sshvolume`. + + This volume can now be mounted into containers. + + ```bash + $ docker volume create \ + -d vieux/sshfs \ + --name sshvolume \ + -o sshcmd=user@1.2.3.4:/remote \ + -o password=$(cat file_containing_password_for_remote_host) + + sshvolume + ``` +4. Verify that the volume was created successfully. + + ```bash + $ docker volume ls + + DRIVER NAME + vieux/sshfs sshvolume + ``` + +5. Start a container that uses the volume `sshvolume`. + + ```bash + $ docker run --rm -v sshvolume:/data busybox ls /data + + + ``` + +6. Remove the volume `sshvolume` + ```bash + docker volume rm sshvolume + + sshvolume + ``` +To disable a plugin, use the `docker plugin disable` command. To completely +remove it, use the `docker plugin remove` command. For other available +commands and options, see the +[command line reference](../reference/commandline/index.md). + + +## Developing a plugin + +#### The rootfs directory +The `rootfs` directory represents the root filesystem of the plugin. In this +example, it was created from a Dockerfile: + +>**Note:** The `/run/docker/plugins` directory is mandatory inside of the +plugin's filesystem for docker to communicate with the plugin. + +```bash +$ git clone https://github.com/vieux/docker-volume-sshfs +$ cd docker-volume-sshfs +$ docker build -t rootfsimage . +$ id=$(docker create rootfsimage true) # id was cd851ce43a403 when the image was created +$ sudo mkdir -p myplugin/rootfs +$ sudo docker export "$id" | sudo tar -x -C myplugin/rootfs +$ docker rm -vf "$id" +$ docker rmi rootfsimage +``` + +#### The config.json file + +The `config.json` file describes the plugin. See the [plugins config reference](config.md). + +Consider the following `config.json` file. + +```json +{ + "description": "sshFS plugin for Docker", + "documentation": "https://docs.docker.com/engine/extend/plugins/", + "entrypoint": ["/docker-volume-sshfs"], + "network": { + "type": "host" + }, + "interface" : { + "types": ["docker.volumedriver/1.0"], + "socket": "sshfs.sock" + }, + "linux": { + "capabilities": ["CAP_SYS_ADMIN"] + } +} +``` + +This plugin is a volume driver. 
It requires a `host` network and the
+`CAP_SYS_ADMIN` capability. It depends upon the `/docker-volume-sshfs`
+entrypoint and uses the `/run/docker/plugins/sshfs.sock` socket to communicate
+with Docker Engine. This plugin has no runtime parameters.
+
+#### Creating the plugin
+
+A new plugin can be created by running
+`docker plugin create <plugin-name> ./path/to/plugin/data` where the plugin
+data contains a plugin configuration file `config.json` and a root filesystem
+in subdirectory `rootfs`.
+
+After that the plugin `<plugin-name>` will show up in `docker plugin ls`.
+Plugins can be pushed to remote registries with
+`docker plugin push <plugin-name>`.
+
+
+## Debugging plugins
+
+Stdout of a plugin is redirected to dockerd logs. Such entries have a
+`plugin=<ID>` suffix. Here are a few examples of commands for pluginID
+`f52a3df433b9aceee436eaada0752f5797aab1de47e5485f1690a073b860ff62` and their
+corresponding log entries in the docker daemon logs.
+
+```bash
+$ docker plugin install tiborvass/sample-volume-plugins
+
+INFO[0036] Starting...       Found 0 volumes on startup  plugin=f52a3df433b9aceee436eaada0752f5797aab1de47e5485f1690a073b860ff62
+```
+
+```bash
+$ docker volume create -d tiborvass/sample-volume-plugins samplevol
+
+INFO[0193] Create Called...  Ensuring directory /data/samplevol exists on host...  plugin=f52a3df433b9aceee436eaada0752f5797aab1de47e5485f1690a073b860ff62
+INFO[0193] open /var/lib/docker/plugin-data/local-persist.json: no such file or directory  plugin=f52a3df433b9aceee436eaada0752f5797aab1de47e5485f1690a073b860ff62
+INFO[0193] Created volume samplevol with mountpoint /data/samplevol  plugin=f52a3df433b9aceee436eaada0752f5797aab1de47e5485f1690a073b860ff62
+INFO[0193] Path Called...    Returned path /data/samplevol  plugin=f52a3df433b9aceee436eaada0752f5797aab1de47e5485f1690a073b860ff62
+```
+
+```bash
+$ docker run -v samplevol:/tmp busybox sh
+
+INFO[0421] Get Called...     Found samplevol  plugin=f52a3df433b9aceee436eaada0752f5797aab1de47e5485f1690a073b860ff62
+INFO[0421] Mount Called...   Mounted samplevol  plugin=f52a3df433b9aceee436eaada0752f5797aab1de47e5485f1690a073b860ff62
+INFO[0421] Path Called...    Returned path /data/samplevol  plugin=f52a3df433b9aceee436eaada0752f5797aab1de47e5485f1690a073b860ff62
+INFO[0421] Unmount Called... Unmounted samplevol  plugin=f52a3df433b9aceee436eaada0752f5797aab1de47e5485f1690a073b860ff62
+```
+
+#### Using docker-runc to obtain logfiles and shell into the plugin.
+
+`docker-runc`, the default docker container runtime, can be used for debugging
+plugins. This is especially useful for collecting plugin logs if they are
+redirected to a file.
+
+```bash
+$ docker-runc list
+ID                                                                 PID     STATUS    BUNDLE                                                                                                 CREATED
+f52a3df433b9aceee436eaada0752f5797aab1de47e5485f1690a073b860ff62   2679    running   /run/docker/libcontainerd/f52a3df433b9aceee436eaada0752f5797aab1de47e5485f1690a073b860ff62            2017-02-06T21:53:03.031537592Z
+```
+
+```bash
+$ docker-runc exec f52a3df433b9aceee436eaada0752f5797aab1de47e5485f1690a073b860ff62 cat /var/log/plugin.log
+```
+
+If the plugin has a built-in shell, you can exec into the plugin as follows:
+```bash
+$ docker-runc exec -t f52a3df433b9aceee436eaada0752f5797aab1de47e5485f1690a073b860ff62 sh
+```
+
+#### Using curl to debug plugin socket issues.
+
+To verify whether the plugin API socket that the docker daemon communicates with
+is responsive, use curl. In this example, we will make API calls from the
+docker host to volume and network plugins using curl 7.47.0 to ensure that
+the plugin is listening on that socket.
+For a well-functioning plugin,
+these basic requests should work. Note that plugin sockets are available on the host under `/var/run/docker/plugins/`.
+
+
+```bash
+curl -H "Content-Type: application/json" -XPOST -d '{}' --unix-socket /var/run/docker/plugins/e8a37ba56fc879c991f7d7921901723c64df6b42b87e6a0b055771ecf8477a6d/plugin.sock http:/VolumeDriver.List
+
+{"Mountpoint":"","Err":"","Volumes":[{"Name":"myvol1","Mountpoint":"/data/myvol1"},{"Name":"myvol2","Mountpoint":"/data/myvol2"}],"Volume":null}
+```
+
+```bash
+curl -H "Content-Type: application/json" -XPOST -d '{}' --unix-socket /var/run/docker/plugins/45e00a7ce6185d6e365904c8bcf62eb724b1fe307e0d4e7ecc9f6c1eb7bcdb70/plugin.sock http:/NetworkDriver.GetCapabilities
+
+{"Scope":"local"}
+```
+When using curl 7.5 and above, the URL should be of the form
+`http://hostname/APICall`, where `hostname` is the hostname where the
+plugin is installed and `APICall` is the call to the plugin API.
+
+For example, `http://localhost/VolumeDriver.List`
diff --git a/components/engine/docs/extend/legacy_plugins.md b/components/engine/docs/extend/legacy_plugins.md
new file mode 100644
index 0000000000..68bba59f46
--- /dev/null
+++ b/components/engine/docs/extend/legacy_plugins.md
@@ -0,0 +1,101 @@
+---
+redirect_from:
+- "/engine/extend/plugins/"
+title: "Use Docker Engine plugins"
+description: "How to add additional functionality to Docker with plugin extensions"
+keywords: "Examples, Usage, plugins, docker, documentation, user guide"
+---
+
+
+
+This document describes the Docker Engine plugins generally available in Docker
+Engine. To view information on plugins managed by Docker,
+refer to [Docker Engine plugin system](index.md).
+
+You can extend the capabilities of the Docker Engine by loading third-party
+plugins. This page explains the types of plugins and provides links to several
+volume and network plugins for Docker.
+
+## Types of plugins
+
+Plugins extend Docker's functionality. They come in specific types. For
+example, a [volume plugin](plugins_volume.md) might enable Docker
+volumes to persist across multiple Docker hosts and a
+[network plugin](plugins_network.md) might provide network plumbing.
+
+Currently Docker supports authorization, volume, and network driver plugins. In the future it
+will support additional plugin types.
+
+## Installing a plugin
+
+Follow the instructions in the plugin's documentation.
+
+## Finding a plugin
+
+The sections below provide a non-exhaustive overview of available plugins.
+
+
+
+### Network plugins
+
+Plugin | Description
+------ | -----------
+[Contiv Networking](https://github.com/contiv/netplugin) | An open source network plugin to provide infrastructure and security policies for a multi-tenant micro services deployment, while providing integration with the physical network for non-container workloads. Contiv Networking implements the remote driver and IPAM APIs available in Docker 1.9 onwards.
+[Kuryr Network Plugin](https://github.com/openstack/kuryr) | A network plugin developed as part of the OpenStack Kuryr project that implements the Docker networking (libnetwork) remote driver API by utilizing Neutron, the OpenStack networking service. It includes an IPAM driver as well.
+[Weave Network Plugin](https://www.weave.works/docs/net/latest/introducing-weave/) | A network plugin that creates a virtual network that connects your Docker containers across multiple hosts or clouds and enables automatic discovery of applications. Weave networks are resilient, partition tolerant, secure, and work in partially connected networks and other adverse environments, all configured with delightful simplicity.
+
+### Volume plugins
+
+Plugin | Description
+------ | -----------
+[Azure File Storage plugin](https://github.com/Azure/azurefile-dockervolumedriver) | Lets you mount Microsoft [Azure File Storage](https://azure.microsoft.com/blog/azure-file-storage-now-generally-available/) shares to Docker containers as volumes using the SMB 3.0 protocol. [Learn more](https://azure.microsoft.com/blog/persistent-docker-volumes-with-azure-file-storage/).
+[BeeGFS Volume Plugin](https://github.com/RedCoolBeans/docker-volume-beegfs) | An open source volume plugin to create persistent volumes in a BeeGFS parallel file system.
+[Blockbridge plugin](https://github.com/blockbridge/blockbridge-docker-volume) | A volume plugin that provides access to an extensible set of container-based persistent storage options. It supports single and multi-host Docker environments with features that include tenant isolation, automated provisioning, encryption, secure deletion, snapshots and QoS.
+[Contiv Volume Plugin](https://github.com/contiv/volplugin) | An open source volume plugin that provides multi-tenant, persistent, distributed storage with intent based consumption. It has support for Ceph and NFS.
+[Convoy plugin](https://github.com/rancher/convoy) | A volume plugin for a variety of storage back-ends including device mapper and NFS. It's a simple standalone executable written in Go and provides the framework to support vendor-specific extensions such as snapshots, backups and restore.
+[DigitalOcean Block Storage plugin](https://github.com/omallo/docker-volume-plugin-dostorage) | Integrates DigitalOcean's [block storage solution](https://www.digitalocean.com/products/storage/) into the Docker ecosystem by automatically attaching a given block storage volume to a DigitalOcean droplet and making the contents of the volume available to Docker containers running on that droplet.
+[DRBD plugin](https://www.drbd.org/en/supported-projects/docker) | A volume plugin that provides highly available storage replicated by [DRBD](https://www.drbd.org). Data written to the docker volume is replicated in a cluster of DRBD nodes.
+[Flocker plugin](https://clusterhq.com/docker-plugin/) | A volume plugin that provides multi-host portable volumes for Docker, enabling you to run databases and other stateful containers and move them across a cluster of machines.
+[Fuxi Volume Plugin](https://github.com/openstack/fuxi) | A volume plugin that is developed as part of the OpenStack Kuryr project and implements the Docker volume plugin API by utilizing Cinder, the OpenStack block storage service.
+[gce-docker plugin](https://github.com/mcuadros/gce-docker) | A volume plugin able to attach, format and mount Google Compute [persistent-disks](https://cloud.google.com/compute/docs/disks/persistent-disks).
+[GlusterFS plugin](https://github.com/calavera/docker-volume-glusterfs) | A volume plugin that provides multi-host volume management for Docker using GlusterFS.
+[Horcrux Volume Plugin](https://github.com/muthu-r/horcrux) | A volume plugin that allows on-demand, version-controlled access to your data. Horcrux is an open-source plugin, written in Go, and supports SCP, [Minio](https://www.minio.io) and Amazon S3.
+[HPE 3Par Volume Plugin](https://github.com/hpe-storage/python-hpedockerplugin/) | A volume plugin that supports HPE 3Par and StoreVirtual iSCSI storage arrays.
+[IPFS Volume Plugin](http://github.com/vdemeester/docker-volume-ipfs) | An open source volume plugin that allows using an [ipfs](https://ipfs.io/) filesystem as a volume.
+[Keywhiz plugin](https://github.com/calavera/docker-volume-keywhiz) | A plugin that provides credentials and secret management using Keywhiz as a central repository.
+[Local Persist Plugin](https://github.com/CWSpear/local-persist) | A volume plugin that extends the default `local` driver's functionality by allowing you to specify a mountpoint anywhere on the host, which enables the files to *always persist*, even if the volume is removed via `docker volume rm`.
+[NetApp Plugin](https://github.com/NetApp/netappdvp) (nDVP) | A volume plugin that provides direct integration with the Docker ecosystem for the NetApp storage portfolio. The nDVP package supports the provisioning and management of storage resources from the storage platform to Docker hosts, with a robust framework for adding additional platforms in the future.
+[Netshare plugin](https://github.com/ContainX/docker-volume-netshare) | A volume plugin that provides volume management for NFS 3/4, AWS EFS and CIFS file systems.
+[Nimble Storage Volume Plugin](https://connect.nimblestorage.com/community/app-integration/docker)| A volume plug-in that integrates with Nimble Storage Unified Flash Fabric arrays. The plug-in abstracts array volume capabilities to the Docker administrator to allow self-provisioning of secure multi-tenant volumes and clones.
+[OpenStorage Plugin](https://github.com/libopenstorage/openstorage) | A cluster-aware volume plugin that provides volume management for file and block storage solutions. It implements a vendor neutral specification for implementing extensions such as CoS, encryption, and snapshots. It has example drivers based on FUSE, NFS, NBD and EBS to name a few.
+[Portworx Volume Plugin](https://github.com/portworx/px-dev) | A volume plugin that turns any server into a scale-out converged compute/storage node, providing container granular storage and highly available volumes across any node, using a shared-nothing storage backend that works with any docker scheduler.
+[Quobyte Volume Plugin](https://github.com/quobyte/docker-volume) | A volume plugin that connects Docker to [Quobyte](http://www.quobyte.com/containers)'s data center file system, a general-purpose scalable and fault-tolerant storage platform.
+[REX-Ray plugin](https://github.com/emccode/rexray) | A volume plugin which is written in Go and provides advanced storage functionality for many platforms including VirtualBox, EC2, Google Compute Engine, OpenStack, and EMC.
+[Virtuozzo Storage and Ploop plugin](https://github.com/virtuozzo/docker-volume-ploop) | A volume plugin with support for the Virtuozzo Storage distributed cloud file system as well as ploop devices.
+[VMware vSphere Storage Plugin](https://github.com/vmware/docker-volume-vsphere) | Docker Volume Driver for vSphere enables customers to address persistent storage requirements for Docker containers in vSphere environments.
+
+### Authorization plugins
+
+ Plugin | Description
+------ | -----------
+ [Twistlock AuthZ Broker](https://github.com/twistlock/authz) | A basic, extendable authorization plugin that runs directly on the host or inside a container. This plugin allows you to define user policies that it evaluates during authorization. Basic authorization is provided if the Docker daemon is started with the `--tlsverify` flag (the username is extracted from the certificate common name).
+ [HBM plugin](https://github.com/kassisol/hbm) | An authorization plugin that prevents executing commands with certain parameters.
+
+## Troubleshooting a plugin
+
+If you are having problems with Docker after loading a plugin, ask the authors
+of the plugin for help. The Docker team may not be able to assist you.
+
+## Writing a plugin
+
+If you are interested in writing a plugin for Docker, or seeing how they work
+under the hood, see the [docker plugins reference](plugin_api.md).
diff --git a/components/engine/docs/extend/plugin_api.md b/components/engine/docs/extend/plugin_api.md
new file mode 100644
index 0000000000..693b77a2f3
--- /dev/null
+++ b/components/engine/docs/extend/plugin_api.md
@@ -0,0 +1,196 @@
+---
+title: "Plugins API"
+description: "How to write Docker plugin extensions"
+keywords: "API, Usage, plugins, documentation, developer"
+---
+
+
+
+# Docker Plugin API
+
+Docker plugins are out-of-process extensions which add capabilities to the
+Docker Engine.
+
+This document describes the Docker Engine plugin API. To view information on
+plugins managed by Docker Engine, refer to [Docker Engine plugin system](index.md).
+
+This page is intended for people who want to develop their own Docker plugin.
+If you just want to learn about or use Docker plugins, look
+[here](legacy_plugins.md).
+
+## What plugins are
+
+A plugin is a process running on the same or a different host as the docker daemon,
+which registers itself by placing a file on the same docker host in one of the plugin
+directories described in [Plugin discovery](#plugin-discovery).
+
+Plugins have human-readable names, which are short, lowercase strings. For
+example, `flocker` or `weave`.
+
+Plugins can run inside or outside containers. Currently running them outside
+containers is recommended.
+
+## Plugin discovery
+
+Docker discovers plugins by looking for them in the plugin directory whenever a
+user or container tries to use one by name.
+
+There are three types of files which can be put in the plugin directory.
+
+* `.sock` files are UNIX domain sockets.
+* `.spec` files are text files containing a URL, such as `unix:///other.sock` or `tcp://localhost:8080`.
+* `.json` files are text files containing a full JSON specification for the plugin.
+
+Plugins with UNIX domain socket files must run on the same docker host, whereas
+plugins with spec or json files can run on a different host if a remote URL is specified.
+
+UNIX domain socket files must be located under `/run/docker/plugins`, whereas
+spec files can be located either under `/etc/docker/plugins` or `/usr/lib/docker/plugins`.
+
+The name of the file (excluding the extension) determines the plugin name.
+
+For example, the `flocker` plugin might create a UNIX socket at
+`/run/docker/plugins/flocker.sock`.
+
+You can place each plugin in a separate subdirectory if you want to isolate definitions from each other.
+For example, you can create the `flocker` socket under `/run/docker/plugins/flocker/flocker.sock` and only
+mount `/run/docker/plugins/flocker` inside the `flocker` container.
+
+Docker always searches for unix sockets in `/run/docker/plugins` first. It checks for spec or json files under
+`/etc/docker/plugins` and `/usr/lib/docker/plugins` if the socket doesn't exist. The directory scan stops as
+soon as it finds the first plugin definition with the given name.
+
+### JSON specification
+
+This is the JSON format for a plugin:
+
+```json
+{
+  "Name": "plugin-example",
+  "Addr": "https://example.com/docker/plugin",
+  "TLSConfig": {
+    "InsecureSkipVerify": false,
+    "CAFile": "/usr/shared/docker/certs/example-ca.pem",
+    "CertFile": "/usr/shared/docker/certs/example-cert.pem",
+    "KeyFile": "/usr/shared/docker/certs/example-key.pem"
+  }
+}
+```
+
+The `TLSConfig` field is optional and TLS will only be verified if this configuration is present.
+
+## Plugin lifecycle
+
+Plugins should be started before Docker, and stopped after Docker. For
+example, when packaging a plugin for a platform which supports `systemd`, you
+might use [`systemd` dependencies](http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Before=) to
+manage startup and shutdown order.
+
+When upgrading a plugin, you should first stop the Docker daemon, upgrade the
+plugin, then start Docker again.
+
+## Plugin activation
+
+When a plugin is first referred to -- either by a user referring to it by name
+(e.g. `docker run --volume-driver=foo`) or a container already configured to
+use a plugin being started -- Docker looks for the named plugin in the plugin
+directory and activates it with a handshake. See Handshake API below.
+
+Plugins are *not* activated automatically at Docker daemon startup. Rather,
+they are activated only lazily, or on-demand, when they are needed.
+
+## Systemd socket activation
+
+Plugins may also be socket activated by `systemd`. The official [Plugins helpers](https://github.com/docker/go-plugins-helpers)
+natively support socket activation. In order for a plugin to be socket activated, it needs
+a `service` file and a `socket` file.
+
+The `service` file (for example `/lib/systemd/system/your-plugin.service`):
+
+```
+[Unit]
+Description=Your plugin
+Before=docker.service
+After=network.target your-plugin.socket
+Requires=your-plugin.socket docker.service
+
+[Service]
+ExecStart=/usr/lib/docker/your-plugin
+
+[Install]
+WantedBy=multi-user.target
+```
+
+The `socket` file (for example `/lib/systemd/system/your-plugin.socket`):
+
+```
+[Unit]
+Description=Your plugin
+
+[Socket]
+ListenStream=/run/docker/plugins/your-plugin.sock
+
+[Install]
+WantedBy=sockets.target
+```
+
+This allows plugins to be started only when the Docker daemon connects to
+the sockets they're listening on (for instance the first time the daemon uses them,
+or if one of the plugins goes down accidentally).
+
+## API design
+
+The Plugin API is RPC-style JSON over HTTP, much like webhooks.
+
+Requests flow *from* the Docker daemon *to* the plugin. So the plugin needs to
+implement an HTTP server and bind this to the UNIX socket mentioned in the
+"plugin discovery" section.
+
+All requests are HTTP `POST` requests.
+
+The API is versioned via an Accept header, which currently is always set to
+`application/vnd.docker.plugins.v1+json`.
+
+## Handshake API
+
+Plugins are activated via the following "handshake" API call.
+
+### /Plugin.Activate
+
+**Request:** empty body
+
+**Response:**
+```
+{
+    "Implements": ["VolumeDriver"]
+}
+```
+
+Responds with a list of Docker subsystems which this plugin implements.
+After activation, the plugin will then be sent events from these subsystems.
+
+Possible values are:
+
+* [`authz`](plugins_authorization.md)
+* [`NetworkDriver`](plugins_network.md)
+* [`VolumeDriver`](plugins_volume.md)
+
+
+## Plugin retries
+
+Attempts to call a method on a plugin are retried with an exponential backoff
+for up to 30 seconds. This may help when packaging plugins as containers, since
+it gives plugin containers a chance to start up before failing any user
+containers which depend on them.
+
+## Plugins helpers
+
+To ease plugin development, an SDK for each kind of plugin currently supported
+by Docker is provided at [docker/go-plugins-helpers](https://github.com/docker/go-plugins-helpers).
diff --git a/components/engine/docs/extend/plugins_authorization.md b/components/engine/docs/extend/plugins_authorization.md
new file mode 100644
index 0000000000..ac1837f754
--- /dev/null
+++ b/components/engine/docs/extend/plugins_authorization.md
@@ -0,0 +1,260 @@
+---
+title: "Access authorization plugin"
+description: "How to create authorization plugins to manage access control to your Docker daemon."
+keywords: "security, authorization, authentication, docker, documentation, plugin, extend"
+redirect_from:
+- "/engine/extend/authorization/"
+---
+
+# Create an authorization plugin
+
+This document describes the Docker Engine plugins generally available in Docker
+Engine. To view information on plugins managed by Docker Engine,
+refer to [Docker Engine plugin system](index.md).
+
+Docker's out-of-the-box authorization model is all or nothing. Any user with
+permission to access the Docker daemon can run any Docker client command. The
+same is true for callers using Docker's Engine API to contact the daemon. If you
+require greater access control, you can create authorization plugins and add
+them to your Docker daemon configuration. Using an authorization plugin, a
+Docker administrator can configure granular access policies for managing access
+to the Docker daemon.
+
+Anyone with the appropriate skills can develop an authorization plugin. These
+skills, at their most basic, are knowledge of Docker, understanding of REST, and
+sound programming knowledge. This document describes the architecture, state,
+and methods information available to an authorization plugin developer.
+
+## Basic principles
+
+Docker's [plugin infrastructure](plugin_api.md) enables
+extending Docker by loading, removing and communicating with
+third-party components using a generic API. The access authorization subsystem
+was built using this mechanism.
+
+Using this subsystem, you don't need to rebuild the Docker daemon to add an
+authorization plugin. You can add a plugin to an installed Docker daemon. You do
+need to restart the Docker daemon to add a new plugin.
+
+An authorization plugin approves or denies requests to the Docker daemon based
+on both the current authentication context and the command context. The
+authentication context contains all user details and the authentication method.
+The command context contains all the relevant request data.
+
+Authorization plugins must follow the rules described in [Docker Plugin API](plugin_api.md).
+Each plugin must reside within directories described under the
+[Plugin discovery](plugin_api.md#plugin-discovery) section.
+
+**Note**: The abbreviations `AuthZ` and `AuthN` mean authorization and authentication,
+respectively.
+
+## Default user authorization mechanism
+
+If TLS is enabled in the [Docker daemon](https://docs.docker.com/engine/security/https/), the default user authorization flow extracts the user details from the certificate subject name.
+That is, the `User` field is set to the client certificate subject common name, and the `AuthenticationMethod` field is set to `TLS`.
+
+## Basic architecture
+
+You are responsible for registering your plugin as part of the Docker daemon
+startup. You can install multiple plugins and chain them together. This chain
+can be ordered. Each request to the daemon passes in order through the chain.
+Access to the resource is granted only when all the plugins grant it.
+
+When an HTTP request is made to the Docker daemon through the CLI or via the
+Engine API, the authorization subsystem passes the request to the installed
+authorization plugin(s). The request contains the user (caller) and command
+context. The plugin is responsible for deciding whether to allow or deny the
+request.
+
+The sequence diagrams below depict an allow and deny authorization flow:
+
+![Authorization Allow flow](images/authz_allow.png)
+
+![Authorization Deny flow](images/authz_deny.png)
+
+Each request sent to the plugin includes the authenticated user, the HTTP
+headers, and the request/response body. Only the user name and the
+authentication method used are passed to the plugin. Most importantly, no user
+credentials or tokens are passed. Finally, not all request/response bodies
+are sent to the authorization plugin. Only those request/response bodies where
+the `Content-Type` is either `text/*` or `application/json` are sent.
+
+For commands that can potentially hijack the HTTP connection (`HTTP
+Upgrade`), such as `exec`, the authorization plugin is only called for the
+initial HTTP requests. Once the plugin approves the command, authorization is
+not applied to the rest of the flow. Specifically, the streaming data is not
+passed to the authorization plugins.
+For commands that return a chunked HTTP response, such as `logs` and `events`,
+only the HTTP request is sent to the authorization plugins.
+
+During request/response processing, some authorization flows might
+need to make additional queries to the Docker daemon. To complete such flows,
+plugins can call the daemon API similarly to a regular user. To enable these
+additional queries, the plugin must provide the means for an administrator to
+configure proper authentication and security policies.
+
+## Docker client flows
+
+To enable and configure the authorization plugin, the plugin developer must
+support the Docker client interactions detailed in this section.
+
+### Setting up Docker daemon
+
+Enable the authorization plugin with a dedicated command line flag in the
+`--authorization-plugin=PLUGIN_ID` format. The flag supplies a `PLUGIN_ID`
+value. This value can be the plugin’s socket or a path to a specification file.
+Authorization plugins can be loaded without restarting the daemon. Refer
+to the [`dockerd` documentation](../reference/commandline/dockerd.md#configuration-reloading) for more information.
+
+```bash
+$ dockerd --authorization-plugin=plugin1 --authorization-plugin=plugin2,...
+```
+
+Docker's authorization subsystem supports multiple `--authorization-plugin` parameters.
+
+### Calling authorized command (allow)
+
+```bash
+$ docker pull centos
+...
+f1b10cd84249: Pull complete
+...
+```
+
+### Calling unauthorized command (deny)
+
+```bash
+$ docker pull centos
+...
+docker: Error response from daemon: authorization denied by plugin PLUGIN_NAME: volumes are not allowed.
+```
+
+### Error from plugins
+
+```bash
+$ docker pull centos
+...
+docker: Error response from daemon: plugin PLUGIN_NAME failed with error: AuthZPlugin.AuthZReq: Cannot connect to the Docker daemon. Is the docker daemon running on this host?.
+```
+
+## API schema and implementation
+
+In addition to Docker's standard plugin registration method, each plugin
+should implement the following two methods:
+
+* `/AuthZPlugin.AuthZReq` This authorize request method is called before the Docker daemon processes the client request.
+
+* `/AuthZPlugin.AuthZRes` This authorize response method is called before the response is returned from Docker daemon to the client.
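+
+The sketch below shows the overall shape of such a plugin: an HTTP server on a
+UNIX socket in the plugin directory that answers the handshake and the two
+authorization endpoints. It is illustrative only, not an official
+implementation: the socket name and the "deny container deletion" policy are
+invented, and the payload fields used are the subset documented in the schemas
+below.
+
+```go
+package main
+
+import (
+	"encoding/json"
+	"net"
+	"net/http"
+	"strings"
+)
+
+// authZRequest holds a subset of the /AuthZPlugin.AuthZReq payload documented below.
+type authZRequest struct {
+	User            string
+	UserAuthNMethod string
+	RequestMethod   string
+	RequestURI      string
+}
+
+type authZResponse struct {
+	Allow bool   `json:"Allow"`
+	Msg   string `json:"Msg,omitempty"`
+	Err   string `json:"Err,omitempty"`
+}
+
+func main() {
+	mux := http.NewServeMux()
+
+	// Handshake: advertise the authorization subsystem.
+	mux.HandleFunc("/Plugin.Activate", func(w http.ResponseWriter, r *http.Request) {
+		w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json")
+		json.NewEncoder(w).Encode(map[string][]string{"Implements": {"authz"}})
+	})
+
+	mux.HandleFunc("/AuthZPlugin.AuthZReq", func(w http.ResponseWriter, r *http.Request) {
+		var req authZRequest
+		if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
+			json.NewEncoder(w).Encode(authZResponse{Err: err.Error()})
+			return
+		}
+		// Example policy (invented): deny DELETE requests against containers.
+		if req.RequestMethod == "DELETE" && strings.Contains(req.RequestURI, "/containers/") {
+			json.NewEncoder(w).Encode(authZResponse{Allow: false, Msg: "container deletion is not allowed"})
+			return
+		}
+		json.NewEncoder(w).Encode(authZResponse{Allow: true})
+	})
+
+	// This sketch allows all responses unconditionally.
+	mux.HandleFunc("/AuthZPlugin.AuthZRes", func(w http.ResponseWriter, r *http.Request) {
+		json.NewEncoder(w).Encode(authZResponse{Allow: true})
+	})
+
+	// Listen on a UNIX socket in the plugin directory (see "Plugin discovery").
+	l, err := net.Listen("unix", "/run/docker/plugins/myauthz.sock")
+	if err != nil {
+		panic(err)
+	}
+	http.Serve(l, mux)
+}
+```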
+
+#### /AuthZPlugin.AuthZReq
+
+**Request**:
+
+```json
+{
+    "User": "The user identification",
+    "UserAuthNMethod": "The authentication method used",
+    "RequestMethod": "The HTTP method",
+    "RequestURI": "The HTTP request URI",
+    "RequestBody": "Byte array containing the raw HTTP request body",
+    "RequestHeader": "Byte array containing the raw HTTP request header as a map[string][]string"
+}
+```
+
+**Response**:
+
+```json
+{
+    "Allow": "Determines whether the user is allowed or not",
+    "Msg": "The authorization message",
+    "Err": "The error message if things go wrong"
+}
+```
+
+#### /AuthZPlugin.AuthZRes
+
+**Request**:
+
+```json
+{
+    "User": "The user identification",
+    "UserAuthNMethod": "The authentication method used",
+    "RequestMethod": "The HTTP method",
+    "RequestURI": "The HTTP request URI",
+    "RequestBody": "Byte array containing the raw HTTP request body",
+    "RequestHeader": "Byte array containing the raw HTTP request header as a map[string][]string",
+    "ResponseBody": "Byte array containing the raw HTTP response body",
+    "ResponseHeader": "Byte array containing the raw HTTP response header as a map[string][]string",
+    "ResponseStatusCode": "Response status code"
+}
```
+
+**Response**:
+
+```json
+{
+    "Allow": "Determines whether the user is allowed or not",
+    "Msg": "The authorization message",
+    "Err": "The error message if things go wrong"
+}
+```
+
+### Request authorization
+
+Each plugin must support two request authorization message formats, one from the daemon to the plugin and then from the plugin to the daemon. The tables below detail the content expected in each message.
+
+#### Daemon -> Plugin
+
+Name                   | Type              | Description
+-----------------------|-------------------|-------------------------------------------------------
+User                   | string            | The user identification
+Authentication method  | string            | The authentication method used
+Request method         | enum              | The HTTP method (GET/DELETE/POST)
+Request URI            | string            | The HTTP request URI including API version (e.g., v.1.17/containers/json)
+Request headers        | map[string]string | Request headers as key value pairs (without the authorization header)
+Request body           | []byte            | Raw request body
+
+
+#### Plugin -> Daemon
+
+Name    | Type   | Description
+--------|--------|----------------------------------------------------------------------------------
+Allow   | bool   | Boolean value indicating whether the request is allowed or denied
+Msg     | string | Authorization message (will be returned to the client in case the access is denied)
+Err     | string | Error message (will be returned to the client in case the plugin encounters an error. The string value supplied may appear in logs, so should not include confidential information)
+
+### Response authorization
+
+The plugin must support two response authorization message formats, one from the daemon to the plugin and then from the plugin to the daemon. The tables below detail the content expected in each message.
+
+#### Daemon -> Plugin
+
+
+Name                    | Type              | Description
+----------------------- |------------------ |----------------------------------------------------
+User                    | string            | The user identification
+Authentication method   | string            | The authentication method used
+Request method          | string            | The HTTP method (GET/DELETE/POST)
+Request URI             | string            | The HTTP request URI including API version (e.g., v.1.17/containers/json)
+Request headers         | map[string]string | Request headers as key value pairs (without the authorization header)
+Request body            | []byte            | Raw request body
+Response status code    | int               | Status code from the docker daemon
+Response headers        | map[string]string | Response headers as key value pairs
+Response body           | []byte            | Raw docker daemon response body
+
+
+#### Plugin -> Daemon
+
+Name    | Type   | Description
+--------|--------|----------------------------------------------------------------------------------
+Allow   | bool   | Boolean value indicating whether the response is allowed or denied
+Msg     | string | Authorization message (will be returned to the client in case the access is denied)
+Err     | string | Error message (will be returned to the client in case the plugin encounters an error. The string value supplied may appear in logs, so should not include confidential information)
diff --git a/components/engine/docs/extend/plugins_graphdriver.md b/components/engine/docs/extend/plugins_graphdriver.md
new file mode 100644
index 0000000000..c134b1ebc4
--- /dev/null
+++ b/components/engine/docs/extend/plugins_graphdriver.md
@@ -0,0 +1,403 @@
+---
+title: "Graphdriver plugins"
+description: "How to manage image and container filesystems with external plugins"
+keywords: "Examples, Usage, storage, image, docker, data, graph, plugin, api"
+advisory: experimental
+---
+
+## Changelog
+
+### 1.13.0
+
+- Support v2 plugins
+
+# Docker graph driver plugins
+
+Docker graph driver plugins enable admins to use an external/out-of-process
+graph driver for use with Docker Engine. This is an alternative to using the
+built-in storage drivers, such as aufs/overlay/devicemapper/btrfs.
+
+You need to install and enable the plugin and then restart the Docker daemon
+before using the plugin. See the following example for the correct ordering
+of steps.
+
+```
+$ docker plugin install cpuguy83/docker-overlay2-graphdriver-plugin # this command also enables the driver
+
+$ pkill dockerd
+$ dockerd --experimental -s cpuguy83/docker-overlay2-graphdriver-plugin
+```
+
+# Write a graph driver plugin
+
+See the [plugin documentation](https://docs.docker.com/engine/extend/) for detailed information
+on the underlying plugin protocol.
+
+
+## Graph Driver plugin protocol
+
+If a plugin registers itself as a `GraphDriver` when activated, then it is
+expected to provide the rootfs for containers as well as image layer storage.
+
+### /GraphDriver.Init
+
+**Request**:
+```json
+{
+  "Home": "/graph/home/path",
+  "Opts": [],
+  "UIDMaps": [],
+  "GIDMaps": []
+}
```
+
+Initialize the graph driver plugin with a home directory and array of options.
+These are passed through from the user, but the plugin is not required to parse
+or honor them.
+
+The request also includes a list of UID and GID mappings, structured as follows:
+```json
+{
+  "ContainerID": 0,
+  "HostID": 0,
+  "Size": 0
+}
+```
+
+**Response**:
+```json
+{
+  "Err": ""
+}
+```
+
+Respond with a non-empty string error if an error occurred.
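+
+As an illustration of how these payloads map onto code, the Go types below
+mirror the `/GraphDriver.Init` request shown above. This is a sketch only: the
+type names (`IDMap`, `InitRequest`, `ErrResponse`) are invented for this
+example, while the field names follow the JSON.
+
+```go
+package graphplugin
+
+// IDMap mirrors one entry of the UIDMaps/GIDMaps lists shown above.
+type IDMap struct {
+	ContainerID int `json:"ContainerID"`
+	HostID      int `json:"HostID"`
+	Size        int `json:"Size"`
+}
+
+// InitRequest mirrors the /GraphDriver.Init request body.
+type InitRequest struct {
+	Home    string   `json:"Home"`
+	Opts    []string `json:"Opts"`
+	UIDMaps []IDMap  `json:"UIDMaps"`
+	GIDMaps []IDMap  `json:"GIDMaps"`
+}
+
+// ErrResponse is the common response shape: an empty Err means success.
+type ErrResponse struct {
+	Err string `json:"Err"`
+}
+```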
+
+
+### /GraphDriver.Capabilities
+
+**Request**:
+```json
+{}
+```
+
+Get behavioral characteristics of the graph driver. If a plugin does not handle
+this request, the engine will use default values for all capabilities.
+
+**Response**:
+```json
+{
+  "ReproducesExactDiffs": false
+}
+```
+
+Respond with values of capabilities:
+
+* **ReproducesExactDiffs** Defaults to false. Flags that this driver is capable
+of reproducing exactly equivalent diffs for read-only filesystem layers.
+
+
+### /GraphDriver.Create
+
+**Request**:
+```json
+{
+  "ID": "46fe8644f2572fd1e505364f7581e0c9dbc7f14640bd1fb6ce97714fb6fc5187",
+  "Parent": "2cd9c322cb78a55e8212aa3ea8425a4180236d7106938ec921d0935a4b8ca142",
+  "MountLabel": "",
+  "StorageOpt": {}
+}
```
+
+Create a new, empty, read-only filesystem layer with the specified
+`ID`, `Parent` and `MountLabel`. If `Parent` is an empty string, there is no
+parent layer. `StorageOpt` is a map of strings which indicate storage options.
+
+**Response**:
+```json
+{
+  "Err": ""
+}
+```
+
+Respond with a non-empty string error if an error occurred.
+
+### /GraphDriver.CreateReadWrite
+
+**Request**:
+```json
+{
+  "ID": "46fe8644f2572fd1e505364f7581e0c9dbc7f14640bd1fb6ce97714fb6fc5187",
+  "Parent": "2cd9c322cb78a55e8212aa3ea8425a4180236d7106938ec921d0935a4b8ca142",
+  "MountLabel": "",
+  "StorageOpt": {}
+}
+```
+
+Similar to `/GraphDriver.Create` but creates a read-write filesystem layer.
+
+### /GraphDriver.Remove
+
+**Request**:
+```json
+{
+  "ID": "46fe8644f2572fd1e505364f7581e0c9dbc7f14640bd1fb6ce97714fb6fc5187"
+}
+```
+
+Remove the filesystem layer with the given `ID`.
+
+**Response**:
+```json
+{
+  "Err": ""
+}
+```
+
+Respond with a non-empty string error if an error occurred.
+
+### /GraphDriver.Get
+
+**Request**:
+```json
+{
+  "ID": "46fe8644f2572fd1e505364f7581e0c9dbc7f14640bd1fb6ce97714fb6fc5187",
+  "MountLabel": ""
+}
+```
+
+Get the mountpoint for the layered filesystem referred to by the given `ID`.
+
+**Response**:
+```json
+{
+  "Dir": "/var/mygraph/46fe8644f2572fd1e505364f7581e0c9dbc7f14640bd1fb6ce97714fb6fc5187",
+  "Err": ""
+}
+```
+
+Respond with the absolute path to the mounted layered filesystem.
+Respond with a non-empty string error if an error occurred.
+
+### /GraphDriver.Put
+
+**Request**:
+```json
+{
+  "ID": "46fe8644f2572fd1e505364f7581e0c9dbc7f14640bd1fb6ce97714fb6fc5187"
+}
+```
+
+Release the system resources for the specified `ID`, such as unmounting the
+filesystem layer.
+
+**Response**:
+```json
+{
+  "Err": ""
+}
+```
+
+Respond with a non-empty string error if an error occurred.
+
+### /GraphDriver.Exists
+
+**Request**:
+```json
+{
+  "ID": "46fe8644f2572fd1e505364f7581e0c9dbc7f14640bd1fb6ce97714fb6fc5187"
+}
+```
+
+Determine if a filesystem layer with the specified `ID` exists.
+
+**Response**:
+```json
+{
+  "Exists": true
+}
+```
+
+Respond with a boolean for whether or not the filesystem layer with the specified
+`ID` exists.
+
+### /GraphDriver.Status
+
+**Request**:
+```json
+{}
+```
+
+Get low-level diagnostic information about the graph driver.
+
+**Response**:
+```json
+{
+  "Status": [[]]
+}
+```
+
+Respond with a 2-D array with key/value pairs for the underlying status
+information.
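+
+To make the shape of that 2-D array concrete, here is a sketch of how a plugin
+might model and populate it in Go. The pairs themselves are invented examples;
+only the `Status` field name comes from the response above.
+
+```go
+// StatusResponse mirrors the /GraphDriver.Status response: each inner
+// element is a [key, value] pair of strings.
+type StatusResponse struct {
+	Status [][2]string `json:"Status"`
+}
+
+// Marshals to: {"Status": [["Backing Filesystem", "extfs"], ["Dirs", "12"]]}
+var example = StatusResponse{
+	Status: [][2]string{
+		{"Backing Filesystem", "extfs"},
+		{"Dirs", "12"},
+	},
+}
+```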
+
+
+### /GraphDriver.GetMetadata
+
+**Request**:
+```json
+{
+  "ID": "46fe8644f2572fd1e505364f7581e0c9dbc7f14640bd1fb6ce97714fb6fc5187"
+}
+```
+
+Get low-level diagnostic information about the layered filesystem with the
+specified `ID`.
+
+**Response**:
+```json
+{
+  "Metadata": {},
+  "Err": ""
+}
+```
+
+Respond with a set of key/value pairs containing the low-level diagnostic
+information about the layered filesystem.
+Respond with a non-empty string error if an error occurred.
+
+### /GraphDriver.Cleanup
+
+**Request**:
+```json
+{}
+```
+
+Perform necessary tasks to release resources held by the plugin, such as
+unmounting all the layered file systems.
+
+**Response**:
+```json
+{
+  "Err": ""
+}
+```
+
+Respond with a non-empty string error if an error occurred.
+
+
+### /GraphDriver.Diff
+
+**Request**:
+```json
+{
+  "ID": "46fe8644f2572fd1e505364f7581e0c9dbc7f14640bd1fb6ce97714fb6fc5187",
+  "Parent": "2cd9c322cb78a55e8212aa3ea8425a4180236d7106938ec921d0935a4b8ca142"
+}
+```
+
+Get an archive of the changes between the filesystem layers specified by the `ID`
+and `Parent`. `Parent` may be an empty string, in which case there is no parent.
+
+**Response**:
+```
+{% raw %}
+{{ TAR STREAM }}
+{% endraw %}
+```
+
+### /GraphDriver.Changes
+
+**Request**:
+```json
+{
+  "ID": "46fe8644f2572fd1e505364f7581e0c9dbc7f14640bd1fb6ce97714fb6fc5187",
+  "Parent": "2cd9c322cb78a55e8212aa3ea8425a4180236d7106938ec921d0935a4b8ca142"
+}
+```
+
+Get a list of changes between the filesystem layers specified by the `ID` and
+`Parent`. If `Parent` is an empty string, there is no parent.
+
+**Response**:
+```json
+{
+  "Changes": [{}],
+  "Err": ""
+}
+```
+
+Respond with a list of changes. The structure of a change is:
+```json
+{
+  "Path": "/some/path",
+  "Kind": 0
+}
+```
+
+Where the `Path` is the filesystem path within the layered filesystem that is
+changed and `Kind` is an integer specifying the type of change that occurred:
+
+- 0 - Modified
+- 1 - Added
+- 2 - Deleted
+
+Respond with a non-empty string error if an error occurred.
+
+### /GraphDriver.ApplyDiff
+
+**Request**:
+```
+{% raw %}
+{{ TAR STREAM }}
+{% endraw %}
+```
+
+Extract the changeset from the given diff into the layer with the specified `ID`
+and `Parent`.
+
+**Query Parameters**:
+
+- id (required) - the `ID` of the new filesystem layer to extract the diff to
+- parent (required) - the `Parent` of the given `ID`
+
+**Response**:
+```json
+{
+  "Size": 512366,
+  "Err": ""
+}
+```
+
+Respond with the size of the new layer in bytes.
+Respond with a non-empty string error if an error occurred.
+
+### /GraphDriver.DiffSize
+
+**Request**:
+```json
+{
+  "ID": "46fe8644f2572fd1e505364f7581e0c9dbc7f14640bd1fb6ce97714fb6fc5187",
+  "Parent": "2cd9c322cb78a55e8212aa3ea8425a4180236d7106938ec921d0935a4b8ca142"
+}
+```
+
+Calculate the changes between the specified `ID` and `Parent`.
+
+**Response**:
+```json
+{
+  "Size": 512366,
+  "Err": ""
+}
+```
+
+Respond with the size of the changes between the specified `ID` and `Parent`.
+Respond with a non-empty string error if an error occurred.
diff --git a/components/engine/docs/extend/plugins_logging.md b/components/engine/docs/extend/plugins_logging.md
new file mode 100644
index 0000000000..8f687b4a01
--- /dev/null
+++ b/components/engine/docs/extend/plugins_logging.md
@@ -0,0 +1,220 @@
+---
+title: "Docker log driver plugins"
+description: "Log driver plugins."
+keywords: "Examples, Usage, plugins, docker, documentation, user guide, logging" +--- + + + +# Logging driver plugins + +This document describes logging driver plugins for Docker. + +Logging drivers enables users to forward container logs to another service for +processing. Docker includes several logging drivers as built-ins, however can +never hope to support all use-cases with built-in drivers. Plugins allow Docker +to support a wide range of logging services without requiring to embed client +libraries for these services in the main Docker codebase. See the +[plugin documentation](legacy_plugins.md) for more information. + +## Create a logging plugin + +The main interface for logging plugins uses the same JSON+HTTP RPC protocol used +by other plugin types. See the +[example](https://github.com/cpuguy83/docker-log-driver-test) plugin for a +reference implementation of a logging plugin. The example wraps the built-in +`jsonfilelog` log driver. + +## LogDriver protocol + +Logging plugins must register as a `LogDriver` during plugin activation. Once +activated users can specify the plugin as a log driver. + +There are two HTTP endpoints that logging plugins must implement: + +### `/LogDriver.StartLogging` + +Signals to the plugin that a container is starting that the plugin should start +receiving logs for. + +Logs will be streamed over the defined file in the request. On Linux this file +is a FIFO. Logging plugins are not currently supported on Windows. + +**Request**: +```json +{ + "File": "/path/to/file/stream", + "Info": { + "ContainerID": "123456" + } +} +``` + +`File` is the path to the log stream that needs to be consumed. Each call to +`StartLogging` should provide a different file path, even if it's a container +that the plugin has already received logs for prior. The file is created by +docker with a randomly generated name. + +`Info` is details about the container that's being logged. This is fairly +free-form, but is defined by the following struct definition: + +```go +type Info struct { + Config map[string]string + ContainerID string + ContainerName string + ContainerEntrypoint string + ContainerArgs []string + ContainerImageID string + ContainerImageName string + ContainerCreated time.Time + ContainerEnv []string + ContainerLabels map[string]string + LogPath string + DaemonName string +} +``` + + +`ContainerID` will always be supplied with this struct, but other fields may be +empty or missing. + +**Response** +```json +{ + "Err": "" +} +``` + +If an error occurred during this request, add an error message to the `Err` field +in the response. If no error then you can either send an empty response (`{}`) +or an empty value for the `Err` field. + +The driver should at this point be consuming log messages from the passed in file. +If messages are unconsumed, it may cause the container to block while trying to +write to its stdio streams. + +Log stream messages are encoded as protocol buffers. The protobuf definitions are +in the +[docker repository](https://github.com/docker/docker/blob/master/api/types/plugins/logdriver/entry.proto). + +Since protocol buffers are not self-delimited you must decode them from the stream +using the following stream format: + +``` +[size][message] +``` + +Where `size` is a 4-byte big endian binary encoded uint32. `size` in this case +defines the size of the next message. `message` is the actual log entry. 
+
+A reference Go implementation of a stream encoder/decoder can be found
+[here](https://github.com/docker/docker/blob/master/api/types/plugins/logdriver/io.go).
+
+### `/LogDriver.StopLogging`
+
+Signals to the plugin to stop collecting logs from the defined file.
+Once a response is received, the file will be removed by Docker. You must make
+sure to collect all logs on the stream before responding to this request or risk
+losing log data.
+
+A request on this endpoint does not mean that the container has been removed,
+only that it has stopped.
+
+**Request**:
+```json
+{
+  "File": "/path/to/file/stream"
+}
+```
+
+**Response**:
+```json
+{
+  "Err": ""
+}
+```
+
+If an error occurred during this request, add an error message to the `Err` field
+in the response. If no error occurred, you can either send an empty response (`{}`)
+or an empty value for the `Err` field.
+
+## Optional endpoints
+
+Logging plugins can implement two extra logging endpoints:
+
+### `/LogDriver.Capabilities`
+
+Defines the capabilities of the log driver. You must implement this endpoint for
+Docker to be able to take advantage of any of the defined capabilities.
+
+**Request**:
+```json
+{}
+```
+
+**Response**:
+```json
+{
+  "ReadLogs": true
+}
+```
+
+Supported capabilities:
+
+- `ReadLogs` - this tells Docker that the plugin is capable of reading back logs
+to clients. Plugins that report that they support `ReadLogs` must implement the
+`/LogDriver.ReadLogs` endpoint.
+
+### `/LogDriver.ReadLogs`
+
+Reads back logs to the client. This is used when `docker logs <container>` is
+called.
+
+In order for Docker to use this endpoint, the plugin must specify as much when
+`/LogDriver.Capabilities` is called.
+
+
+**Request**:
+```json
+{
+  "ReadConfig": {},
+  "Info": {
+    "ContainerID": "123456"
+  }
+}
+```
+
+`ReadConfig` is the set of options for reading; it is defined by the following
+Go struct:
+
+```go
+type ReadConfig struct {
+	Since  time.Time
+	Tail   int
+	Follow bool
+}
+```
+
+- `Since` defines the oldest log that should be sent.
+- `Tail` defines the number of lines to read (e.g. like the command `tail -n 10`)
+- `Follow` signals that the client wants to stay attached to receive new log messages
+as they come in once the existing logs have been read.
+
+`Info` is the same type defined in `/LogDriver.StartLogging`. It should be used
+to determine what set of logs to read.
+
+**Response**:
+```
+{{ log stream }}
+```
+
+The response should be the encoded log message using the same format as the
+messages that the plugin consumed from Docker.
diff --git a/components/engine/docs/extend/plugins_metrics.md b/components/engine/docs/extend/plugins_metrics.md
new file mode 100644
index 0000000000..a86c7f22d2
--- /dev/null
+++ b/components/engine/docs/extend/plugins_metrics.md
@@ -0,0 +1,85 @@
+---
+title: "Docker metrics collector plugins"
+description: "Metrics plugins."
+keywords: "Examples, Usage, plugins, docker, documentation, user guide, metrics"
+---
+
+# Metrics Collector Plugins
+
+Docker exposes internal metrics based on the Prometheus format. Metrics plugins
+enable accessing these metrics in a consistent way by providing a Unix
+socket at a predefined path where the plugin can scrape the metrics.
+
+> **Note**: While the plugin interface for metrics is non-experimental, the naming
+of the metrics and metric labels is still considered experimental and may change
+in a future version.
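+
+As a quick sanity check while developing, you can scrape that socket yourself.
+The sketch below assumes the socket speaks plain HTTP and serves the daemon's
+standard `/metrics` endpoint; the socket path is the one documented in the
+protocol section below.
+
+```go
+package main
+
+import (
+	"context"
+	"fmt"
+	"io"
+	"net"
+	"net/http"
+)
+
+func main() {
+	client := &http.Client{
+		Transport: &http.Transport{
+			// Ignore the host in the URL and dial the metrics socket instead.
+			DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
+				return (&net.Dialer{}).DialContext(ctx, "unix", "/run/docker/metrics.sock")
+			},
+		},
+	}
+	resp, err := client.Get("http://localhost/metrics")
+	if err != nil {
+		panic(err)
+	}
+	defer resp.Body.Close()
+	body, _ := io.ReadAll(resp.Body)
+	fmt.Print(string(body)) // Prometheus-format text
+}
+```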
+
+## Creating a metrics plugin
+
+You must currently set `PropagatedMount` in the plugin `config.json` to
+`/run/docker`. This allows the plugin to receive updated mounts
+(the bind-mounted socket) from Docker after the plugin is already configured.
+
+## MetricsCollector protocol
+
+Metrics plugins must register as implementing the `MetricsCollector` interface
+in `config.json`.
+
+On Unix platforms, the socket is located at `/run/docker/metrics.sock` in the
+plugin's rootfs.
+
+`MetricsCollector` must implement two endpoints:
+
+### `MetricsCollector.StartMetrics`
+
+Signals to the plugin that the metrics socket is now available for scraping.
+
+**Request**
+```json
+{}
+```
+
+The request has no payload.
+
+**Response**
+```json
+{
+  "Err": ""
+}
+```
+
+If an error occurred during this request, add an error message to the `Err` field
+in the response. If no error occurred, you can either send an empty response (`{}`)
+or an empty value for the `Err` field. Errors will only be logged.
+
+### `MetricsCollector.StopMetrics`
+
+Signals to the plugin that the metrics socket is no longer available.
+This may happen when the daemon is shutting down.
+
+**Request**
+```json
+{}
+```
+
+The request has no payload.
+
+**Response**
+```json
+{
+  "Err": ""
+}
+```
+
+If an error occurred during this request, add an error message to the `Err` field
+in the response. If no error occurred, you can either send an empty response (`{}`)
+or an empty value for the `Err` field. Errors will only be logged.
diff --git a/components/engine/docs/extend/plugins_network.md b/components/engine/docs/extend/plugins_network.md
new file mode 100644
index 0000000000..a974862fa6
--- /dev/null
+++ b/components/engine/docs/extend/plugins_network.md
@@ -0,0 +1,77 @@
+---
+title: "Docker network driver plugins"
+description: "Network driver plugins."
+keywords: "Examples, Usage, plugins, docker, documentation, user guide"
+---
+
+# Engine network driver plugins
+
+This document describes Docker Engine network driver plugins generally
+available in Docker Engine. To view information on plugins
+managed by Docker Engine, refer to [Docker Engine plugin system](index.md).
+
+Docker Engine network plugins enable Engine deployments to be extended to
+support a wide range of networking technologies, such as VXLAN, IPVLAN, MACVLAN
+or something completely different. Network driver plugins are supported via the
+LibNetwork project. Each plugin is implemented as a "remote driver" for
+LibNetwork, which shares plugin infrastructure with Engine. Effectively, network
+driver plugins are activated in the same way as other plugins, and use the same
+kind of protocol.
+
+## Network driver plugins and swarm mode
+
+Docker 1.12 adds support for cluster management and orchestration called
+[swarm mode](https://docs.docker.com/engine/swarm/). Docker Engine running in swarm mode currently
+only supports the built-in overlay driver for networking. Therefore existing
+networking plugins will not work in swarm mode.
+
+When you run Docker Engine outside of swarm mode, all networking plugins that
+worked in Docker 1.11 will continue to function normally. They do not require
+any modification.
+
+## Using network driver plugins
+
+The means of installing and running a network driver plugin depend on the
+particular plugin. So, be sure to install your plugin according to the
+instructions obtained from the plugin developer.
+
+Once running however, network driver plugins are used just like the built-in
+network drivers: by being mentioned as a driver in network-oriented Docker
+commands. For example,
+
+    $ docker network create --driver weave mynet
+
+Some network driver plugins are listed in [plugins](legacy_plugins.md).
+
+The `mynet` network is now owned by `weave`, so subsequent commands
+referring to that network will be sent to the plugin:
+
+    $ docker run --network=mynet busybox top
+
+
+## Write a network plugin
+
+Network plugins implement the [Docker plugin
+API](plugin_api.md) and the network plugin protocol.
+
+## Network plugin protocol
+
+The network driver protocol, in addition to the plugin activation call, is
+documented as part of libnetwork:
+[https://github.com/docker/libnetwork/blob/master/docs/remote.md](https://github.com/docker/libnetwork/blob/master/docs/remote.md).
+
+## Related Information
+
+To interact with the Docker maintainers and other interested users, see the IRC channel `#docker-network`.
+
+- [Docker networks feature overview](https://docs.docker.com/engine/userguide/networking/)
+- The [LibNetwork](https://github.com/docker/libnetwork) project
diff --git a/components/engine/docs/extend/plugins_services.md b/components/engine/docs/extend/plugins_services.md
new file mode 100644
index 0000000000..79e344f9ce
--- /dev/null
+++ b/components/engine/docs/extend/plugins_services.md
@@ -0,0 +1,186 @@
+---
+description: Using services with plugins
+keywords: "API, Usage, plugins, documentation, developer"
+title: Plugins and Services
+---
+
+# Using Volume and Network plugins in Docker services
+
+In swarm mode, it is possible to create a service that allows for attaching
+to networks or mounting volumes that are backed by plugins. Swarm schedules
+services based on plugin availability on a node.
+
+
+### Volume plugins
+
+In this example, a volume plugin is installed on a swarm worker and a volume
+is created using the plugin. In the manager, a service is created with the
+relevant mount options. Observe that the service is scheduled to run on the
+worker node that has the volume plugin and volume. Note that node1 is the
+manager and node2 is the worker.
+
+1. Prepare the manager. On node1:
+
+    ```bash
+    $ docker swarm init
+    Swarm initialized: current node (dxn1zf6l61qsb1josjja83ngz) is now a manager.
+    ```
+
+2. Join the swarm, then install the plugin and create a volume on the worker. On node2:
+
+    ```bash
+    $ docker swarm join \
+    --token SWMTKN-1-49nj1cmql0jkz5s954yi3oex3nedyz0fb0xx14ie39trti4wxv-8vxv8rssmk743ojnwacrr2e7c \
+    192.168.99.100:2377
+    ```
+
+    ```bash
+    $ docker plugin install tiborvass/sample-volume-plugin
+    latest: Pulling from tiborvass/sample-volume-plugin
+    eb9c16fbdc53: Download complete
+    Digest: sha256:00b42de88f3a3e0342e7b35fa62394b0a9ceb54d37f4c50be5d3167899994639
+    Status: Downloaded newer image for tiborvass/sample-volume-plugin:latest
+    Installed plugin tiborvass/sample-volume-plugin
+    ```
+
+    ```bash
+    $ docker volume create -d tiborvass/sample-volume-plugin --name pluginVol
+    ```
+
+3. Create a service using the plugin and volume. On node1:
+
+    ```bash
+    $ docker service create --name my-service --mount type=volume,volume-driver=tiborvass/sample-volume-plugin,source=pluginVol,destination=/tmp busybox top
+
+    $ docker service ls
+    z1sj8bb8jnfn my-service replicated 1/1 busybox:latest
+    ```
+
+   `docker service ls` shows 1 instance of the service running.
+
+4. Observe the task getting scheduled on node2:
+
+    ```bash
+    {% raw %}
+    $ docker ps --format '{{.ID}}\t {{.Status}} {{.Names}} {{.Command}}'
+    83fc1e842599     Up 2 days         my-service.1.9jn59qzn7nbc3m0zt1hij12xs "top"
+    {% endraw %}
+    ```
+
+### Network plugins
+
+In this example, a global scope network plugin is installed on both the
+swarm manager and worker. A service is created with replicated instances
+using the installed plugin. We will observe how the availability of the
+plugin determines network creation and container scheduling.
+
+Note that node1 is the manager and node2 is the worker.
+
+
+1. Install a global scope network plugin on both the manager and the worker. On node1
+   and node2:
+
+    ```bash
+    $ docker plugin install bboreham/weave2
+    Plugin "bboreham/weave2" is requesting the following privileges:
+    - network: [host]
+    - capabilities: [CAP_SYS_ADMIN CAP_NET_ADMIN]
+    Do you grant the above permissions? [y/N] y
+    latest: Pulling from bboreham/weave2
+    7718f575adf7: Download complete
+    Digest: sha256:2780330cc15644b60809637ee8bd68b4c85c893d973cb17f2981aabfadfb6d72
+    Status: Downloaded newer image for bboreham/weave2:latest
+    Installed plugin bboreham/weave2
+    ```
+
+2. Create a network using the plugin on the manager. On node1:
+
+    ```bash
+    $ docker network create --driver=bboreham/weave2:latest globalnet
+
+    $ docker network ls
+    NETWORK ID          NAME                DRIVER                   SCOPE
+    qlj7ueteg6ly        globalnet           bboreham/weave2:latest   swarm
+    ```
+
+3. Create a service on the manager with replicas set to 8. Observe that
+containers get scheduled on both the manager and the worker.
+
+   On node1:
+
+    ```bash
+    $ docker service create --network globalnet --name myservice --replicas=8 mrjana/simpleweb simpleweb
+    w90drnfzw85nygbie9kb89vpa
+    ```
+
+    ```bash
+    $ docker ps
+    CONTAINER ID        IMAGE                                                                                      COMMAND             CREATED             STATUS              PORTS               NAMES
+    87520965206a        mrjana/simpleweb@sha256:317d7f221d68c86d503119b0ea12c29de42af0a22ca087d522646ad1069a47a4   "simpleweb"         5 seconds ago       Up 4 seconds                            myservice.4.ytdzpktmwor82zjxkh118uf1v
+    15e24de0f7aa        mrjana/simpleweb@sha256:317d7f221d68c86d503119b0ea12c29de42af0a22ca087d522646ad1069a47a4   "simpleweb"         5 seconds ago       Up 4 seconds                            myservice.2.kh7a9n3iauq759q9mtxyfs9hp
+    c8c8f0144cdc        mrjana/simpleweb@sha256:317d7f221d68c86d503119b0ea12c29de42af0a22ca087d522646ad1069a47a4   "simpleweb"         5 seconds ago       Up 4 seconds                            myservice.6.sjhpj5gr3xt33e3u2jycoj195
+    2e8e4b2c5c08        mrjana/simpleweb@sha256:317d7f221d68c86d503119b0ea12c29de42af0a22ca087d522646ad1069a47a4   "simpleweb"         5 seconds ago       Up 4 seconds                            myservice.8.2z29zowsghx66u2velublwmrh
+    ```
+
+   On node2:
+
+    ```bash
+    $ docker ps
+    CONTAINER ID        IMAGE                                                                                      COMMAND             CREATED             STATUS                  PORTS               NAMES
+    53c0ae7c1dae        mrjana/simpleweb@sha256:317d7f221d68c86d503119b0ea12c29de42af0a22ca087d522646ad1069a47a4   "simpleweb"         2 seconds ago       Up Less than a second                       myservice.7.x44tvvdm3iwkt9kif35f7ykz1
+    9b56c627fee0        mrjana/simpleweb@sha256:317d7f221d68c86d503119b0ea12c29de42af0a22ca087d522646ad1069a47a4   "simpleweb"         2 seconds ago       Up Less than a second                       myservice.1.x7n1rm6lltw5gja3ueikze57q
+    d4f5927ba52c        mrjana/simpleweb@sha256:317d7f221d68c86d503119b0ea12c29de42af0a22ca087d522646ad1069a47a4   "simpleweb"         2 seconds ago       Up 1 second                                 myservice.5.i97bfo9uc6oe42lymafs9rz6k
+    478c0d395bd7        mrjana/simpleweb@sha256:317d7f221d68c86d503119b0ea12c29de42af0a22ca087d522646ad1069a47a4   "simpleweb"         2 seconds ago       Up Less than a second                       myservice.3.yr7nkffa48lff1vrl2r1m1ucs
+    ```
+
+4. Scale down the number of instances. On node1:
+
+    ```bash
+    $ docker service scale myservice=0
+    myservice scaled to 0
+    ```
+
+5. Disable and uninstall the plugin on the worker. On node2:
+
+    ```bash
+    $ docker plugin rm -f bboreham/weave2
+    bboreham/weave2
+    ```
+
+6. Scale up the number of instances again. Observe that all containers are
+scheduled on the manager and not on the worker, because the plugin is not available on the worker anymore.
+
+   On node1:
+
+    ```bash
+    $ docker service scale myservice=8
+    myservice scaled to 8
+    ```
+
+    ```bash
+    $ docker ps
+    CONTAINER ID        IMAGE                                                                                      COMMAND             CREATED             STATUS              PORTS               NAMES
+    cf4b0ec2415e        mrjana/simpleweb@sha256:317d7f221d68c86d503119b0ea12c29de42af0a22ca087d522646ad1069a47a4   "simpleweb"         39 seconds ago      Up 36 seconds                           myservice.3.r7p5o208jmlzpcbm2ytl3q6n1
+    57c64a6a2b88        mrjana/simpleweb@sha256:317d7f221d68c86d503119b0ea12c29de42af0a22ca087d522646ad1069a47a4   "simpleweb"         39 seconds ago      Up 36 seconds                           myservice.4.dwoezsbb02ccstkhlqjy2xe7h
+    3ac68cc4e7b8        mrjana/simpleweb@sha256:317d7f221d68c86d503119b0ea12c29de42af0a22ca087d522646ad1069a47a4   "simpleweb"         39 seconds ago      Up 35 seconds                           myservice.5.zx4ezdrm2nwxzkrwnxthv0284
+    006c3cb318fc        mrjana/simpleweb@sha256:317d7f221d68c86d503119b0ea12c29de42af0a22ca087d522646ad1069a47a4   "simpleweb"         39 seconds ago      Up 36 seconds                           myservice.8.q0e3umt19y3h3gzo1ty336k5r
+    dd2ffebde435        mrjana/simpleweb@sha256:317d7f221d68c86d503119b0ea12c29de42af0a22ca087d522646ad1069a47a4   "simpleweb"         39 seconds ago      Up 36 seconds                           myservice.7.a77y3u22prjipnrjg7vzpv3ba
+    a86c74d8b84b        mrjana/simpleweb@sha256:317d7f221d68c86d503119b0ea12c29de42af0a22ca087d522646ad1069a47a4   "simpleweb"         39 seconds ago      Up 36 seconds                           myservice.6.z9nbn14bagitwol1biveeygl7
+    2846a7850ba0        mrjana/simpleweb@sha256:317d7f221d68c86d503119b0ea12c29de42af0a22ca087d522646ad1069a47a4   "simpleweb"         39 seconds ago      Up 37 seconds                           myservice.2.ypufz2eh9fyhppgb89g8wtj76
+    e2ec01efcd8a        mrjana/simpleweb@sha256:317d7f221d68c86d503119b0ea12c29de42af0a22ca087d522646ad1069a47a4   "simpleweb"         39 seconds ago      Up 38 seconds                           myservice.1.8w7c4ttzr6zcb9sjsqyhwp3yl
+    ```
+
+   On node2:
+
+    ```bash
+    $ docker ps
+    CONTAINER ID        IMAGE               COMMAND             CREATED             STATUS              PORTS               NAMES
+    ```
diff --git a/components/engine/docs/extend/plugins_volume.md b/components/engine/docs/extend/plugins_volume.md
new file mode 100644
index 0000000000..807ab5a486
--- /dev/null
+++ b/components/engine/docs/extend/plugins_volume.md
@@ -0,0 +1,360 @@
+---
+title: "Volume plugins"
+description: "How to manage data with external volume plugins"
+keywords: "Examples, Usage, volume, docker, data, volumes, plugin, api"
+---
+
+# Write a volume plugin
+
+Docker Engine volume plugins enable Engine deployments to be integrated with
+external storage systems such as Amazon EBS, and enable data volumes to persist
+beyond the lifetime of a single Docker host. See the
+[plugin documentation](legacy_plugins.md) for more information.
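+
+As a taste of the protocol documented under "Volume plugin protocol" below,
+here is a hedged sketch of a single endpoint, `/VolumeDriver.Create`. The
+socket name and the in-memory bookkeeping are invented for illustration; a
+real driver would provision storage here or defer it until `Mount`.
+
+```go
+package main
+
+import (
+	"encoding/json"
+	"net"
+	"net/http"
+	"sync"
+)
+
+var (
+	mu      sync.Mutex
+	volumes = map[string]map[string]string{} // volume name -> creation opts
+)
+
+func main() {
+	mux := http.NewServeMux()
+	mux.HandleFunc("/VolumeDriver.Create", func(w http.ResponseWriter, r *http.Request) {
+		var req struct {
+			Name string
+			Opts map[string]string
+		}
+		if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
+			json.NewEncoder(w).Encode(map[string]string{"Err": err.Error()})
+			return
+		}
+		mu.Lock()
+		volumes[req.Name] = req.Opts // record the volume; manifest it lazily on Mount
+		mu.Unlock()
+		json.NewEncoder(w).Encode(map[string]string{"Err": ""})
+	})
+	// Listen on a UNIX socket in the plugin directory (see the Plugin API docs).
+	l, err := net.Listen("unix", "/run/docker/plugins/sample.sock")
+	if err != nil {
+		panic(err)
+	}
+	http.Serve(l, mux)
+}
+```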
+
+## Changelog
+
+### 1.13.0
+
+- If used as part of the v2 plugin architecture, mountpoints that are part of
+  paths returned by the plugin must be mounted under the directory specified by
+  `PropagatedMount` in the plugin configuration
+  ([#26398](https://github.com/docker/docker/pull/26398))
+
+### 1.12.0
+
+- Add `Status` field to `VolumeDriver.Get` response
+  ([#21006](https://github.com/docker/docker/pull/21006#))
+- Add `VolumeDriver.Capabilities` to get capabilities of the volume driver
+  ([#22077](https://github.com/docker/docker/pull/22077))
+
+### 1.10.0
+
+- Add `VolumeDriver.Get` which gets the details about the volume
+  ([#16534](https://github.com/docker/docker/pull/16534))
+- Add `VolumeDriver.List` which lists all volumes owned by the driver
+  ([#16534](https://github.com/docker/docker/pull/16534))
+
+### 1.8.0
+
+- Initial support for volume driver plugins
+  ([#14659](https://github.com/docker/docker/pull/14659))
+
+## Command-line changes
+
+To give a container access to a volume, use the `--volume` and `--volume-driver`
+flags on the `docker container run` command. The `--volume` (or `-v`) flag
+accepts a volume name and path on the host, and the `--volume-driver` flag
+accepts a driver type.
+
+```bash
+$ docker volume create --driver=flocker volumename
+
+$ docker container run -it --volume volumename:/data busybox sh
+```
+
+### `--volume`
+
+The `--volume` (or `-v`) flag takes a value that is in the format
+`<volume_name>:<mountpoint>`. The two parts of the value are
+separated by a colon (`:`) character.
+
+- The volume name is a human-readable name for the volume, and cannot begin with
+  a `/` character. It is referred to as `volume_name` in the rest of this topic.
+- The `Mountpoint` is the path on the host (v1) or in the plugin (v2) where the
+  volume has been made available.
+
+### `volumedriver`
+
+Specifying a `volumedriver` in conjunction with a `volumename` allows you to
+use plugins such as [Flocker](https://github.com/ScatterHQ/flocker) to manage
+volumes external to a single host, such as those on EBS.
+
+## Create a VolumeDriver
+
+The container creation endpoint (`/containers/create`) accepts a `VolumeDriver`
+field of type `string` allowing you to specify the name of the driver. If not
+specified, it defaults to `"local"` (the default driver for local volumes).
+
+## Volume plugin protocol
+
+If a plugin registers itself as a `VolumeDriver` when activated, it must
+provide the Docker daemon with writeable paths on the host filesystem. The Docker
+daemon provides these paths to containers to consume. The Docker daemon makes
+the volumes available by bind-mounting the provided paths into the containers.
+
+> **Note**: Volume plugins should *not* write data to the `/var/lib/docker/`
+> directory, including `/var/lib/docker/volumes`. The `/var/lib/docker/`
+> directory is reserved for Docker.
+
+### `/VolumeDriver.Create`
+
+**Request**:
+```json
+{
+  "Name": "volume_name",
+  "Opts": {}
+}
+```
+
+Instruct the plugin that the user wants to create a volume, given a user
+specified volume name. The plugin does not need to actually manifest the
+volume on the filesystem yet (until `Mount` is called).
+`Opts` is a map of driver specific options passed through from the user request.
+
+**Response**:
+```json
+{
+  "Err": ""
+}
+```
+
+Respond with a string error if an error occurred.
+
+### `/VolumeDriver.Remove`
+
+**Request**:
+```json
+{
+  "Name": "volume_name"
+}
+```
+
+Delete the specified volume from disk.
+This request is issued when a user invokes
+`docker rm -v` to remove volumes associated with a container.
+
+**Response**:
+```json
+{
+  "Err": ""
+}
+```
+
+Respond with a string error if an error occurred.
+
+### `/VolumeDriver.Mount`
+
+**Request**:
+```json
+{
+  "Name": "volume_name",
+  "ID": "b87d7442095999a92b65b3d9691e697b61713829cc0ffd1bb72e4ccd51aa4d6c"
+}
+```
+
+Docker requires the plugin to provide a volume, given a user specified volume
+name. `Mount` is called once per container start. If the same `volume_name` is requested
+more than once, the plugin may need to keep track of each new mount request and provision
+at the first mount request and deprovision at the last corresponding unmount request.
+
+`ID` is a unique ID for the caller that is requesting the mount.
+
+**Response**:
+
+- **v1**:
+
+  ```json
+  {
+    "Mountpoint": "/path/to/directory/on/host",
+    "Err": ""
+  }
+  ```
+
+- **v2**:
+
+  ```json
+  {
+    "Mountpoint": "/path/under/PropagatedMount",
+    "Err": ""
+  }
+  ```
+
+`Mountpoint` is the path on the host (v1) or in the plugin (v2) where the volume
+has been made available.
+
+`Err` is either empty or contains an error string.
+
+### `/VolumeDriver.Path`
+
+**Request**:
+
+```json
+{
+  "Name": "volume_name"
+}
+```
+
+Request the path to the volume with the given `volume_name`.
+
+**Response**:
+
+- **v1**:
+
+  ```json
+  {
+    "Mountpoint": "/path/to/directory/on/host",
+    "Err": ""
+  }
+  ```
+
+- **v2**:
+
+  ```json
+  {
+    "Mountpoint": "/path/under/PropagatedMount",
+    "Err": ""
+  }
+  ```
+
+Respond with the path on the host (v1) or inside the plugin (v2) where the
+volume has been made available, and/or a string error if an error occurred.
+
+`Mountpoint` is optional. However, the plugin may be queried again later if one
+is not provided.
+
+### `/VolumeDriver.Unmount`
+
+**Request**:
+```json
+{
+  "Name": "volume_name",
+  "ID": "b87d7442095999a92b65b3d9691e697b61713829cc0ffd1bb72e4ccd51aa4d6c"
+}
+```
+
+Docker is no longer using the named volume. `Unmount` is called once per
+container stop. The plugin may deduce that it is safe to deprovision the volume at
+this point.
+
+`ID` is a unique ID for the caller that is requesting the mount.
+
+**Response**:
+```json
+{
+  "Err": ""
+}
+```
+
+Respond with a string error if an error occurred.
+
+
+### `/VolumeDriver.Get`
+
+**Request**:
+```json
+{
+  "Name": "volume_name"
+}
+```
+
+Get info about `volume_name`.
+
+
+**Response**:
+
+- **v1**:
+
+  ```json
+  {
+    "Volume": {
+      "Name": "volume_name",
+      "Mountpoint": "/path/to/directory/on/host",
+      "Status": {}
+    },
+    "Err": ""
+  }
+  ```
+
+- **v2**:
+
+  ```json
+  {
+    "Volume": {
+      "Name": "volume_name",
+      "Mountpoint": "/path/under/PropagatedMount",
+      "Status": {}
+    },
+    "Err": ""
+  }
+  ```
+
+Respond with a string error if an error occurred. `Mountpoint` and `Status` are
+optional.
+
+
+### `/VolumeDriver.List`
+
+**Request**:
+```json
+{}
+```
+
+Get the list of volumes registered with the plugin.
+
+**Response**:
+
+- **v1**:
+
+  ```json
+  {
+    "Volumes": [
+      {
+        "Name": "volume_name",
+        "Mountpoint": "/path/to/directory/on/host"
+      }
+    ],
+    "Err": ""
+  }
+  ```
+
+- **v2**:
+
+  ```json
+  {
+    "Volumes": [
+      {
+        "Name": "volume_name",
+        "Mountpoint": "/path/under/PropagatedMount"
+      }
+    ],
+    "Err": ""
+  }
+  ```
+
+
+Respond with a string error if an error occurred. `Mountpoint` is optional.
+
+### `/VolumeDriver.Capabilities`
+
+**Request**:
+```json
+{}
+```
+
+Get the list of capabilities the driver supports.
+
+The driver is not required to implement `Capabilities`.
+If it is not
+implemented, the default values are used.
+
+**Response**:
+```json
+{
+  "Capabilities": {
+    "Scope": "global"
+  }
+}
+```
+
+Supported scopes are `global` and `local`. Any other value in `Scope` will be
+ignored, and `local` is used. `Scope` allows cluster managers to handle the
+volume in different ways. For instance, a scope of `global` signals to the
+cluster manager that it only needs to create the volume once instead of on each
+Docker host. More capabilities may be added in the future.
diff --git a/components/engine/docs/reference/builder.md b/components/engine/docs/reference/builder.md
new file mode 100644
index 0000000000..2571511b64
--- /dev/null
+++ b/components/engine/docs/reference/builder.md
@@ -0,0 +1,1849 @@
+---
+title: "Dockerfile reference"
+description: "Dockerfiles use a simple DSL which allows you to automate the steps you would normally manually take to create an image."
+keywords: "builder, docker, Dockerfile, automation, image creation"
+redirect_from:
+- /reference/builder/
+---
+
+# Dockerfile reference
+
+Docker can build images automatically by reading the instructions from a
+`Dockerfile`. A `Dockerfile` is a text document that contains all the commands a
+user could call on the command line to assemble an image. Using `docker build`
+users can create an automated build that executes several command-line
+instructions in succession.
+
+This page describes the commands you can use in a `Dockerfile`. When you are
+done reading this page, refer to the [`Dockerfile` Best
+Practices](https://docs.docker.com/engine/userguide/eng-image/dockerfile_best-practices/) for a tip-oriented guide.
+
+## Usage
+
+The [`docker build`](commandline/build.md) command builds an image from
+a `Dockerfile` and a *context*. The build's context is the files at a specified
+location `PATH` or `URL`. The `PATH` is a directory on your local filesystem.
+The `URL` is a Git repository location.
+
+A context is processed recursively. So, a `PATH` includes any subdirectories and
+the `URL` includes the repository and its submodules. A simple build command
+that uses the current directory as context:
+
+    $ docker build .
+    Sending build context to Docker daemon  6.51 MB
+    ...
+
+The build is run by the Docker daemon, not by the CLI. The first thing a build
+process does is send the entire context (recursively) to the daemon. In most
+cases, it's best to start with an empty directory as context and keep your
+Dockerfile in that directory. Add only the files needed for building the
+Dockerfile.
+
+>**Warning**: Do not use your root directory, `/`, as the `PATH` as it causes
+>the build to transfer the entire contents of your hard drive to the Docker
+>daemon.
+
+To use a file in the build context, the `Dockerfile` refers to the file specified
+in an instruction, for example, a `COPY` instruction. To increase the build's
+performance, exclude files and directories by adding a `.dockerignore` file to
+the context directory. For information about how to [create a `.dockerignore`
+file](#dockerignore-file) see the documentation on this page.
+
+Traditionally, the `Dockerfile` is called `Dockerfile` and located in the root
+of the context. You use the `-f` flag with `docker build` to point to a Dockerfile
+anywhere in your file system.
+
+    $ docker build -f /path/to/a/Dockerfile .
+
+You can specify a repository and tag at which to save the new image if
+the build succeeds:
+
+    $ docker build -t shykes/myapp .
+
+To tag the image into multiple repositories after the build,
+add multiple `-t` parameters when you run the `build` command:
+
+    $ docker build -t shykes/myapp:1.0.2 -t shykes/myapp:latest .
+
+Before the Docker daemon runs the instructions in the `Dockerfile`, it performs
+a preliminary validation of the `Dockerfile` and returns an error if the syntax is incorrect:
+
+    $ docker build -t test/myapp .
+    Sending build context to Docker daemon 2.048 kB
+    Error response from daemon: Unknown instruction: RUNCMD
+
+The Docker daemon runs the instructions in the `Dockerfile` one-by-one,
+committing the result of each instruction
+to a new image if necessary, before finally outputting the ID of your
+new image. The Docker daemon will automatically clean up the context you
+sent.
+
+Note that each instruction is run independently, and causes a new image
+to be created - so `RUN cd /tmp` will not have any effect on the next
+instructions.
+
+Whenever possible, Docker will re-use the intermediate images (cache)
+to accelerate the `docker build` process significantly. This is indicated by
+the `Using cache` message in the console output.
+(For more information, see the [Build cache section](https://docs.docker.com/engine/userguide/eng-image/dockerfile_best-practices/#/build-cache) in the
+`Dockerfile` best practices guide.)
+
+    $ docker build -t svendowideit/ambassador .
+    Sending build context to Docker daemon 15.36 kB
+    Step 1/4 : FROM alpine:3.2
+     ---> 31f630c65071
+    Step 2/4 : MAINTAINER SvenDowideit@home.org.au
+     ---> Using cache
+     ---> 2a1c91448f5f
+    Step 3/4 : RUN apk update && apk add socat && rm -r /var/cache/
+     ---> Using cache
+     ---> 21ed6e7fbb73
+    Step 4/4 : CMD env | grep _TCP= | (sed 's/.*_PORT_\([0-9]*\)_TCP=tcp:\/\/\(.*\):\(.*\)/socat -t 100000000 TCP4-LISTEN:\1,fork,reuseaddr TCP4:\2:\3 \&/' && echo wait) | sh
+     ---> Using cache
+     ---> 7ea8aef582cc
+    Successfully built 7ea8aef582cc
+
+Build cache is only used from images that have a local parent chain. This means
+that these images were created by previous builds or the whole chain of images
+was loaded with `docker load`. If you wish to use build cache of a specific
+image you can specify it with `--cache-from` option. Images specified with
+`--cache-from` do not need to have a parent chain and may be pulled from other
+registries.
+
+When you're done with your build, you're ready to look into [*Pushing a
+repository to its registry*](https://docs.docker.com/engine/tutorials/dockerrepos/#/contributing-to-docker-hub).
+
+## Format
+
+Here is the format of the `Dockerfile`:
+
+```Dockerfile
+# Comment
+INSTRUCTION arguments
+```
+
+Instructions are not case-sensitive. However, convention is for them to
+be UPPERCASE to distinguish them from arguments more easily.
+
+
+Docker runs instructions in a `Dockerfile` in order. A `Dockerfile` **must
+start with a \`FROM\` instruction**. The `FROM` instruction specifies the [*Base
+Image*](glossary.md#base-image) from which you are building. `FROM` may only be
+preceded by one or more `ARG` instructions, which declare arguments that are used
+in `FROM` lines in the `Dockerfile`.
+
+Docker treats lines that *begin* with `#` as a comment, unless the line is
+a valid [parser directive](#parser-directives). A `#` marker anywhere
+else in a line is treated as an argument. This allows statements like:
+
+```Dockerfile
+# Comment
+RUN echo 'we are running some # of cool things'
+```
+
+Line continuation characters are not supported in comments.
+
+## Parser directives
+
+Parser directives are optional, and affect the way in which subsequent lines
+in a `Dockerfile` are handled. Parser directives do not add layers to the build,
+and will not be shown as a build step. Parser directives are written as a
+special type of comment in the form `# directive=value`. A single directive
+may only be used once.
+
+Once a comment, empty line or builder instruction has been processed, Docker
+no longer looks for parser directives. Instead it treats anything formatted
+as a parser directive as a comment and does not attempt to validate if it might
+be a parser directive. Therefore, all parser directives must be at the very
+top of a `Dockerfile`.
+
+Parser directives are not case-sensitive. However, convention is for them to
+be lowercase. Convention is also to include a blank line following any
+parser directives. Line continuation characters are not supported in parser
+directives.
+
+Due to these rules, the following examples are all invalid:
+
+Invalid due to line continuation:
+
+```Dockerfile
+# direc \
+tive=value
+```
+
+Invalid due to appearing twice:
+
+```Dockerfile
+# directive=value1
+# directive=value2
+
+FROM ImageName
+```
+
+Treated as a comment due to appearing after a builder instruction:
+
+```Dockerfile
+FROM ImageName
+# directive=value
+```
+
+Treated as a comment due to appearing after a comment which is not a parser
+directive:
+
+```Dockerfile
+# About my dockerfile
+# directive=value
+FROM ImageName
+```
+
+The unknown directive is treated as a comment due to not being recognized. In
+addition, the known directive is treated as a comment due to appearing after
+a comment which is not a parser directive:
+
+```Dockerfile
+# unknowndirective=value
+# knowndirective=value
+```
+
+Non-line-breaking whitespace is permitted in a parser directive. Hence, the
+following lines are all treated identically:
+
+```Dockerfile
+#directive=value
+# directive =value
+#	directive= value
+# directive = value
+#	  dIrEcTiVe=value
+```
+
+The following parser directive is supported:
+
+* `escape`
+
+## escape
+
+    # escape=\ (backslash)
+
+Or
+
+    # escape=` (backtick)
+
+The `escape` directive sets the character used to escape characters in a
+`Dockerfile`. If not specified, the default escape character is `\`.
+
+The escape character is used both to escape characters in a line, and to
+escape a newline. This allows a `Dockerfile` instruction to
+span multiple lines. Note that regardless of whether the `escape` parser
+directive is included in a `Dockerfile`, *escaping is not performed in
+a `RUN` command, except at the end of a line.*
+
+Setting the escape character to `` ` `` is especially useful on
+`Windows`, where `\` is the directory path separator. `` ` `` is consistent
+with [Windows PowerShell](https://technet.microsoft.com/en-us/library/hh847755.aspx).
+
+Consider the following example which would fail in a non-obvious way on
+`Windows`. The second `\` at the end of the second line would be interpreted as an
+escape for the newline, instead of a target of the escape from the first `\`.
+Similarly, the `\` at the end of the third line would, assuming it was actually
+handled as an instruction, cause it to be treated as a line continuation. The result
+of this Dockerfile is that the second and third lines are considered a single
+instruction:
+
+```Dockerfile
+FROM microsoft/nanoserver
+COPY testfile.txt c:\\
+RUN dir c:\
+```
+
+Results in:
+
+    PS C:\John> docker build -t cmd .
+    Sending build context to Docker daemon 3.072 kB
+    Step 1/2 : FROM microsoft/nanoserver
+     ---> 22738ff49c6d
+    Step 2/2 : COPY testfile.txt c:\RUN dir c:
+    GetFileAttributesEx c:RUN: The system cannot find the file specified.
+    PS C:\John>
+
+One solution to the above would be to use `/` as the target of both the `COPY`
+instruction and `dir`. However, this syntax is, at best, confusing as it is not
+natural for paths on `Windows`, and at worst, error prone as not all commands on
+`Windows` support `/` as the path separator.
+
+By adding the `escape` parser directive, the following `Dockerfile` succeeds as
+expected with the use of natural platform semantics for file paths on `Windows`:
+
+    # escape=`
+
+    FROM microsoft/nanoserver
+    COPY testfile.txt c:\
+    RUN dir c:\
+
+Results in:
+
+    PS C:\John> docker build -t succeeds --no-cache=true .
+    Sending build context to Docker daemon 3.072 kB
+    Step 1/3 : FROM microsoft/nanoserver
+     ---> 22738ff49c6d
+    Step 2/3 : COPY testfile.txt c:\
+     ---> 96655de338de
+    Removing intermediate container 4db9acbb1682
+    Step 3/3 : RUN dir c:\
+     ---> Running in a2c157f842f5
+     Volume in drive C has no label.
+     Volume Serial Number is 7E6D-E0F7
+
+     Directory of c:\
+
+    10/05/2016  05:04 PM             1,894 License.txt
+    10/05/2016  02:22 PM    <DIR>          Program Files
+    10/05/2016  02:14 PM    <DIR>          Program Files (x86)
+    10/28/2016  11:18 AM                62 testfile.txt
+    10/28/2016  11:20 AM    <DIR>          Users
+    10/28/2016  11:20 AM    <DIR>          Windows
+                   2 File(s)          1,956 bytes
+                   4 Dir(s)  21,259,096,064 bytes free
+
+     ---> 01c7f3bef04f
+    Removing intermediate container a2c157f842f5
+    Successfully built 01c7f3bef04f
+    PS C:\John>
+
+## Environment replacement
+
+Environment variables (declared with [the `ENV` statement](#env)) can also be
+used in certain instructions as variables to be interpreted by the
+`Dockerfile`. Escapes are also handled for including variable-like syntax
+into a statement literally.
+
+Environment variables are notated in the `Dockerfile` either with
+`$variable_name` or `${variable_name}`. They are treated equivalently and the
+brace syntax is typically used to address issues with variable names with no
+whitespace, like `${foo}_bar`.
+
+The `${variable_name}` syntax also supports a few of the standard `bash`
+modifiers as specified below:
+
+* `${variable:-word}` indicates that if `variable` is set then the result
+  will be that value. If `variable` is not set then `word` will be the result.
+* `${variable:+word}` indicates that if `variable` is set then `word` will be
+  the result, otherwise the result is the empty string.
+
+In all cases, `word` can be any string, including additional environment
+variables.
+
+Escaping is possible by adding a `\` before the variable: `\$foo` or `\${foo}`,
+for example, will translate to `$foo` and `${foo}` literals respectively.
+
+Example (parsed representation is displayed after the `#`):
+
+    FROM busybox
+    ENV foo /bar
+    WORKDIR ${foo}   # WORKDIR /bar
+    ADD . $foo       # ADD . /bar
+    COPY \$foo /quux # COPY $foo /quux
+
+Environment variables are supported by the following list of instructions in
+the `Dockerfile`:
+
+* `ADD`
+* `COPY`
+* `ENV`
+* `EXPOSE`
+* `FROM`
+* `LABEL`
+* `STOPSIGNAL`
+* `USER`
+* `VOLUME`
+* `WORKDIR`
+
+as well as:
+
+* `ONBUILD` (when combined with one of the supported instructions above)
+
+> **Note**:
+> Prior to 1.4, `ONBUILD` instructions did **NOT** support environment
+> variables, even when combined with any of the instructions listed above.
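+
+Pulling the pieces above together, here is a small sketch of substitution with
+the `:-` modifier in instructions that support it (the variable names here are
+made up for the example):
+
+```Dockerfile
+FROM busybox
+ENV foo /bar
+# foo is set, so ${foo:-/default} resolves to /bar
+WORKDIR ${foo:-/default}
+# baz is not set, so ${baz:-/tmp} resolves to /tmp
+ADD . ${baz:-/tmp}
+```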
+
+Environment variable substitution will use the same value for each variable
+throughout the entire instruction. In other words, in this example:
+
+    ENV abc=hello
+    ENV abc=bye def=$abc
+    ENV ghi=$abc
+
+will result in `def` having a value of `hello`, not `bye`. However,
+`ghi` will have a value of `bye` because it is not part of the same instruction
+that set `abc` to `bye`.
+
+## .dockerignore file
+
+Before the docker CLI sends the context to the docker daemon, it looks
+for a file named `.dockerignore` in the root directory of the context.
+If this file exists, the CLI modifies the context to exclude files and
+directories that match patterns in it. This helps to avoid
+unnecessarily sending large or sensitive files and directories to the
+daemon and potentially adding them to images using `ADD` or `COPY`.
+
+The CLI interprets the `.dockerignore` file as a newline-separated
+list of patterns similar to the file globs of Unix shells. For the
+purposes of matching, the root of the context is considered to be both
+the working and the root directory. For example, the patterns
+`/foo/bar` and `foo/bar` both exclude a file or directory named `bar`
+in the `foo` subdirectory of `PATH` or in the root of the git
+repository located at `URL`. Neither excludes anything else.
+
+If a line in the `.dockerignore` file starts with `#` in column 1, then it is
+considered a comment and is ignored before being interpreted by the CLI.
+
+Here is an example `.dockerignore` file:
+
+```
+# comment
+*/temp*
+*/*/temp*
+temp?
+```
+
+This file causes the following build behavior:
+
+| Rule        | Behavior |
+|-------------|----------|
+| `# comment` | Ignored. |
+| `*/temp*`   | Exclude files and directories whose names start with `temp` in any immediate subdirectory of the root. For example, the plain file `/somedir/temporary.txt` is excluded, as is the directory `/somedir/temp`. |
+| `*/*/temp*` | Exclude files and directories starting with `temp` from any subdirectory that is two levels below the root. For example, `/somedir/subdir/temporary.txt` is excluded. |
+| `temp?`     | Exclude files and directories in the root directory whose names are a one-character extension of `temp`. For example, `/tempa` and `/tempb` are excluded. |
+
+Matching is done using Go's
+[filepath.Match](http://golang.org/pkg/path/filepath#Match) rules. A
+preprocessing step removes leading and trailing whitespace and
+eliminates `.` and `..` elements using Go's
+[filepath.Clean](http://golang.org/pkg/path/filepath/#Clean). Lines
+that are blank after preprocessing are ignored.
+
+Beyond Go's filepath.Match rules, Docker also supports a special
+wildcard string `**` that matches any number of directories (including
+zero). For example, `**/*.go` will exclude all files that end with `.go`
+that are found in all directories, including the root of the build context.
+
+Lines starting with `!` (exclamation mark) can be used to make exceptions
+to exclusions. The following is an example `.dockerignore` file that
+uses this mechanism:
+
+```
+    *.md
+    !README.md
+```
+
+All markdown files *except* `README.md` are excluded from the context.
+
+The placement of `!` exception rules influences the behavior: the last
+line of the `.dockerignore` that matches a particular file determines
+whether it is included or excluded.
Consider the following example:
+
+```
+    *.md
+    !README*.md
+    README-secret.md
+```
+
+No markdown files are included in the context except README files other than
+`README-secret.md`.
+
+Now consider this example:
+
+```
+    *.md
+    README-secret.md
+    !README*.md
+```
+
+All of the README files are included. The middle line has no effect because
+`!README*.md` matches `README-secret.md` and comes last.
+
+You can even use the `.dockerignore` file to exclude the `Dockerfile`
+and `.dockerignore` files. These files are still sent to the daemon
+because it needs them to do its job. But the `ADD` and `COPY` instructions
+do not copy them to the image.
+
+Finally, you may want to specify which files to include in the
+context, rather than which to exclude. To achieve this, specify `*` as
+the first pattern, followed by one or more `!` exception patterns.
+
+**Note**: For historical reasons, the pattern `.` is ignored.
+
+## FROM
+
+    FROM <image> [AS <name>]
+
+Or
+
+    FROM <image>[:<tag>] [AS <name>]
+
+Or
+
+    FROM <image>[@<digest>] [AS <name>]
+
+The `FROM` instruction initializes a new build stage and sets the
+[*Base Image*](glossary.md#base-image) for subsequent instructions. As such, a
+valid `Dockerfile` must start with a `FROM` instruction. The image can be
+any valid image – it is especially easy to start by **pulling an image** from
+the [*Public Repositories*](https://docs.docker.com/engine/tutorials/dockerrepos/).
+
+- `ARG` is the only instruction that may precede `FROM` in the `Dockerfile`.
+  See [Understand how ARG and FROM interact](#understand-how-arg-and-from-interact).
+
+- `FROM` can appear multiple times within a single `Dockerfile` to
+  create multiple images or use one build stage as a dependency for another.
+  Simply make a note of the last image ID output by the commit before each new
+  `FROM` instruction. Each `FROM` instruction clears any state created by previous
+  instructions.
+
+- Optionally a name can be given to a new build stage by adding `AS name` to the
+  `FROM` instruction. The name can be used in subsequent `FROM` and
+  `COPY --from=<name|index>` instructions to refer to the image built in this stage.
+
+- The `tag` or `digest` values are optional. If you omit either of them, the
+  builder assumes a `latest` tag by default. The builder returns an error if it
+  cannot find the `tag` value.
+
+### Understand how ARG and FROM interact
+
+`FROM` instructions support variables that are declared by any `ARG`
+instructions that occur before the first `FROM`.
+
+```Dockerfile
+ARG CODE_VERSION=latest
+FROM base:${CODE_VERSION}
+CMD /code/run-app
+
+FROM extras:${CODE_VERSION}
+CMD /code/run-extras
+```
+
+To use the default value of an `ARG` declared before the first `FROM` use an
+`ARG` instruction without a value:
+
+```Dockerfile
+ARG SETTINGS=default
+
+FROM busybox
+ARG SETTINGS
+
+```
+
+## RUN
+
+RUN has 2 forms:
+
+- `RUN <command>` (*shell* form, the command is run in a shell, which by
+  default is `/bin/sh -c` on Linux or `cmd /S /C` on Windows)
+- `RUN ["executable", "param1", "param2"]` (*exec* form)
+
+The `RUN` instruction will execute any commands in a new layer on top of the
+current image and commit the results. The resulting committed image will be
+used for the next step in the `Dockerfile`.
+
+Layering `RUN` instructions and generating commits conforms to the core
+concepts of Docker where commits are cheap and containers can be created from
+any point in an image's history, much like source control.
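+
+As a side-by-side sketch of the two forms (the package and message are
+arbitrary examples):
+
+```Dockerfile
+FROM ubuntu
+# Shell form: runs under /bin/sh -c, so shell features such as && work
+RUN apt-get update && apt-get install -y curl
+# Exec form: no shell is involved; arguments are passed to the executable verbatim
+RUN ["/bin/echo", "hello from the exec form"]
+```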
+
+The *exec* form makes it possible to avoid shell string munging, and to `RUN`
+commands using a base image that does not contain the specified shell executable.
+
+The default shell for the *shell* form can be changed using the `SHELL`
+command.
+
+In the *shell* form you can use a `\` (backslash) to continue a single
+RUN instruction onto the next line. For example, consider these two lines:
+
+```
+RUN /bin/bash -c 'source $HOME/.bashrc; \
+echo $HOME'
+```
+Together they are equivalent to this single line:
+
+```
+RUN /bin/bash -c 'source $HOME/.bashrc; echo $HOME'
+```
+
+> **Note**:
+> To use a different shell, other than `/bin/sh`, use the *exec* form
+> passing in the desired shell. For example,
+> `RUN ["/bin/bash", "-c", "echo hello"]`
+
+> **Note**:
+> The *exec* form is parsed as a JSON array, which means that
+> you must use double-quotes (") around words not single-quotes (').
+
+> **Note**:
+> Unlike the *shell* form, the *exec* form does not invoke a command shell.
+> This means that normal shell processing does not happen. For example,
+> `RUN [ "echo", "$HOME" ]` will not do variable substitution on `$HOME`.
+> If you want shell processing then either use the *shell* form or execute
+> a shell directly, for example: `RUN [ "sh", "-c", "echo $HOME" ]`.
+> When using the exec form and executing a shell directly, as in the case for
+> the shell form, it is the shell that is doing the environment variable
+> expansion, not docker.
+>
+> **Note**:
+> In the *JSON* form, it is necessary to escape backslashes. This is
+> particularly relevant on Windows where the backslash is the path separator.
+> The following line would otherwise be treated as *shell* form due to not
+> being valid JSON, and fail in an unexpected way:
+> `RUN ["c:\windows\system32\tasklist.exe"]`
+> The correct syntax for this example is:
+> `RUN ["c:\\windows\\system32\\tasklist.exe"]`
+
+The cache for `RUN` instructions isn't invalidated automatically during
+the next build. The cache for an instruction like
+`RUN apt-get dist-upgrade -y` will be reused during the next build. The
+cache for `RUN` instructions can be invalidated by using the `--no-cache`
+flag, for example `docker build --no-cache`.
+
+See the [`Dockerfile` Best Practices
+guide](https://docs.docker.com/engine/userguide/eng-image/dockerfile_best-practices/#/build-cache) for more information.
+
+The cache for `RUN` instructions can be invalidated by `ADD` instructions. See
+[below](#add) for details.
+
+### Known issues (RUN)
+
+- [Issue 783](https://github.com/docker/docker/issues/783) is about file
+  permissions problems that can occur when using the AUFS file system. You
+  might notice it during an attempt to `rm` a file, for example.
+
+  For systems that have a recent aufs version (i.e., the `dirperm1` mount option can
+  be set), docker will attempt to fix the issue automatically by mounting
+  the layers with the `dirperm1` option. More details on the `dirperm1` option can be
+  found at the [`aufs` man page](https://github.com/sfjro/aufs3-linux/tree/aufs3.18/Documentation/filesystems/aufs).
+
+  If your system doesn't have support for `dirperm1`, the issue describes a workaround.
+
+## CMD
+
+The `CMD` instruction has three forms:
+
+- `CMD ["executable","param1","param2"]` (*exec* form, this is the preferred form)
+- `CMD ["param1","param2"]` (as *default parameters to ENTRYPOINT*)
+- `CMD command param1 param2` (*shell* form)
+
+There can only be one `CMD` instruction in a `Dockerfile`.
If you list more than one `CMD`
+then only the last `CMD` will take effect.
+
+**The main purpose of a `CMD` is to provide defaults for an executing
+container.** These defaults can include an executable, or they can omit
+the executable, in which case you must specify an `ENTRYPOINT`
+instruction as well.
+
+> **Note**:
+> If `CMD` is used to provide default arguments for the `ENTRYPOINT`
+> instruction, both the `CMD` and `ENTRYPOINT` instructions should be specified
+> with the JSON array format.
+
+> **Note**:
+> The *exec* form is parsed as a JSON array, which means that
+> you must use double-quotes (") around words not single-quotes (').
+
+> **Note**:
+> Unlike the *shell* form, the *exec* form does not invoke a command shell.
+> This means that normal shell processing does not happen. For example,
+> `CMD [ "echo", "$HOME" ]` will not do variable substitution on `$HOME`.
+> If you want shell processing then either use the *shell* form or execute
+> a shell directly, for example: `CMD [ "sh", "-c", "echo $HOME" ]`.
+> When using the exec form and executing a shell directly, as in the case for
+> the shell form, it is the shell that is doing the environment variable
+> expansion, not docker.
+
+When used in the shell or exec formats, the `CMD` instruction sets the command
+to be executed when running the image.
+
+If you use the *shell* form of the `CMD`, then the `<command>` will execute in
+`/bin/sh -c`:
+
+    FROM ubuntu
+    CMD echo "This is a test." | wc -
+
+If you want to **run your** `<command>` **without a shell** then you must
+express the command as a JSON array and give the full path to the executable.
+**This array form is the preferred format of `CMD`.** Any additional parameters
+must be individually expressed as strings in the array:
+
+    FROM ubuntu
+    CMD ["/usr/bin/wc","--help"]
+
+If you would like your container to run the same executable every time, then
+you should consider using `ENTRYPOINT` in combination with `CMD`. See
+[*ENTRYPOINT*](#entrypoint).
+
+If the user specifies arguments to `docker run` then they will override the
+default specified in `CMD`.
+
+> **Note**:
+> Don't confuse `RUN` with `CMD`. `RUN` actually runs a command and commits
+> the result; `CMD` does not execute anything at build time, but specifies
+> the intended command for the image.
+
+## LABEL
+
+    LABEL <key>=<value> <key>=<value> <key>=<value> ...
+
+The `LABEL` instruction adds metadata to an image. A `LABEL` is a
+key-value pair. To include spaces within a `LABEL` value, use quotes and
+backslashes as you would in command-line parsing. A few usage examples:
+
+    LABEL "com.example.vendor"="ACME Incorporated"
+    LABEL com.example.label-with-value="foo"
+    LABEL version="1.0"
+    LABEL description="This text illustrates \
+    that label-values can span multiple lines."
+
+An image can have more than one label. To specify multiple labels,
+Docker recommends combining labels into a single `LABEL` instruction where
+possible. Each `LABEL` instruction produces a new layer which can result in an
+inefficient image if you use many labels. This example results in a single image
+layer.
+
+    LABEL multi.label1="value1" multi.label2="value2" other="value3"
+
+The above can also be written as:
+
+    LABEL multi.label1="value1" \
+          multi.label2="value2" \
+          other="value3"
+
+Labels are additive, including `LABEL`s in `FROM` images. If Docker
+encounters a label/key that already exists, the new value overrides any previous
+labels with identical keys.
+
+To view an image's labels, use the `docker inspect` command.
+ + "Labels": { + "com.example.vendor": "ACME Incorporated" + "com.example.label-with-value": "foo", + "version": "1.0", + "description": "This text illustrates that label-values can span multiple lines.", + "multi.label1": "value1", + "multi.label2": "value2", + "other": "value3" + }, + +## MAINTAINER (deprecated) + + MAINTAINER + +The `MAINTAINER` instruction sets the *Author* field of the generated images. +The `LABEL` instruction is a much more flexible version of this and you should use +it instead, as it enables setting any metadata you require, and can be viewed +easily, for example with `docker inspect`. To set a label corresponding to the +`MAINTAINER` field you could use: + + LABEL maintainer="SvenDowideit@home.org.au" + +This will then be visible from `docker inspect` with the other labels. + +## EXPOSE + + EXPOSE [...] + +The `EXPOSE` instruction informs Docker that the container listens on the +specified network ports at runtime. `EXPOSE` does not make the ports of the +container accessible to the host. To do that, you must use either the `-p` flag +to publish a range of ports or the `-P` flag to publish all of the exposed +ports. You can expose one port number and publish it externally under another +number. + +To set up port redirection on the host system, see [using the -P +flag](run.md#expose-incoming-ports). The Docker network feature supports +creating networks without the need to expose ports within the network, for +detailed information see the [overview of this +feature](https://docs.docker.com/engine/userguide/networking/)). + +## ENV + + ENV + ENV = ... + +The `ENV` instruction sets the environment variable `` to the value +``. This value will be in the environment of all "descendant" +`Dockerfile` commands and can be [replaced inline](#environment-replacement) in +many as well. + +The `ENV` instruction has two forms. The first form, `ENV `, +will set a single variable to a value. The entire string after the first +space will be treated as the `` - including characters such as +spaces and quotes. + +The second form, `ENV = ...`, allows for multiple variables to +be set at one time. Notice that the second form uses the equals sign (=) +in the syntax, while the first form does not. Like command line parsing, +quotes and backslashes can be used to include spaces within values. + +For example: + + ENV myName="John Doe" myDog=Rex\ The\ Dog \ + myCat=fluffy + +and + + ENV myName John Doe + ENV myDog Rex The Dog + ENV myCat fluffy + +will yield the same net results in the final image, but the first form +is preferred because it produces a single cache layer. + +The environment variables set using `ENV` will persist when a container is run +from the resulting image. You can view the values using `docker inspect`, and +change them using `docker run --env =`. + +> **Note**: +> Environment persistence can cause unexpected side effects. For example, +> setting `ENV DEBIAN_FRONTEND noninteractive` may confuse apt-get +> users on a Debian-based image. To set a value for a single command, use +> `RUN = `. + +## ADD + +ADD has two forms: + +- `ADD ... ` +- `ADD ["",... ""]` (this form is required for paths containing +whitespace) + +The `ADD` instruction copies new files, directories or remote file URLs from `` +and adds them to the filesystem of the image at the path ``. + +Multiple `` resource may be specified but if they are files or +directories then they must be relative to the source directory that is +being built (the context of the build). 
+
+Each `<src>` may contain wildcards and matching will be done using Go's
+[filepath.Match](http://golang.org/pkg/path/filepath#Match) rules. For example:
+
+    ADD hom* /mydir/        # adds all files starting with "hom"
+    ADD hom?.txt /mydir/    # ? is replaced with any single character, e.g., "home.txt"
+
+The `<dest>` is an absolute path, or a path relative to `WORKDIR`, into which
+the source will be copied inside the destination container.
+
+    ADD test relativeDir/          # adds "test" to `WORKDIR`/relativeDir/
+    ADD test /absoluteDir/         # adds "test" to /absoluteDir/
+
+When adding files or directories that contain special characters (such as `[`
+and `]`), you need to escape those paths following the Golang rules to prevent
+them from being treated as a matching pattern. For example, to add a file
+named `arr[0].txt`, use the following:
+
+    ADD arr[[]0].txt /mydir/    # copy a file named "arr[0].txt" to /mydir/
+
+
+All new files and directories are created with a UID and GID of 0.
+
+In the case where `<src>` is a remote file URL, the destination will
+have permissions of 600. If the remote file being retrieved has an HTTP
+`Last-Modified` header, the timestamp from that header will be used
+to set the `mtime` on the destination file. However, like any other file
+processed during an `ADD`, `mtime` will not be included in the determination
+of whether or not the file has changed and the cache should be updated.
+
+> **Note**:
+> If you build by passing a `Dockerfile` through STDIN (`docker
+> build - < somefile`), there is no build context, so the `Dockerfile`
+> can only contain a URL based `ADD` instruction. You can also pass a
+> compressed archive through STDIN (`docker build - < archive.tar.gz`);
+> the `Dockerfile` at the root of the archive and the rest of the
+> archive will be used as the context of the build.
+
+> **Note**:
+> If your URL files are protected using authentication, you
+> will need to use `RUN wget`, `RUN curl` or use another tool from
+> within the container as the `ADD` instruction does not support
+> authentication.
+
+> **Note**:
+> The first encountered `ADD` instruction will invalidate the cache for all
+> following instructions from the Dockerfile if the contents of `<src>` have
+> changed. This includes invalidating the cache for `RUN` instructions.
+> See the [`Dockerfile` Best Practices
+> guide](https://docs.docker.com/engine/userguide/eng-image/dockerfile_best-practices/#/build-cache) for more information.
+
+
+`ADD` obeys the following rules:
+
+- The `<src>` path must be inside the *context* of the build;
+  you cannot `ADD ../something /something`, because the first step of a
+  `docker build` is to send the context directory (and subdirectories) to the
+  docker daemon.
+
+- If `<src>` is a URL and `<dest>` does not end with a trailing slash, then a
+  file is downloaded from the URL and copied to `<dest>`.
+
+- If `<src>` is a URL and `<dest>` does end with a trailing slash, then the
+  filename is inferred from the URL and the file is downloaded to
+  `<dest>/<filename>`. For instance, `ADD http://example.com/foobar /` would
+  create the file `/foobar`. The URL must have a nontrivial path so that an
+  appropriate filename can be discovered in this case (`http://example.com`
+  will not work).
+
+- If `<src>` is a directory, the entire contents of the directory are copied,
+  including filesystem metadata.
+
+> **Note**:
+> The directory itself is not copied, just its contents.
+
+- If `<src>` is a *local* tar archive in a recognized compression format
+  (identity, gzip, bzip2 or xz) then it is unpacked as a directory.
 Resources
+  from *remote* URLs are **not** decompressed. When a directory is copied or
+  unpacked, it has the same behavior as `tar -x`; the result is the union of:
+
+  1. Whatever existed at the destination path and
+  2. The contents of the source tree, with conflicts resolved in favor
+     of "2." on a file-by-file basis.
+
+  > **Note**:
+  > Whether a file is identified as a recognized compression format or not
+  > is done solely based on the contents of the file, not the name of the file.
+  > For example, if an empty file happens to end with `.tar.gz` this will not
+  > be recognized as a compressed file and **will not** generate any kind of
+  > decompression error message, rather the file will simply be copied to the
+  > destination.
+
+- If `<src>` is any other kind of file, it is copied individually along with
+  its metadata. In this case, if `<dest>` ends with a trailing slash `/`, it
+  will be considered a directory and the contents of `<src>` will be written
+  at `<dest>/base(<src>)`.
+
+- If multiple `<src>` resources are specified, either directly or due to the
+  use of a wildcard, then `<dest>` must be a directory, and it must end with
+  a slash `/`.
+
+- If `<dest>` does not end with a trailing slash, it will be considered a
+  regular file and the contents of `<src>` will be written at `<dest>`.
+
+- If `<dest>` doesn't exist, it is created along with all missing directories
+  in its path.
+
+## COPY
+
+COPY has two forms:
+
+- `COPY <src>... <dest>`
+- `COPY ["<src>",... "<dest>"]` (this form is required for paths containing
+  whitespace)
+
+The `COPY` instruction copies new files or directories from `<src>`
+and adds them to the filesystem of the container at the path `<dest>`.
+
+Multiple `<src>` resources may be specified but they must be relative
+to the source directory that is being built (the context of the build).
+
+Each `<src>` may contain wildcards and matching will be done using Go's
+[filepath.Match](http://golang.org/pkg/path/filepath#Match) rules. For example:
+
+    COPY hom* /mydir/        # adds all files starting with "hom"
+    COPY hom?.txt /mydir/    # ? is replaced with any single character, e.g., "home.txt"
+
+The `<dest>` is an absolute path, or a path relative to `WORKDIR`, into which
+the source will be copied inside the destination container.
+
+    COPY test relativeDir/   # adds "test" to `WORKDIR`/relativeDir/
+    COPY test /absoluteDir/  # adds "test" to /absoluteDir/
+
+
+When copying files or directories that contain special characters (such as `[`
+and `]`), you need to escape those paths following the Golang rules to prevent
+them from being treated as a matching pattern. For example, to copy a file
+named `arr[0].txt`, use the following:
+
+    COPY arr[[]0].txt /mydir/    # copy a file named "arr[0].txt" to /mydir/
+
+All new files and directories are created with a UID and GID of 0.
+
+> **Note**:
+> If you build using STDIN (`docker build - < somefile`), there is no
+> build context, so `COPY` can't be used.
+
+Optionally `COPY` accepts a flag `--from=<name|index>` that can be used to set
+the source location to a previous build stage (created with `FROM .. AS <name>`)
+that will be used instead of a build context sent by the user. The flag also
+accepts a numeric index assigned for all previous build stages started with a
+`FROM` instruction. In case a build stage with a specified name can't be found, an
+image with the same name is attempted to be used instead.
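+
+For example, a two-stage build along these lines (the stage name `builder`
+and the images are illustrative, not prescribed) copies an artifact out of an
+earlier stage rather than out of the build context:
+
+```Dockerfile
+FROM golang:1.8 AS builder
+WORKDIR /go/src/app
+COPY . .
+# Disable cgo so the resulting binary does not depend on the build image's libc
+RUN CGO_ENABLED=0 go build -o /app .
+
+FROM alpine:3.6
+# Copy only the compiled binary out of the "builder" stage, not the sources
+COPY --from=builder /app /usr/local/bin/app
+CMD ["app"]
+```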
+
+`COPY` obeys the following rules:
+
+- The `<src>` path must be inside the *context* of the build;
+  you cannot `COPY ../something /something`, because the first step of a
+  `docker build` is to send the context directory (and subdirectories) to the
+  docker daemon.
+
+- If `<src>` is a directory, the entire contents of the directory are copied,
+  including filesystem metadata.
+
+> **Note**:
+> The directory itself is not copied, just its contents.
+
+- If `<src>` is any other kind of file, it is copied individually along with
+  its metadata. In this case, if `<dest>` ends with a trailing slash `/`, it
+  will be considered a directory and the contents of `<src>` will be written
+  at `<dest>/base(<src>)`.
+
+- If multiple `<src>` resources are specified, either directly or due to the
+  use of a wildcard, then `<dest>` must be a directory, and it must end with
+  a slash `/`.
+
+- If `<dest>` does not end with a trailing slash, it will be considered a
+  regular file and the contents of `<src>` will be written at `<dest>`.
+
+- If `<dest>` doesn't exist, it is created along with all missing directories
+  in its path.
+
+## ENTRYPOINT
+
+ENTRYPOINT has two forms:
+
+- `ENTRYPOINT ["executable", "param1", "param2"]`
+  (*exec* form, preferred)
+- `ENTRYPOINT command param1 param2`
+  (*shell* form)
+
+An `ENTRYPOINT` allows you to configure a container that will run as an executable.
+
+For example, the following will start nginx with its default content, listening
+on port 80:
+
+    docker run -i -t --rm -p 80:80 nginx
+
+Command line arguments to `docker run <image>` will be appended after all
+elements in an *exec* form `ENTRYPOINT`, and will override all elements specified
+using `CMD`.
+This allows arguments to be passed to the entry point, i.e., `docker run <image> -d`
+will pass the `-d` argument to the entry point.
+You can override the `ENTRYPOINT` instruction using the `docker run --entrypoint`
+flag.
+
+The *shell* form prevents any `CMD` or `run` command line arguments from being
+used, but has the disadvantage that your `ENTRYPOINT` will be started as a
+subcommand of `/bin/sh -c`, which does not pass signals.
+This means that the executable will not be the container's `PID 1` - and
+will _not_ receive Unix signals - so your executable will not receive a
+`SIGTERM` from `docker stop <container>`.
+
+Only the last `ENTRYPOINT` instruction in the `Dockerfile` will have an effect.
+
+### Exec form ENTRYPOINT example
+
+You can use the *exec* form of `ENTRYPOINT` to set fairly stable default commands
+and arguments and then use either form of `CMD` to set additional defaults that
+are more likely to be changed.
+
+    FROM ubuntu
+    ENTRYPOINT ["top", "-b"]
+    CMD ["-c"]
+
+When you run the container, you can see that `top` is the only process:
+
+    $ docker run -it --rm --name test top -H
+    top - 08:25:00 up  7:27,  0 users,  load average: 0.00, 0.01, 0.05
+    Threads:   1 total,   1 running,   0 sleeping,   0 stopped,   0 zombie
+    %Cpu(s):  0.1 us,  0.1 sy,  0.0 ni, 99.7 id,  0.0 wa,  0.0 hi,  0.0 si,  0.0 st
+    KiB Mem:   2056668 total,  1616832 used,   439836 free,    99352 buffers
+    KiB Swap:  1441840 total,        0 used,  1441840 free.  1324440 cached Mem
+
+      PID USER      PR  NI    VIRT    RES    SHR S  %CPU %MEM     TIME+ COMMAND
+        1 root      20   0   19744   2336   2080 R   0.0  0.1   0:00.04 top
+
+To examine the result further, you can use `docker exec`:
+
+    $ docker exec -it test ps aux
+    USER       PID %CPU %MEM    VSZ   RSS TTY      STAT START   TIME COMMAND
+    root         1  2.6  0.1  19752  2352 ?        Ss+  08:24   0:00 top -b -H
+    root         7  0.0  0.1  15572  2164 ?        R+   08:25   0:00 ps aux
+
+And you can gracefully request `top` to shut down using `docker stop test`.
+
+The following `Dockerfile` shows using the `ENTRYPOINT` to run Apache in the
+foreground (i.e., as `PID 1`):
+
+```
+FROM debian:stable
+RUN apt-get update && apt-get install -y --force-yes apache2
+EXPOSE 80 443
+VOLUME ["/var/www", "/var/log/apache2", "/etc/apache2"]
+ENTRYPOINT ["/usr/sbin/apache2ctl", "-D", "FOREGROUND"]
+```
+
+If you need to write a starter script for a single executable, you can ensure that
+the final executable receives the Unix signals by using `exec` and `gosu`
+commands:
+
+```bash
+#!/usr/bin/env bash
+set -e
+
+if [ "$1" = 'postgres' ]; then
+    chown -R postgres "$PGDATA"
+
+    if [ -z "$(ls -A "$PGDATA")" ]; then
+        gosu postgres initdb
+    fi
+
+    exec gosu postgres "$@"
+fi
+
+exec "$@"
+```
+
+Lastly, if you need to do some extra cleanup (or communicate with other containers)
+on shutdown, or are co-ordinating more than one executable, you may need to ensure
+that the `ENTRYPOINT` script receives the Unix signals, passes them on, and then
+does some more work:
+
+```
+#!/bin/sh
+# Note: I've written this using sh so it works in the busybox container too
+
+# USE the trap if you need to also do manual cleanup after the service is stopped,
+# or need to start multiple services in the one container
+trap "echo TRAPed signal" HUP INT QUIT TERM
+
+# start service in background here
+/usr/sbin/apachectl start
+
+echo "[hit enter key to exit] or run 'docker stop <container>'"
+read
+
+# stop service and clean up here
+echo "stopping apache"
+/usr/sbin/apachectl stop
+
+echo "exited $0"
+```
+
+If you run this image with `docker run -it --rm -p 80:80 --name test apache`,
+you can then examine the container's processes with `docker exec`, or `docker top`,
+and then ask the script to stop Apache:
+
+```bash
+$ docker exec -it test ps aux
+USER       PID %CPU %MEM    VSZ   RSS TTY      STAT START   TIME COMMAND
+root         1  0.1  0.0   4448   692 ?        Ss+  00:42   0:00 /bin/sh /run.sh 123 cmd cmd2
+root        19  0.0  0.2  71304  4440 ?        Ss   00:42   0:00 /usr/sbin/apache2 -k start
+www-data    20  0.2  0.2 360468  6004 ?        Sl   00:42   0:00 /usr/sbin/apache2 -k start
+www-data    21  0.2  0.2 360468  6000 ?        Sl   00:42   0:00 /usr/sbin/apache2 -k start
+root        81  0.0  0.1  15572  2140 ?        R+   00:44   0:00 ps aux
+$ docker top test
+PID                 USER                COMMAND
+10035               root                {run.sh} /bin/sh /run.sh 123 cmd cmd2
+10054               root                /usr/sbin/apache2 -k start
+10055               33                  /usr/sbin/apache2 -k start
+10056               33                  /usr/sbin/apache2 -k start
+$ /usr/bin/time docker stop test
+test
+real	0m 0.27s
+user	0m 0.03s
+sys	0m 0.03s
+```
+
+> **Note:** you can override the `ENTRYPOINT` setting using `--entrypoint`,
+> but this can only set the binary to *exec* (no `sh -c` will be used).
+
+> **Note**:
+> The *exec* form is parsed as a JSON array, which means that
+> you must use double-quotes (") around words not single-quotes (').
+
+> **Note**:
+> Unlike the *shell* form, the *exec* form does not invoke a command shell.
+> This means that normal shell processing does not happen. For example,
+> `ENTRYPOINT [ "echo", "$HOME" ]` will not do variable substitution on `$HOME`.
+> If you want shell processing then either use the *shell* form or execute
+> a shell directly, for example: `ENTRYPOINT [ "sh", "-c", "echo $HOME" ]`.
+> When using the exec form and executing a shell directly, as in the case for
+> the shell form, it is the shell that is doing the environment variable
+> expansion, not docker.
+
+### Shell form ENTRYPOINT example
+
+You can specify a plain string for the `ENTRYPOINT` and it will execute in `/bin/sh -c`.
+This form will use shell processing to substitute shell environment variables,
+and will ignore any `CMD` or `docker run` command line arguments.
+To ensure that `docker stop` will signal any long running `ENTRYPOINT` executable
+correctly, you need to remember to start it with `exec`:
+
+    FROM ubuntu
+    ENTRYPOINT exec top -b
+
+When you run this image, you'll see the single `PID 1` process:
+
+    $ docker run -it --rm --name test top
+    Mem: 1704520K used, 352148K free, 0K shrd, 0K buff, 140368121167873K cached
+    CPU:   5% usr   0% sys   0% nic  94% idle   0% io   0% irq   0% sirq
+    Load average: 0.08 0.03 0.05 2/98 6
+      PID  PPID USER     STAT   VSZ %VSZ %CPU COMMAND
+        1     0 root     R     3164   0%   0% top -b
+
+Which will exit cleanly on `docker stop`:
+
+    $ /usr/bin/time docker stop test
+    test
+    real	0m 0.20s
+    user	0m 0.02s
+    sys	0m 0.04s
+
+If you forget to add `exec` to the beginning of your `ENTRYPOINT`:
+
+    FROM ubuntu
+    ENTRYPOINT top -b
+    CMD --ignored-param1
+
+You can then run it (giving it a name for the next step):
+
+    $ docker run -it --name test top --ignored-param2
+    Mem: 1704184K used, 352484K free, 0K shrd, 0K buff, 140621524238337K cached
+    CPU:   9% usr   2% sys   0% nic  88% idle   0% io   0% irq   0% sirq
+    Load average: 0.01 0.02 0.05 2/101 7
+      PID  PPID USER     STAT   VSZ %VSZ %CPU COMMAND
+        1     0 root     S     3168   0%   0% /bin/sh -c top -b cmd cmd2
+        7     1 root     R     3164   0%   0% top -b
+
+You can see from the output of `top` that the specified `ENTRYPOINT` is not `PID 1`.
+
+If you then run `docker stop test`, the container will not exit cleanly - the
+`stop` command will be forced to send a `SIGKILL` after the timeout:
+
+    $ docker exec -it test ps aux
+    PID   USER     COMMAND
+        1 root     /bin/sh -c top -b cmd cmd2
+        7 root     top -b
+        8 root     ps aux
+    $ /usr/bin/time docker stop test
+    test
+    real	0m 10.19s
+    user	0m 0.04s
+    sys	0m 0.03s
+
+### Understand how CMD and ENTRYPOINT interact
+
+Both `CMD` and `ENTRYPOINT` instructions define what command gets executed when running a container.
+There are a few rules that describe their co-operation.
+
+1. A Dockerfile should specify at least one of `CMD` or `ENTRYPOINT` commands.
+
+2. `ENTRYPOINT` should be defined when using the container as an executable.
+
+3. `CMD` should be used as a way of defining default arguments for an `ENTRYPOINT` command
+   or for executing an ad-hoc command in a container.
+
+4. `CMD` will be overridden when running the container with alternative arguments.
+
+The table below shows what command is executed for different `ENTRYPOINT` / `CMD` combinations:
+
+|                                | No ENTRYPOINT              | ENTRYPOINT exec_entry p1_entry | ENTRYPOINT ["exec_entry", "p1_entry"]          |
+|--------------------------------|----------------------------|--------------------------------|------------------------------------------------|
+| **No CMD**                     | *error, not allowed*       | /bin/sh -c exec_entry p1_entry | exec_entry p1_entry                            |
+| **CMD ["exec_cmd", "p1_cmd"]** | exec_cmd p1_cmd            | /bin/sh -c exec_entry p1_entry | exec_entry p1_entry exec_cmd p1_cmd            |
+| **CMD ["p1_cmd", "p2_cmd"]**   | p1_cmd p2_cmd              | /bin/sh -c exec_entry p1_entry | exec_entry p1_entry p1_cmd p2_cmd              |
+| **CMD exec_cmd p1_cmd**        | /bin/sh -c exec_cmd p1_cmd | /bin/sh -c exec_entry p1_entry | exec_entry p1_entry /bin/sh -c exec_cmd p1_cmd |
+
+## VOLUME
+
+    VOLUME ["/data"]
+
+The `VOLUME` instruction creates a mount point with the specified name
+and marks it as holding externally mounted volumes from native host or other
+containers.
The value can be a JSON array, `VOLUME ["/var/log/"]`, or a plain
+string with multiple arguments, such as `VOLUME /var/log` or `VOLUME /var/log
+/var/db`. For more information/examples and mounting instructions via the
+Docker client, refer to
+[*Share Directories via Volumes*](https://docs.docker.com/engine/tutorials/dockervolumes/#/mount-a-host-directory-as-a-data-volume)
+documentation.
+
+The `docker run` command initializes the newly created volume with any data
+that exists at the specified location within the base image. For example,
+consider the following Dockerfile snippet:
+
+    FROM ubuntu
+    RUN mkdir /myvol
+    RUN echo "hello world" > /myvol/greeting
+    VOLUME /myvol
+
+This Dockerfile results in an image that causes `docker run` to
+create a new mount point at `/myvol` and copy the `greeting` file
+into the newly created volume.
+
+> **Note**:
+> When using Windows-based containers, the destination of a volume inside the
+> container must be one of: a non-existing or empty directory; or a drive other
+> than C:.
+
+> **Note**:
+> If any build steps change the data within the volume after it has been
+> declared, those changes will be discarded.
+
+> **Note**:
+> The list is parsed as a JSON array, which means that
+> you must use double-quotes (") around words not single-quotes (').
+
+## USER
+
+    USER daemon
+
+The `USER` instruction sets the user name or UID to use when running the image
+and for any `RUN`, `CMD` and `ENTRYPOINT` instructions that follow it in the
+`Dockerfile`.
+
+## WORKDIR
+
+    WORKDIR /path/to/workdir
+
+The `WORKDIR` instruction sets the working directory for any `RUN`, `CMD`,
+`ENTRYPOINT`, `COPY` and `ADD` instructions that follow it in the `Dockerfile`.
+If the `WORKDIR` doesn't exist, it will be created even if it's not used in any
+subsequent `Dockerfile` instruction.
+
+It can be used multiple times in one `Dockerfile`. If a relative path
+is provided, it will be relative to the path of the previous `WORKDIR`
+instruction. For example:
+
+    WORKDIR /a
+    WORKDIR b
+    WORKDIR c
+    RUN pwd
+
+The output of the final `pwd` command in this `Dockerfile` would be
+`/a/b/c`.
+
+The `WORKDIR` instruction can resolve environment variables previously set using
+`ENV`. You can only use environment variables explicitly set in the `Dockerfile`.
+For example:
+
+    ENV DIRPATH /path
+    WORKDIR $DIRPATH/$DIRNAME
+    RUN pwd
+
+The output of the final `pwd` command in this `Dockerfile` would be
+`/path/$DIRNAME`.
+
+## ARG
+
+    ARG <name>[=<default value>]
+
+The `ARG` instruction defines a variable that users can pass at build-time to
+the builder with the `docker build` command using the `--build-arg <varname>=<value>`
+flag. If a user specifies a build argument that was not
+defined in the Dockerfile, the build outputs a warning.
+
+```
+[Warning] One or more build-args [foo] were not consumed.
+```
+
+The Dockerfile author can define a single variable by specifying `ARG` once or many
+variables by specifying `ARG` more than once. For example, a valid Dockerfile:
+
+```
+FROM busybox
+ARG user1
+ARG buildno
+...
+```
+
+A Dockerfile author may optionally specify a default value for an `ARG` instruction:
+
+```
+FROM busybox
+ARG user1=someuser
+ARG buildno=1
+...
+```
+
+If an `ARG` value has a default and if there is no value passed at build-time, the
+builder uses the default.
+
+An `ARG` variable definition comes into effect from the line on which it is
+defined in the `Dockerfile`, not from the argument's use on the command-line or
+elsewhere.
For example, consider this Dockerfile:
+
+```
+1 FROM busybox
+2 USER ${user:-some_user}
+3 ARG user
+4 USER $user
+...
+```
+A user builds this file by calling:
+
+```
+$ docker build --build-arg user=what_user .
+```
+
+The `USER` at line 2 evaluates to `some_user` as the `user` variable is defined on the
+subsequent line 3. The `USER` at line 4 evaluates to `what_user` as `user` is
+defined and the `what_user` value was passed on the command line. Prior to its definition by an
+`ARG` instruction, any use of a variable results in an empty string.
+
+> **Warning:** It is not recommended to use build-time variables for
+> passing secrets like github keys, user credentials etc. Build-time variable
+> values are visible to any user of the image with the `docker history` command.
+
+You can use an `ARG` or an `ENV` instruction to specify variables that are
+available to the `RUN` instruction. Environment variables defined using the
+`ENV` instruction always override an `ARG` instruction of the same name. Consider
+this Dockerfile with an `ENV` and `ARG` instruction.
+
+```
+1 FROM ubuntu
+2 ARG CONT_IMG_VER
+3 ENV CONT_IMG_VER v1.0.0
+4 RUN echo $CONT_IMG_VER
+```
+Then, assume this image is built with this command:
+
+```
+$ docker build --build-arg CONT_IMG_VER=v2.0.1 .
+```
+
+In this case, the `RUN` instruction uses `v1.0.0` instead of the `ARG` setting
+passed by the user: `v2.0.1`. This behavior is similar to a shell
+script where a locally scoped variable overrides the variables passed as
+arguments or inherited from the environment, from its point of definition.
+
+Using the example above but a different `ENV` specification you can create more
+useful interactions between `ARG` and `ENV` instructions:
+
+```
+1 FROM ubuntu
+2 ARG CONT_IMG_VER
+3 ENV CONT_IMG_VER ${CONT_IMG_VER:-v1.0.0}
+4 RUN echo $CONT_IMG_VER
+```
+
+Unlike an `ARG` instruction, `ENV` values are always persisted in the built
+image. Consider a docker build without the `--build-arg` flag:
+
+```
+$ docker build .
+```
+
+Using this Dockerfile example, `CONT_IMG_VER` is still persisted in the image but
+its value would be `v1.0.0` as it is the default set in line 3 by the `ENV` instruction.
+
+The variable expansion technique in this example allows you to pass arguments
+from the command line and persist them in the final image by leveraging the
+`ENV` instruction. Variable expansion is only supported for [a limited set of
+Dockerfile instructions.](#environment-replacement)
+
+Docker has a set of predefined `ARG` variables that you can use without a
+corresponding `ARG` instruction in the Dockerfile.
+
+* `HTTP_PROXY`
+* `http_proxy`
+* `HTTPS_PROXY`
+* `https_proxy`
+* `FTP_PROXY`
+* `ftp_proxy`
+* `NO_PROXY`
+* `no_proxy`
+
+To use these, simply pass them on the command line using the flag:
+
+```
+--build-arg <varname>=<value>
+```
+
+By default, these pre-defined variables are excluded from the output of
+`docker history`. Excluding them reduces the risk of accidentally leaking
+sensitive authentication information in an `HTTP_PROXY` variable.
+
+For example, consider building the following Dockerfile using
+`--build-arg HTTP_PROXY=http://user:pass@proxy.lon.example.com`
+
+```Dockerfile
+FROM ubuntu
+RUN echo "Hello World"
+```
+
+In this case, the value of the `HTTP_PROXY` variable is not available in the
+`docker history` and is not cached. If you were to change location, and your
+proxy server changed to `http://user:pass@proxy.sfo.example.com`, a subsequent
+build does not result in a cache miss.
+
+If you need to override this behaviour then you may do so by adding an `ARG`
+statement in the Dockerfile as follows:
+
+```Dockerfile
+FROM ubuntu
+ARG HTTP_PROXY
+RUN echo "Hello World"
+```
+
+When building this Dockerfile, the `HTTP_PROXY` is preserved in the
+`docker history`, and changing its value invalidates the build cache.
+
+### Impact on build caching
+
+`ARG` variables are not persisted into the built image as `ENV` variables are.
+However, `ARG` variables do impact the build cache in similar ways. If a
+Dockerfile defines an `ARG` variable whose value is different from a previous
+build, then a "cache miss" occurs upon its first usage, not its definition. In
+particular, all `RUN` instructions following an `ARG` instruction use the `ARG`
+variable implicitly (as an environment variable), thus can cause a cache miss.
+All predefined `ARG` variables are exempt from caching unless there is a
+matching `ARG` statement in the `Dockerfile`.
+
+For example, consider these two Dockerfiles:
+
+```
+1 FROM ubuntu
+2 ARG CONT_IMG_VER
+3 RUN echo $CONT_IMG_VER
+```
+
+```
+1 FROM ubuntu
+2 ARG CONT_IMG_VER
+3 RUN echo hello
+```
+
+If you specify `--build-arg CONT_IMG_VER=<value>` on the command line, in both
+cases, the specification on line 2 does not cause a cache miss; line 3 does
+cause a cache miss. `ARG CONT_IMG_VER` causes the RUN line to be identified
+as the same as running `CONT_IMG_VER=<value> echo hello`, so if the `<value>`
+changes, we get a cache miss.
+
+Consider another example under the same command line:
+
+```
+1 FROM ubuntu
+2 ARG CONT_IMG_VER
+3 ENV CONT_IMG_VER $CONT_IMG_VER
+4 RUN echo $CONT_IMG_VER
+```
+In this example, the cache miss occurs on line 3. The miss happens because
+the variable's value in the `ENV` references the `ARG` variable and that
+variable is changed through the command line. In this example, the `ENV`
+command causes the image to include the value.
+
+If an `ENV` instruction overrides an `ARG` instruction of the same name, like
+this Dockerfile:
+
+```
+1 FROM ubuntu
+2 ARG CONT_IMG_VER
+3 ENV CONT_IMG_VER hello
+4 RUN echo $CONT_IMG_VER
+```
+
+Line 3 does not cause a cache miss because the value of `CONT_IMG_VER` is a
+constant (`hello`). As a result, the environment variables and values used on
+the `RUN` (line 4) don't change between builds.
+
+## ONBUILD
+
+    ONBUILD [INSTRUCTION]
+
+The `ONBUILD` instruction adds to the image a *trigger* instruction to
+be executed at a later time, when the image is used as the base for
+another build. The trigger will be executed in the context of the
+downstream build, as if it had been inserted immediately after the
+`FROM` instruction in the downstream `Dockerfile`.
+
+Any build instruction can be registered as a trigger.
+
+This is useful if you are building an image which will be used as a base
+to build other images, for example an application build environment or a
+daemon which may be customized with user-specific configuration.
+
+For example, if your image is a reusable Python application builder, it
+will require application source code to be added in a particular
+directory, and it might require a build script to be called *after*
+that. You can't just call `ADD` and `RUN` now, because you don't yet
+have access to the application source code, and it will be different for
+each application build.
You could simply provide application developers +with a boilerplate `Dockerfile` to copy-paste into their application, but +that is inefficient, error-prone and difficult to update because it +mixes with application-specific code. + +The solution is to use `ONBUILD` to register advance instructions to +run later, during the next build stage. + +Here's how it works: + +1. When it encounters an `ONBUILD` instruction, the builder adds a + trigger to the metadata of the image being built. The instruction + does not otherwise affect the current build. +2. At the end of the build, a list of all triggers is stored in the + image manifest, under the key `OnBuild`. They can be inspected with + the `docker inspect` command. +3. Later the image may be used as a base for a new build, using the + `FROM` instruction. As part of processing the `FROM` instruction, + the downstream builder looks for `ONBUILD` triggers, and executes + them in the same order they were registered. If any of the triggers + fail, the `FROM` instruction is aborted which in turn causes the + build to fail. If all triggers succeed, the `FROM` instruction + completes and the build continues as usual. +4. Triggers are cleared from the final image after being executed. In + other words they are not inherited by "grand-children" builds. + +For example you might add something like this: + + [...] + ONBUILD ADD . /app/src + ONBUILD RUN /usr/local/bin/python-build --dir /app/src + [...] + +> **Warning**: Chaining `ONBUILD` instructions using `ONBUILD ONBUILD` isn't allowed. + +> **Warning**: The `ONBUILD` instruction may not trigger `FROM` or `MAINTAINER` instructions. + +## STOPSIGNAL + + STOPSIGNAL signal + +The `STOPSIGNAL` instruction sets the system call signal that will be sent to the container to exit. +This signal can be a valid unsigned number that matches a position in the kernel's syscall table, for instance 9, +or a signal name in the format SIGNAME, for instance SIGKILL. + +## HEALTHCHECK + +The `HEALTHCHECK` instruction has two forms: + +* `HEALTHCHECK [OPTIONS] CMD command` (check container health by running a command inside the container) +* `HEALTHCHECK NONE` (disable any healthcheck inherited from the base image) + +The `HEALTHCHECK` instruction tells Docker how to test a container to check that +it is still working. This can detect cases such as a web server that is stuck in +an infinite loop and unable to handle new connections, even though the server +process is still running. + +When a container has a healthcheck specified, it has a _health status_ in +addition to its normal status. This status is initially `starting`. Whenever a +health check passes, it becomes `healthy` (whatever state it was previously in). +After a certain number of consecutive failures, it becomes `unhealthy`. + +The options that can appear before `CMD` are: + +* `--interval=DURATION` (default: `30s`) +* `--timeout=DURATION` (default: `30s`) +* `--start-period=DURATION` (default: `0s`) +* `--retries=N` (default: `3`) + +The health check will first run **interval** seconds after the container is +started, and then again **interval** seconds after each previous check completes. + +If a single run of the check takes longer than **timeout** seconds then the check +is considered to have failed. + +It takes **retries** consecutive failures of the health check for the container +to be considered `unhealthy`. + +**start period** provides initialization time for containers that need time to bootstrap. 
+Probe failure during that period will not be counted towards the maximum number of retries.
+However, if a health check succeeds during the start period, the container is considered
+started and all consecutive failures will be counted towards the maximum number of retries.
+
+There can only be one `HEALTHCHECK` instruction in a Dockerfile. If you list
+more than one then only the last `HEALTHCHECK` will take effect.
+
+The command after the `CMD` keyword can be either a shell command (e.g. `HEALTHCHECK
+CMD /bin/check-running`) or an _exec_ array (as with other Dockerfile commands;
+see e.g. `ENTRYPOINT` for details).
+
+The command's exit status indicates the health status of the container.
+The possible values are:
+
+- 0: success - the container is healthy and ready for use
+- 1: unhealthy - the container is not working correctly
+- 2: reserved - do not use this exit code
+
+For example, to check every five minutes or so that a web-server is able to
+serve the site's main page within three seconds:
+
+    HEALTHCHECK --interval=5m --timeout=3s \
+      CMD curl -f http://localhost/ || exit 1
+
+To help debug failing probes, any output text (UTF-8 encoded) that the command writes
+on stdout or stderr will be stored in the health status and can be queried with
+`docker inspect`. Such output should be kept short (only the first 4096 bytes
+are stored currently).
+
+When the health status of a container changes, a `health_status` event is
+generated with the new status.
+
+The `HEALTHCHECK` feature was added in Docker 1.12.
+
+
+## SHELL
+
+    SHELL ["executable", "parameters"]
+
+The `SHELL` instruction allows the default shell used for the *shell* form of
+commands to be overridden. The default shell on Linux is `["/bin/sh", "-c"]`, and on
+Windows is `["cmd", "/S", "/C"]`. The `SHELL` instruction *must* be written in JSON
+form in a Dockerfile.
+
+The `SHELL` instruction is particularly useful on Windows where there are
+two commonly used and quite different native shells: `cmd` and `powershell`, as
+well as alternate shells available including `sh`.
+
+The `SHELL` instruction can appear multiple times. Each `SHELL` instruction overrides
+all previous `SHELL` instructions, and affects all subsequent instructions. For example:
+
+    FROM microsoft/windowsservercore
+
+    # Executed as cmd /S /C echo default
+    RUN echo default
+
+    # Executed as cmd /S /C powershell -command Write-Host default
+    RUN powershell -command Write-Host default
+
+    # Executed as powershell -command Write-Host hello
+    SHELL ["powershell", "-command"]
+    RUN Write-Host hello
+
+    # Executed as cmd /S /C echo hello
+    SHELL ["cmd", "/S", "/C"]
+    RUN echo hello
+
+The following instructions can be affected by the `SHELL` instruction when the
+*shell* form of them is used in a Dockerfile: `RUN`, `CMD` and `ENTRYPOINT`.
+
+The following example is a common pattern found on Windows which can be
+streamlined by using the `SHELL` instruction:
+
+    ...
+    RUN powershell -command Execute-MyCmdlet -param1 "c:\foo.txt"
+    ...
+
+The command invoked by docker will be:
+
+    cmd /S /C powershell -command Execute-MyCmdlet -param1 "c:\foo.txt"
+
+This is inefficient for two reasons. First, there is an un-necessary cmd.exe command
+processor (aka shell) being invoked. Second, each `RUN` instruction in the *shell*
+form requires an extra `powershell -command` prefixing the command.
+
+To make this more efficient, one of two mechanisms can be employed. One is to
+use the JSON form of the RUN command such as:
+
+    ...
+ RUN ["powershell", "-command", "Execute-MyCmdlet", "-param1 \"c:\\foo.txt\""] + ... + +While the JSON form is unambiguous and does not use the un-necessary cmd.exe, +it does require more verbosity through double-quoting and escaping. The alternate +mechanism is to use the `SHELL` instruction and the *shell* form, +making a more natural syntax for Windows users, especially when combined with +the `escape` parser directive: + + # escape=` + + FROM microsoft/nanoserver + SHELL ["powershell","-command"] + RUN New-Item -ItemType Directory C:\Example + ADD Execute-MyCmdlet.ps1 c:\example\ + RUN c:\example\Execute-MyCmdlet -sample 'hello world' + +Resulting in: + + PS E:\docker\build\shell> docker build -t shell . + Sending build context to Docker daemon 4.096 kB + Step 1/5 : FROM microsoft/nanoserver + ---> 22738ff49c6d + Step 2/5 : SHELL powershell -command + ---> Running in 6fcdb6855ae2 + ---> 6331462d4300 + Removing intermediate container 6fcdb6855ae2 + Step 3/5 : RUN New-Item -ItemType Directory C:\Example + ---> Running in d0eef8386e97 + + + Directory: C:\ + + + Mode LastWriteTime Length Name + ---- ------------- ------ ---- + d----- 10/28/2016 11:26 AM Example + + + ---> 3f2fbf1395d9 + Removing intermediate container d0eef8386e97 + Step 4/5 : ADD Execute-MyCmdlet.ps1 c:\example\ + ---> a955b2621c31 + Removing intermediate container b825593d39fc + Step 5/5 : RUN c:\example\Execute-MyCmdlet 'hello world' + ---> Running in be6d8e63fe75 + hello world + ---> 8e559e9bf424 + Removing intermediate container be6d8e63fe75 + Successfully built 8e559e9bf424 + PS E:\docker\build\shell> + +The `SHELL` instruction could also be used to modify the way in which +a shell operates. For example, using `SHELL cmd /S /C /V:ON|OFF` on Windows, delayed +environment variable expansion semantics could be modified. + +The `SHELL` instruction can also be used on Linux should an alternate shell be +required such as `zsh`, `csh`, `tcsh` and others. + +The `SHELL` feature was added in Docker 1.12. + +## Dockerfile examples + +Below you can see some examples of Dockerfile syntax. If you're interested in +something more realistic, take a look at the list of [Dockerization examples](https://docs.docker.com/engine/examples/). + +``` +# Nginx +# +# VERSION 0.0.1 + +FROM ubuntu +LABEL Description="This image is used to start the foobar executable" Vendor="ACME Products" Version="1.0" +RUN apt-get update && apt-get install -y inotify-tools nginx apache2 openssh-server +``` + +``` +# Firefox over VNC +# +# VERSION 0.3 + +FROM ubuntu + +# Install vnc, xvfb in order to create a 'fake' display and firefox +RUN apt-get update && apt-get install -y x11vnc xvfb firefox +RUN mkdir ~/.vnc +# Setup a password +RUN x11vnc -storepasswd 1234 ~/.vnc/passwd +# Autostart firefox (might not be the best way, but it does the trick) +RUN bash -c 'echo "firefox" >> /.bashrc' + +EXPOSE 5900 +CMD ["x11vnc", "-forever", "-usepw", "-create"] +``` + +``` +# Multiple images example +# +# VERSION 0.1 + +FROM ubuntu +RUN echo foo > bar +# Will output something like ===> 907ad6c2736f + +FROM ubuntu +RUN echo moo > oink +# Will output something like ===> 695d7793cbe4 + +# You'll now have two images, 907ad6c2736f with /bar, and 695d7793cbe4 with +# /oink. 
+```
diff --git a/components/engine/docs/reference/commandline/attach.md b/components/engine/docs/reference/commandline/attach.md
new file mode 100644
index 0000000000..4153331bad
--- /dev/null
+++ b/components/engine/docs/reference/commandline/attach.md
@@ -0,0 +1,160 @@
+---
+title: "attach"
+description: "The attach command description and usage"
+keywords: "attach, running, container"
+---
+
+
+
+# attach
+
+```markdown
+Usage:  docker attach [OPTIONS] CONTAINER
+
+Attach local standard input, output, and error streams to a running container
+
+Options:
+      --detach-keys string   Override the key sequence for detaching a container
+      --help                 Print usage
+      --no-stdin             Do not attach STDIN
+      --sig-proxy            Proxy all received signals to the process (default true)
+```
+
+## Description
+
+Use `docker attach` to attach your terminal's standard input, output, and error
+(or any combination of the three) to a running container using the container's
+ID or name. This allows you to view its ongoing output or to control it
+interactively, as though the commands were running directly in your terminal.
+
+> **Note:**
+> The `attach` command will display the output of the `ENTRYPOINT/CMD` process. This
+> can appear as if the attach command is hung when in fact the process may simply
+> not be interacting with the terminal at that time.
+
+You can attach to the same contained process multiple times simultaneously,
+even as a different user with the appropriate permissions.
+
+To stop a container, use `CTRL-c`. This key sequence sends `SIGKILL` to the
+container. If `--sig-proxy` is true (the default), `CTRL-c` sends a `SIGINT` to
+the container. You can detach from a container and leave it running using the
+`CTRL-p CTRL-q` key sequence.
+
+> **Note:**
+> A process running as PID 1 inside a container is treated specially by
+> Linux: it ignores any signal with the default action. So, the process
+> will not terminate on `SIGINT` or `SIGTERM` unless it is coded to do
+> so.
+
+It is forbidden to redirect the standard input of a `docker attach` command
+while attaching to a tty-enabled container (i.e., launched with `-t`).
+
+While a client is connected to a container's stdio using `docker attach`, Docker
+uses a ~1MB memory buffer to maximize the throughput of the application. If
+this buffer is filled, the speed of the API connection will start to affect
+the speed at which the process can write its output. This is similar to other
+applications like SSH. Because of this, it is not recommended to run
+performance-critical applications that generate a lot of output in the
+foreground over a slow client connection. Instead, users should use the
+`docker logs` command to get access to the logs.
+
+### Override the detach sequence
+
+If you want, you can configure an override for the Docker detach key sequence.
+This is useful if the Docker default sequence conflicts with a key sequence you
+use for other applications. There are two ways to define your own detach key
+sequence: as a per-container override, or as a default in your client
+configuration.
+
+To override the sequence for an individual container, use the
+`--detach-keys="<sequence>"` flag with the `docker attach` command.
The format of
+the `<sequence>` is either a letter [a-Z], or the `ctrl-` combined with any of
+the following:
+
+* `a-z` (a single lowercase alpha character)
+* `@` (at sign)
+* `[` (left bracket)
+* `\\` (two backward slashes)
+* `_` (underscore)
+* `^` (caret)
+
+These `a`, `ctrl-a`, `X`, or `ctrl-\\` values are all examples of valid key
+sequences. To configure a different default key sequence for all
+containers, see the [**Configuration file** section](cli.md#configuration-files).
+
+## Examples
+
+### Attach to and detach from a running container
+
+```bash
+$ docker run -d --name topdemo ubuntu /usr/bin/top -b
+
+$ docker attach topdemo
+
+top - 02:05:52 up  3:05,  0 users,  load average: 0.01, 0.02, 0.05
+Tasks:   1 total,   1 running,   0 sleeping,   0 stopped,   0 zombie
+Cpu(s):  0.1%us,  0.2%sy,  0.0%ni, 99.7%id,  0.0%wa,  0.0%hi,  0.0%si,  0.0%st
+Mem:    373572k total,   355560k used,    18012k free,    27872k buffers
+Swap:   786428k total,        0k used,   786428k free,   221740k cached
+
+PID USER      PR  NI  VIRT  RES  SHR S %CPU %MEM    TIME+  COMMAND
+ 1 root      20   0 17200 1116  912 R    0  0.3   0:00.03 top
+
+ top - 02:05:55 up  3:05,  0 users,  load average: 0.01, 0.02, 0.05
+ Tasks:   1 total,   1 running,   0 sleeping,   0 stopped,   0 zombie
+ Cpu(s):  0.0%us,  0.2%sy,  0.0%ni, 99.8%id,  0.0%wa,  0.0%hi,  0.0%si,  0.0%st
+ Mem:    373572k total,   355244k used,    18328k free,    27872k buffers
+ Swap:   786428k total,        0k used,   786428k free,   221776k cached
+
+ PID USER      PR  NI  VIRT  RES  SHR S %CPU %MEM    TIME+  COMMAND
+  1 root      20   0 17208 1144  932 R    0  0.3   0:00.03 top
+
+
+ top - 02:05:58 up  3:06,  0 users,  load average: 0.01, 0.02, 0.05
+ Tasks:   1 total,   1 running,   0 sleeping,   0 stopped,   0 zombie
+ Cpu(s):  0.2%us,  0.3%sy,  0.0%ni, 99.5%id,  0.0%wa,  0.0%hi,  0.0%si,  0.0%st
+ Mem:    373572k total,   355780k used,    17792k free,    27880k buffers
+ Swap:   786428k total,        0k used,   786428k free,   221776k cached
+
+ PID USER      PR  NI  VIRT  RES  SHR S %CPU %MEM    TIME+  COMMAND
+  1 root      20   0 17208 1144  932 R    0  0.3   0:00.03 top
+^C$
+
+$ echo $?
+0
+$ docker ps -a | grep topdemo
+
+7998ac8581f9        ubuntu:14.04        "/usr/bin/top -b"   38 seconds ago      Exited (0) 21 seconds ago                          topdemo
+```
+
+### Get the exit code of the container's command
+
+In this second example, you can see that the exit code returned by the `bash`
+process is also returned by the `docker attach` command to its caller:
+
+```bash
+   $ docker run --name test -d -it debian
+
+   275c44472aebd77c926d4527885bb09f2f6db21d878c75f0a1c212c03d3bcfab
+
+   $ docker attach test
+
+   root@f38c87f2a42d:/# exit 13
+
+   exit
+
+   $ echo $?
+ + 13 + + $ docker ps -a | grep test + + 275c44472aeb debian:7 "/bin/bash" 26 seconds ago Exited (13) 17 seconds ago test +``` diff --git a/components/engine/docs/reference/commandline/build.md b/components/engine/docs/reference/commandline/build.md new file mode 100644 index 0000000000..9f587372c6 --- /dev/null +++ b/components/engine/docs/reference/commandline/build.md @@ -0,0 +1,581 @@ +--- +title: "build" +description: "The build command description and usage" +keywords: "build, docker, image" +--- + + + +# build + +```markdown +Usage: docker build [OPTIONS] PATH | URL | - + +Build an image from a Dockerfile + +Options: + --add-host value Add a custom host-to-IP mapping (host:ip) (default []) + --build-arg value Set build-time variables (default []) + --cache-from value Images to consider as cache sources (default []) + --cgroup-parent string Optional parent cgroup for the container + --compress Compress the build context using gzip + --cpu-period int Limit the CPU CFS (Completely Fair Scheduler) period + --cpu-quota int Limit the CPU CFS (Completely Fair Scheduler) quota + -c, --cpu-shares int CPU shares (relative weight) + --cpuset-cpus string CPUs in which to allow execution (0-3, 0,1) + --cpuset-mems string MEMs in which to allow execution (0-3, 0,1) + --disable-content-trust Skip image verification (default true) + -f, --file string Name of the Dockerfile (Default is 'PATH/Dockerfile') + --force-rm Always remove intermediate containers + --help Print usage + --iidfile string Write the image ID to the file + --isolation string Container isolation technology + --label value Set metadata for an image (default []) + -m, --memory string Memory limit + --memory-swap string Swap limit equal to memory plus swap: '-1' to enable unlimited swap + --network string Set the networking mode for the RUN instructions during build + 'bridge': use default Docker bridge + 'none': no networking + 'container:': reuse another container's network stack + 'host': use the Docker host network stack + '|': connect to a user-defined network + --no-cache Do not use cache when building the image + --pull Always attempt to pull a newer version of the image + -q, --quiet Suppress the build output and print image ID on success + --rm Remove intermediate containers after a successful build (default true) + --security-opt value Security Options (default []) + --shm-size bytes Size of /dev/shm + The format is ``. `number` must be greater than `0`. + Unit is optional and can be `b` (bytes), `k` (kilobytes), `m` (megabytes), + or `g` (gigabytes). If you omit the unit, the system uses bytes. + --squash Squash newly built layers into a single new layer (**Experimental Only**) + -t, --tag value Name and optionally a tag in the 'name:tag' format (default []) + --target string Set the target build stage to build. + --ulimit value Ulimit options (default []) +``` + +## Description + +Builds Docker images from a Dockerfile and a "context". A build's context is +the files located in the specified `PATH` or `URL`. The build process can refer +to any of the files in the context. For example, your build can use an +[*ADD*](../builder.md#add) instruction to reference a file in the +context. + +The `URL` parameter can refer to three kinds of resources: Git repositories, +pre-packaged tarball contexts and plain text files. + +### Git repositories + +When the `URL` parameter points to the location of a Git repository, the +repository acts as the build context. The system recursively fetches the +repository and its submodules. 
The commit history is not preserved. A
+repository is first pulled into a temporary directory on your local host. After
+the pull succeeds, the directory is sent to the Docker daemon as the context.
+This local copy gives you the ability to access private repositories using local
+user credentials, VPNs, and so forth.
+
+> **Note:**
+> If the `URL` parameter contains a fragment the system will recursively clone
+> the repository and its submodules using a `git clone --recursive` command.
+
+Git URLs accept context configuration in their fragment section, separated by a
+colon `:`. The first part represents the reference that Git will check out;
+this can be either a branch, a tag, or a remote reference. The second part
+represents a subdirectory inside the repository that will be used as a build
+context.
+
+For example, run this command to use a directory called `docker` in the branch
+`container`:
+
+```bash
+$ docker build https://github.com/docker/rootfs.git#container:docker
+```
+
+The following table represents all the valid suffixes with their build
+contexts:
+
+Build Syntax Suffix             | Commit Used           | Build Context Used
+--------------------------------|-----------------------|-------------------
+`myrepo.git`                    | `refs/heads/master`   | `/`
+`myrepo.git#mytag`              | `refs/tags/mytag`     | `/`
+`myrepo.git#mybranch`           | `refs/heads/mybranch` | `/`
+`myrepo.git#pull/42/head`       | `refs/pull/42/head`   | `/`
+`myrepo.git#:myfolder`          | `refs/heads/master`   | `/myfolder`
+`myrepo.git#master:myfolder`    | `refs/heads/master`   | `/myfolder`
+`myrepo.git#mytag:myfolder`     | `refs/tags/mytag`     | `/myfolder`
+`myrepo.git#mybranch:myfolder`  | `refs/heads/mybranch` | `/myfolder`
+
+
+### Tarball contexts
+
+If you pass a URL to a remote tarball, the URL itself is sent to the daemon:
+
+```bash
+$ docker build http://server/context.tar.gz
+```
+
+The download operation will be performed on the host the Docker daemon is
+running on, which is not necessarily the same host from which the build command
+is being issued. The Docker daemon will fetch `context.tar.gz` and use it as the
+build context. Tarball contexts must be tar archives conforming to the standard
+`tar` UNIX format and can be compressed with any one of the 'xz', 'bzip2',
+'gzip' or 'identity' (no compression) formats.
+
+### Text files
+
+Instead of specifying a context, you can pass a single `Dockerfile` in the
+`URL` or pipe the file in via `STDIN`. To pipe a `Dockerfile` from `STDIN`:
+
+```bash
+$ docker build - < Dockerfile
+```
+
+With PowerShell on Windows, you can run:
+
+```powershell
+Get-Content Dockerfile | docker build -
+```
+
+If you use `STDIN` or specify a `URL` pointing to a plain text file, the system
+places the contents into a file called `Dockerfile`, and any `-f`, `--file`
+option is ignored. In this scenario, there is no context.
+
+By default, the `docker build` command will look for a `Dockerfile` at the root
+of the build context. The `-f`, `--file`, option lets you specify the path to
+an alternative file to use instead. This is useful in cases where the same set
+of files is used for multiple builds. The path must be to a file within the
+build context. If a relative path is specified then it is interpreted as
+relative to the root of the context.
+
+In most cases, it's best to put each Dockerfile in an empty directory. Then,
+add to that directory only the files needed for building the Dockerfile. To
+increase the build's performance, you can exclude files and directories by
+adding a `.dockerignore` file to that directory as well.
For information on +creating one, see the [.dockerignore file](../builder.md#dockerignore-file). + +If the Docker client loses connection to the daemon, the build is canceled. +This happens if you interrupt the Docker client with `CTRL-c` or if the Docker +client is killed for any reason. If the build initiated a pull which is still +running at the time the build is cancelled, the pull is cancelled as well. + +## Return code + +On a successful build, a return code of success `0` will be returned. When the +build fails, a non-zero failure code will be returned. + +There should be informational output of the reason for failure output to +`STDERR`: + +```bash +$ docker build -t fail . + +Sending build context to Docker daemon 2.048 kB +Sending build context to Docker daemon +Step 1/3 : FROM busybox + ---> 4986bf8c1536 +Step 2/3 : RUN exit 13 + ---> Running in e26670ec7a0a +INFO[0000] The command [/bin/sh -c exit 13] returned a non-zero code: 13 +$ echo $? +1 +``` + +See also: + +[*Dockerfile Reference*](../builder.md). + +## Examples + +### Build with PATH + +```bash +$ docker build . + +Uploading context 10240 bytes +Step 1/3 : FROM busybox +Pulling repository busybox + ---> e9aa60c60128MB/2.284 MB (100%) endpoint: https://cdn-registry-1.docker.io/v1/ +Step 2/3 : RUN ls -lh / + ---> Running in 9c9e81692ae9 +total 24 +drwxr-xr-x 2 root root 4.0K Mar 12 2013 bin +drwxr-xr-x 5 root root 4.0K Oct 19 00:19 dev +drwxr-xr-x 2 root root 4.0K Oct 19 00:19 etc +drwxr-xr-x 2 root root 4.0K Nov 15 23:34 lib +lrwxrwxrwx 1 root root 3 Mar 12 2013 lib64 -> lib +dr-xr-xr-x 116 root root 0 Nov 15 23:34 proc +lrwxrwxrwx 1 root root 3 Mar 12 2013 sbin -> bin +dr-xr-xr-x 13 root root 0 Nov 15 23:34 sys +drwxr-xr-x 2 root root 4.0K Mar 12 2013 tmp +drwxr-xr-x 2 root root 4.0K Nov 15 23:34 usr + ---> b35f4035db3f +Step 3/3 : CMD echo Hello world + ---> Running in 02071fceb21b + ---> f52f38b7823e +Successfully built f52f38b7823e +Removing intermediate container 9c9e81692ae9 +Removing intermediate container 02071fceb21b +``` + +This example specifies that the `PATH` is `.`, and so all the files in the +local directory get `tar`d and sent to the Docker daemon. The `PATH` specifies +where to find the files for the "context" of the build on the Docker daemon. +Remember that the daemon could be running on a remote machine and that no +parsing of the Dockerfile happens at the client side (where you're running +`docker build`). That means that *all* the files at `PATH` get sent, not just +the ones listed to [*ADD*](../builder.md#add) in the Dockerfile. + +The transfer of context from the local machine to the Docker daemon is what the +`docker` client means when you see the "Sending build context" message. + +If you wish to keep the intermediate containers after the build is complete, +you must use `--rm=false`. This does not affect the build cache. + +### Build with URL + +```bash +$ docker build github.com/creack/docker-firefox +``` + +This will clone the GitHub repository and use the cloned repository as context. +The Dockerfile at the root of the repository is used as Dockerfile. You can +specify an arbitrary Git repository by using the `git://` or `git@` scheme. 
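+
+As a quick illustration of the fragment syntax described above, a sketch such
+as the following (the `v1.0` tag is hypothetical) would check out that tag and
+use the repository root as the build context:
+
+```bash
+$ docker build github.com/creack/docker-firefox#v1.0
+```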
+ +```bash +$ docker build -f ctx/Dockerfile http://server/ctx.tar.gz + +Downloading context: http://server/ctx.tar.gz [===================>] 240 B/240 B +Step 1/3 : FROM busybox + ---> 8c2e06607696 +Step 2/3 : ADD ctx/container.cfg / + ---> e7829950cee3 +Removing intermediate container b35224abf821 +Step 3/3 : CMD /bin/ls + ---> Running in fbc63d321d73 + ---> 3286931702ad +Removing intermediate container fbc63d321d73 +Successfully built 377c409b35e4 +``` + +This sends the URL `http://server/ctx.tar.gz` to the Docker daemon, which +downloads and extracts the referenced tarball. The `-f ctx/Dockerfile` +parameter specifies a path inside `ctx.tar.gz` to the `Dockerfile` that is used +to build the image. Any `ADD` commands in that `Dockerfile` that refers to local +paths must be relative to the root of the contents inside `ctx.tar.gz`. In the +example above, the tarball contains a directory `ctx/`, so the `ADD +ctx/container.cfg /` operation works as expected. + +### Build with - + +```bash +$ docker build - < Dockerfile +``` + +This will read a Dockerfile from `STDIN` without context. Due to the lack of a +context, no contents of any local directory will be sent to the Docker daemon. +Since there is no context, a Dockerfile `ADD` only works if it refers to a +remote URL. + +```bash +$ docker build - < context.tar.gz +``` + +This will build an image for a compressed context read from `STDIN`. Supported +formats are: bzip2, gzip and xz. + +### Use a .dockerignore file + +```bash +$ docker build . + +Uploading context 18.829 MB +Uploading context +Step 1/2 : FROM busybox + ---> 769b9341d937 +Step 2/2 : CMD echo Hello world + ---> Using cache + ---> 99cc1ad10469 +Successfully built 99cc1ad10469 +$ echo ".git" > .dockerignore +$ docker build . +Uploading context 6.76 MB +Uploading context +Step 1/2 : FROM busybox + ---> 769b9341d937 +Step 2/2 : CMD echo Hello world + ---> Using cache + ---> 99cc1ad10469 +Successfully built 99cc1ad10469 +``` + +This example shows the use of the `.dockerignore` file to exclude the `.git` +directory from the context. Its effect can be seen in the changed size of the +uploaded context. The builder reference contains detailed information on +[creating a .dockerignore file](../builder.md#dockerignore-file) + +### Tag an image (-t) + +```bash +$ docker build -t vieux/apache:2.0 . +``` + +This will build like the previous example, but it will then tag the resulting +image. The repository name will be `vieux/apache` and the tag will be `2.0`. +[Read more about valid tags](tag.md). + +You can apply multiple tags to an image. For example, you can apply the `latest` +tag to a newly built image and add another tag that references a specific +version. +For example, to tag an image both as `whenry/fedora-jboss:latest` and +`whenry/fedora-jboss:v2.1`, use the following: + +```bash +$ docker build -t whenry/fedora-jboss:latest -t whenry/fedora-jboss:v2.1 . +``` + +### Specify a Dockerfile (-f) + +```bash +$ docker build -f Dockerfile.debug . +``` + +This will use a file called `Dockerfile.debug` for the build instructions +instead of `Dockerfile`. + +```bash +$ curl example.com/remote/Dockerfile | docker build -f - . +``` + +The above command will use the current directory as the build context and read +a Dockerfile from stdin. + +```bash +$ docker build -f dockerfiles/Dockerfile.debug -t myapp_debug . +$ docker build -f dockerfiles/Dockerfile.prod -t myapp_prod . 
+``` + +The above commands will build the current build context (as specified by the +`.`) twice, once using a debug version of a `Dockerfile` and once using a +production version. + +```bash +$ cd /home/me/myapp/some/dir/really/deep +$ docker build -f /home/me/myapp/dockerfiles/debug /home/me/myapp +$ docker build -f ../../../../dockerfiles/debug /home/me/myapp +``` + +These two `docker build` commands do the exact same thing. They both use the +contents of the `debug` file instead of looking for a `Dockerfile` and will use +`/home/me/myapp` as the root of the build context. Note that `debug` is in the +directory structure of the build context, regardless of how you refer to it on +the command line. + +> **Note:** +> `docker build` will return a `no such file or directory` error if the +> file or directory does not exist in the uploaded context. This may +> happen if there is no context, or if you specify a file that is +> elsewhere on the Host system. The context is limited to the current +> directory (and its children) for security reasons, and to ensure +> repeatable builds on remote Docker hosts. This is also the reason why +> `ADD ../file` will not work. + +### Use a custom parent cgroup (--cgroup-parent) + +When `docker build` is run with the `--cgroup-parent` option the containers +used in the build will be run with the [corresponding `docker run` +flag](../run.md#specifying-custom-cgroups). + +### Set ulimits in container (--ulimit) + +Using the `--ulimit` option with `docker build` will cause each build step's +container to be started using those [`--ulimit` +flag values](./run.md#set-ulimits-in-container-ulimit). + +### Set build-time variables (--build-arg) + +You can use `ENV` instructions in a Dockerfile to define variable +values. These values persist in the built image. However, often +persistence is not what you want. Users want to specify variables differently +depending on which host they build an image on. + +A good example is `http_proxy` or source versions for pulling intermediate +files. The `ARG` instruction lets Dockerfile authors define values that users +can set at build-time using the `--build-arg` flag: + +```bash +$ docker build --build-arg HTTP_PROXY=http://10.20.30.2:1234 . +``` + +This flag allows you to pass the build-time variables that are +accessed like regular environment variables in the `RUN` instruction of the +Dockerfile. Also, these values don't persist in the intermediate or final images +like `ENV` values do. + +Using this flag will not alter the output you see when the `ARG` lines from the +Dockerfile are echoed during the build process. + +For detailed information on using `ARG` and `ENV` instructions, see the +[Dockerfile reference](../builder.md). + +### Optional security options (--security-opt) + +This flag is only supported on a daemon running on Windows, and only supports +the `credentialspec` option. The `credentialspec` must be in the format +`file://spec.txt` or `registry://keyname`. + +### Specify isolation technology for container (--isolation) + +This option is useful in situations where you are running Docker containers on +Windows. The `--isolation=` option sets a container's isolation +technology. On Linux, the only supported is the `default` option which uses +Linux namespaces. 
On Microsoft Windows, you can specify these values:
+
+
+| Value     | Description                                                                                                                                                     |
+|-----------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `default` | Use the value specified by the Docker daemon's `--exec-opt` option. If the daemon does not specify an isolation technology, Microsoft Windows uses `process` as its default value. |
+| `process` | Namespace isolation only.                                                                                                                                       |
+| `hyperv`  | Hyper-V hypervisor partition-based isolation.                                                                                                                   |
+
+Specifying the `--isolation` flag without a value is the same as setting `--isolation="default"`.
+
+### Add entries to container hosts file (--add-host)
+
+You can add other hosts into a container's `/etc/hosts` file by using one or
+more `--add-host` flags. This example adds a static address for a host named
+`docker`:
+
+    $ docker build --add-host=docker:10.180.0.1 .
+
+### Specifying target build stage (--target)
+
+When building a Dockerfile with multiple build stages, `--target` can be used to
+specify an intermediate build stage by name as a final stage for the resulting
+image. Commands after the target stage will be skipped.
+
+```Dockerfile
+FROM debian AS build-env
+...
+
+FROM alpine AS production-env
+...
+```
+
+```bash
+$ docker build -t mybuildimage --target build-env .
+```
+
+### Squash an image's layers (--squash) **Experimental Only**
+
+#### Overview
+
+Once the image is built, squash the new layers into a new image with a single
+new layer. Squashing does not destroy any existing image; rather, it creates a new
+image with the content of the squashed layers. This effectively makes it look
+like all `Dockerfile` commands were created with a single layer. The build
+cache is preserved with this method.
+
+**Note**: using this option means the new image will not be able to take
+advantage of layer sharing with other images and may use significantly more
+space.
+
+**Note**: using this option you may see significantly more space used due to
+storing two copies of the image, one for the build cache with all the cache
+layers intact, and one for the squashed version.
+
+#### Prerequisites
+
+The example on this page uses experimental mode in Docker 1.13.
+
+Experimental mode can be enabled by using the `--experimental` flag when starting the Docker daemon or setting `experimental: true` in the `daemon.json` configuration file.
+
+By default, experimental mode is disabled. To see the current configuration, use the `docker version` command.
+
+```none
+
+Server:
+ Version:      1.13.1
+ API version:  1.26 (minimum version 1.12)
+ Go version:   go1.7.5
+ Git commit:   092cba3
+ Built:        Wed Feb  8 06:35:24 2017
+ OS/Arch:      linux/amd64
+ Experimental: false
+
+ [...]
+
+```
+
+To enable experimental mode, users need to restart the docker daemon with the experimental flag enabled.
+
+#### Enable Docker experimental
+
+Experimental features are included in the standard Docker binaries as of version 1.13.0. To enable them, start the Docker daemon with the `--experimental` flag. You can also enable the daemon flag via `/etc/docker/daemon.json`, e.g.
+
+```
+
+{
+    "experimental": true
+}
+
+```
+Then make sure the experimental flag is enabled:
+
+```bash
+
+$ docker version -f '{{.Server.Experimental}}'
+true
+
+```
+
+#### Build an image with the `--squash` argument
+
+The following is an example of a `docker build` with the `--squash` argument
+
+```Dockerfile
+
+FROM busybox
+RUN echo hello > /hello
+RUN echo world >> /hello
+RUN touch remove_me /remove_me
+ENV HELLO world
+RUN rm /remove_me
+
+```
+An image named `test` is built with the `--squash` argument.
+
+```bash
+
+$ docker build --squash -t test .
+
+[...]
+
+```
+
+If everything is right, the history will look like this:
+
+```bash
+$ docker history test
+
+IMAGE               CREATED             CREATED BY                                      SIZE                COMMENT
+4e10cb5b4cac        3 seconds ago                                                       12 B                merge sha256:88a7b0112a41826885df0e7072698006ee8f621c6ab99fca7fe9151d7b599702 to sha256:47bcc53f74dc94b1920f0b34f6036096526296767650f223433fe65c35f149eb
+<missing>           5 minutes ago       /bin/sh -c rm /remove_me                        0 B
+<missing>           5 minutes ago       /bin/sh -c #(nop) ENV HELLO=world               0 B
+<missing>           5 minutes ago       /bin/sh -c touch remove_me /remove_me           0 B
+<missing>           5 minutes ago       /bin/sh -c echo world >> /hello                 0 B
+<missing>           6 minutes ago       /bin/sh -c echo hello > /hello                  0 B
+<missing>           7 weeks ago         /bin/sh -c #(nop) CMD ["sh"]                    0 B
+<missing>           7 weeks ago         /bin/sh -c #(nop) ADD file:47ca6e777c36a4cfff   1.113 MB
+
+```
+Note that the squashed layers' names are all `<missing>`, and that there is a new layer with the COMMENT `merge`.
+
+Test the image: check that `/remove_me` is gone, that `hello\nworld` is in `/hello`, and that the `HELLO` environment variable's value is `world`.
diff --git a/components/engine/docs/reference/commandline/cli.md b/components/engine/docs/reference/commandline/cli.md
new file mode 100644
index 0000000000..4d1cd2a638
--- /dev/null
+++ b/components/engine/docs/reference/commandline/cli.md
@@ -0,0 +1,317 @@
+---
+title: "Use the Docker command line"
+description: "Docker's CLI command description and usage"
+keywords: "Docker, Docker documentation, CLI, command line"
+---
+
+
+
+# docker
+
+To list available commands, either run `docker` with no parameters
+or execute `docker help`:
+
+```bash
+$ docker
+Usage: docker [OPTIONS] COMMAND [ARG...]
+       docker [ --help | -v | --version ]
+
+A self-sufficient runtime for containers.
+
+Options:
+      --config string      Location of client config files (default "/root/.docker")
+  -D, --debug              Enable debug mode
+      --help               Print usage
+  -H, --host value         Daemon socket(s) to connect to (default [])
+  -l, --log-level string   Set the logging level ("debug"|"info"|"warn"|"error"|"fatal") (default "info")
+      --tls                Use TLS; implied by --tlsverify
+      --tlscacert string   Trust certs signed only by this CA (default "/root/.docker/ca.pem")
+      --tlscert string     Path to TLS certificate file (default "/root/.docker/cert.pem")
+      --tlskey string      Path to TLS key file (default "/root/.docker/key.pem")
+      --tlsverify          Use TLS and verify the remote
+  -v, --version            Print version information and quit
+
+Commands:
+    attach      Attach to a running container
+    # […]
+```
+
+## Description
+
+Depending on your Docker system configuration, you may be required to preface
+each `docker` command with `sudo`. To avoid having to use `sudo` with the
+`docker` command, your system administrator can create a Unix group called
+`docker` and add users to it.
+
+For more information about installing Docker or `sudo` configuration, refer to
+the [installation](https://docs.docker.com/engine/installation/) instructions for your operating system.
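+
+For instance, a minimal sketch of that setup on a typical Linux host (the
+group name `docker` is the convention; details vary by distribution) might be:
+
+```bash
+# Create the docker group and add the current user to it,
+# then log out and back in for the change to take effect.
+$ sudo groupadd docker
+$ sudo usermod -aG docker $USER
+```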
+
+### Environment variables
+
+For easy reference, the following environment variables are supported
+by the `docker` command line:
+
+* `DOCKER_API_VERSION` The API version to use (e.g. `1.19`)
+* `DOCKER_CONFIG` The location of your client configuration files.
+* `DOCKER_CERT_PATH` The location of your authentication keys.
+* `DOCKER_DRIVER` The graph driver to use.
+* `DOCKER_HOST` Daemon socket to connect to.
+* `DOCKER_NOWARN_KERNEL_VERSION` Prevent warnings that your Linux kernel is
+  unsuitable for Docker.
+* `DOCKER_RAMDISK` If set, this will disable 'pivot_root'.
+* `DOCKER_TLS_VERIFY` When set, Docker uses TLS and verifies the remote.
+* `DOCKER_CONTENT_TRUST` When set, Docker uses notary to sign and verify images.
+  Equates to `--disable-content-trust=false` for build, create, pull, push, run.
+* `DOCKER_CONTENT_TRUST_SERVER` The URL of the Notary server to use. This defaults
+  to the same URL as the registry.
+* `DOCKER_HIDE_LEGACY_COMMANDS` When set, Docker hides "legacy" top-level commands (such as `docker rm` and
+  `docker pull`) in `docker help` output, and only `Management commands` per object-type (e.g., `docker container`) are
+  printed. This may become the default in a future release, at which point this environment variable will be removed.
+* `DOCKER_TMPDIR` Location for temporary Docker files.
+
+Because Docker is developed using Go, you can also use any environment
+variables used by the Go runtime. In particular, you may find these useful:
+
+* `HTTP_PROXY`
+* `HTTPS_PROXY`
+* `NO_PROXY`
+
+These Go environment variables are case-insensitive. See the
+[Go specification](http://golang.org/pkg/net/http/) for details on these
+variables.
+
+### Configuration files
+
+By default, the Docker command line stores its configuration files in a
+directory called `.docker` within your `$HOME` directory. However, you can
+specify a different location via the `DOCKER_CONFIG` environment variable
+or the `--config` command line option. If both are specified, then the
+`--config` option overrides the `DOCKER_CONFIG` environment variable.
+For example:
+
+    docker --config ~/testconfigs/ ps
+
+This instructs Docker to use the configuration files in your `~/testconfigs/`
+directory when running the `ps` command.
+
+Docker manages most of the files in the configuration directory
+and you should not modify them. However, you *can modify* the
+`config.json` file to control certain aspects of how the `docker`
+command behaves.
+
+Currently, you can modify the `docker` command behavior using environment
+variables or command-line options. You can also use options within
+`config.json` to modify some of the same behavior. When using these
+mechanisms, you must keep in mind the order of precedence among them. Command
+line options override environment variables, and environment variables override
+properties you specify in a `config.json` file.
+
+The `config.json` file stores a JSON encoding of several properties:
+
+The property `HttpHeaders` specifies a set of headers to include in all messages
+sent from the Docker client to the daemon. Docker does not try to interpret or
+understand these headers; it simply puts them into the messages. Docker does
+not allow these headers to change any headers it sets for itself.
+
+The property `psFormat` specifies the default format for `docker ps` output.
+When the `--format` flag is not provided with the `docker ps` command,
+Docker's client uses this property. If this property is not set, the client
+falls back to the default table format.
For a list of supported formatting +directives, see the +[**Formatting** section in the `docker ps` documentation](ps.md) + +The property `imagesFormat` specifies the default format for `docker images` output. +When the `--format` flag is not provided with the `docker images` command, +Docker's client uses this property. If this property is not set, the client +falls back to the default table format. For a list of supported formatting +directives, see the [**Formatting** section in the `docker images` documentation](images.md) + +The property `pluginsFormat` specifies the default format for `docker plugin ls` output. +When the `--format` flag is not provided with the `docker plugin ls` command, +Docker's client uses this property. If this property is not set, the client +falls back to the default table format. For a list of supported formatting +directives, see the [**Formatting** section in the `docker plugin ls` documentation](plugin_ls.md) + +The property `servicesFormat` specifies the default format for `docker +service ls` output. When the `--format` flag is not provided with the +`docker service ls` command, Docker's client uses this property. If this +property is not set, the client falls back to the default json format. For a +list of supported formatting directives, see the +[**Formatting** section in the `docker service ls` documentation](service_ls.md) + +The property `serviceInspectFormat` specifies the default format for `docker +service inspect` output. When the `--format` flag is not provided with the +`docker service inspect` command, Docker's client uses this property. If this +property is not set, the client falls back to the default json format. For a +list of supported formatting directives, see the +[**Formatting** section in the `docker service inspect` documentation](service_inspect.md) + +The property `statsFormat` specifies the default format for `docker +stats` output. When the `--format` flag is not provided with the +`docker stats` command, Docker's client uses this property. If this +property is not set, the client falls back to the default table +format. For a list of supported formatting directives, see +[**Formatting** section in the `docker stats` documentation](stats.md) + +The property `secretFormat` specifies the default format for `docker +secret ls` output. When the `--format` flag is not provided with the +`docker secret ls` command, Docker's client uses this property. If this +property is not set, the client falls back to the default table +format. For a list of supported formatting directives, see +[**Formatting** section in the `docker secret ls` documentation](secret_ls.md) + + +The property `nodesFormat` specifies the default format for `docker node ls` output. +When the `--format` flag is not provided with the `docker node ls` command, +Docker's client uses the value of `nodesFormat`. If the value of `nodesFormat` is not set, +the client uses the default table format. For a list of supported formatting +directives, see the [**Formatting** section in the `docker node ls` documentation](node_ls.md) + +The property `configFormat` specifies the default format for `docker +config ls` output. When the `--format` flag is not provided with the +`docker config ls` command, Docker's client uses this property. If this +property is not set, the client falls back to the default table +format. 
For a list of supported formatting directives, see
+[**Formatting** section in the `docker config ls` documentation](config_ls.md)
+
+The property `credsStore` specifies an external binary to serve as the default
+credential store. When this property is set, `docker login` will attempt to
+store credentials in the binary specified by `docker-credential-<value>` which
+is visible on `$PATH`. If this property is not set, credentials will be stored
+in the `auths` property of the config. For more information, see the
+[**Credentials store** section in the `docker login` documentation](login.md#credentials-store)
+
+The property `credHelpers` specifies a set of credential helpers to use
+preferentially over `credsStore` or `auths` when storing and retrieving
+credentials for specific registries. If this property is set, the binary
+`docker-credential-<value>` will be used when storing or retrieving credentials
+for a specific registry. For more information, see the
+[**Credential helpers** section in the `docker login` documentation](login.md#credential-helpers)
+
+Once attached to a container, users detach from it and leave it running using
+the `CTRL-p CTRL-q` key sequence. This detach key sequence is customizable
+using the `detachKeys` property. Specify a `<sequence>` value for the
+property. The format of the `<sequence>` is a comma-separated list of either
+a letter [a-Z], or the `ctrl-` combined with any of the following:
+
+* `a-z` (a single lowercase alpha character)
+* `@` (at sign)
+* `[` (left bracket)
+* `\\` (two backward slashes)
+* `_` (underscore)
+* `^` (caret)
+
+Your customization applies to all containers started with your Docker client.
+Users can override your custom or the default key sequence on a per-container
+basis. To do this, the user specifies the `--detach-keys` flag with the `docker
+attach`, `docker exec`, `docker run` or `docker start` command.
+
+The following is a sample `config.json` file:
+
+```json
+{
+  "HttpHeaders": {
+    "MyHeader": "MyValue"
+  },
+  "psFormat": "table {{.ID}}\\t{{.Image}}\\t{{.Command}}\\t{{.Labels}}",
+  "imagesFormat": "table {{.ID}}\\t{{.Repository}}\\t{{.Tag}}\\t{{.CreatedAt}}",
+  "pluginsFormat": "table {{.ID}}\t{{.Name}}\t{{.Enabled}}",
+  "statsFormat": "table {{.Container}}\t{{.CPUPerc}}\t{{.MemUsage}}",
+  "servicesFormat": "table {{.ID}}\t{{.Name}}\t{{.Mode}}",
+  "secretFormat": "table {{.ID}}\t{{.Name}}\t{{.CreatedAt}}\t{{.UpdatedAt}}",
+  "configFormat": "table {{.ID}}\t{{.Name}}\t{{.CreatedAt}}\t{{.UpdatedAt}}",
+  "serviceInspectFormat": "pretty",
+  "nodesFormat": "table {{.ID}}\t{{.Hostname}}\t{{.Availability}}",
+  "detachKeys": "ctrl-e,e",
+  "credsStore": "secretservice",
+  "credHelpers": {
+    "awesomereg.example.org": "hip-star",
+    "unicorn.example.com": "vcbait"
+  }
+}
+```
+
+### Notary
+
+If using your own notary server and a self-signed certificate or an internal
+Certificate Authority, you need to place the certificate at
+`tls/<registry_url>/ca.crt` in your docker config directory.
+
+Alternatively you can trust the certificate globally by adding it to your system's
+list of root Certificate Authorities.
+
+## Examples
+
+### Display help text
+
+To list the help on any command, just execute the command followed by the
+`--help` option.
+
+    $ docker run --help
+
+    Usage: docker run [OPTIONS] IMAGE [COMMAND] [ARG...]
+
+    Run a command in a new container
+
+    Options:
+          --add-host value             Add a custom host-to-IP mapping (host:ip) (default [])
+      -a, --attach value               Attach to STDIN, STDOUT or STDERR (default [])
+    ...
+ +### Option types + +Single character command line options can be combined, so rather than +typing `docker run -i -t --name test busybox sh`, +you can write `docker run -it --name test busybox sh`. + +#### Boolean + +Boolean options take the form `-d=false`. The value you see in the help text is +the default value which is set if you do **not** specify that flag. If you +specify a Boolean flag without a value, this will set the flag to `true`, +irrespective of the default value. + +For example, running `docker run -d` will set the value to `true`, so your +container **will** run in "detached" mode, in the background. + +Options which default to `true` (e.g., `docker build --rm=true`) can only be +set to the non-default value by explicitly setting them to `false`: + +```bash +$ docker build --rm=false . +``` + +#### Multi + +You can specify options like `-a=[]` multiple times in a single command line, +for example in these commands: + +```bash +$ docker run -a stdin -a stdout -i -t ubuntu /bin/bash + +$ docker run -a stdin -a stdout -a stderr ubuntu /bin/ls +``` + +Sometimes, multiple options can call for a more complex value string as for +`-v`: + +```bash +$ docker run -v /host:/container example/mysql +``` + +> **Note**: Do not use the `-t` and `-a stderr` options together due to +> limitations in the `pty` implementation. All `stderr` in `pty` mode +> simply goes to `stdout`. + +#### Strings and Integers + +Options like `--name=""` expect a string, and they +can only be specified once. Options like `-c=0` +expect an integer, and they can only be specified once. diff --git a/components/engine/docs/reference/commandline/commit.md b/components/engine/docs/reference/commandline/commit.md new file mode 100644 index 0000000000..f713eeab97 --- /dev/null +++ b/components/engine/docs/reference/commandline/commit.md @@ -0,0 +1,117 @@ +--- +title: "commit" +description: "The commit command description and usage" +keywords: "commit, file, changes" +--- + + + +# commit + +```markdown +Usage: docker commit [OPTIONS] CONTAINER [REPOSITORY[:TAG]] + +Create a new image from a container's changes + +Options: + -a, --author string Author (e.g., "John Hannibal Smith ") + -c, --change value Apply Dockerfile instruction to the created image (default []) + --help Print usage + -m, --message string Commit message + -p, --pause Pause container during commit (default true) +``` + +## Description + +It can be useful to commit a container's file changes or settings into a new +image. This allows you to debug a container by running an interactive shell, or to +export a working dataset to another server. Generally, it is better to use +Dockerfiles to manage your images in a documented and maintainable way. +[Read more about valid image names and tags](tag.md). + +The commit operation will not include any data contained in +volumes mounted inside the container. + +By default, the container being committed and its processes will be paused +while the image is committed. This reduces the likelihood of encountering data +corruption during the process of creating the commit. If this behavior is +undesired, set the `--pause` option to false. + +The `--change` option will apply `Dockerfile` instructions to the image that is +created. 
Supported `Dockerfile` instructions: +`CMD`|`ENTRYPOINT`|`ENV`|`EXPOSE`|`LABEL`|`ONBUILD`|`USER`|`VOLUME`|`WORKDIR` + +## Examples + +### Commit a container + +```bash +$ docker ps + +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +c3f279d17e0a ubuntu:12.04 /bin/bash 7 days ago Up 25 hours desperate_dubinsky +197387f1b436 ubuntu:12.04 /bin/bash 7 days ago Up 25 hours focused_hamilton + +$ docker commit c3f279d17e0a svendowideit/testimage:version3 + +f5283438590d + +$ docker images + +REPOSITORY TAG ID CREATED SIZE +svendowideit/testimage version3 f5283438590d 16 seconds ago 335.7 MB +``` + +### Commit a container with new configurations + +```bash +$ docker ps + +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +c3f279d17e0a ubuntu:12.04 /bin/bash 7 days ago Up 25 hours desperate_dubinsky +197387f1b436 ubuntu:12.04 /bin/bash 7 days ago Up 25 hours focused_hamilton + +$ docker inspect -f "{{ .Config.Env }}" c3f279d17e0a + +[HOME=/ PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin] + +$ docker commit --change "ENV DEBUG true" c3f279d17e0a svendowideit/testimage:version3 + +f5283438590d + +$ docker inspect -f "{{ .Config.Env }}" f5283438590d + +[HOME=/ PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin DEBUG=true] +``` + +### Commit a container with new `CMD` and `EXPOSE` instructions + +```bash +$ docker ps + +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +c3f279d17e0a ubuntu:12.04 /bin/bash 7 days ago Up 25 hours desperate_dubinsky +197387f1b436 ubuntu:12.04 /bin/bash 7 days ago Up 25 hours focused_hamilton + +$ docker commit --change='CMD ["apachectl", "-DFOREGROUND"]' -c "EXPOSE 80" c3f279d17e0a svendowideit/testimage:version4 + +f5283438590d + +$ docker run -d svendowideit/testimage:version4 + +89373736e2e7f00bc149bd783073ac43d0507da250e999f3f1036e0db60817c0 + +$ docker ps + +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +89373736e2e7 testimage:version4 "apachectl -DFOREGROU" 3 seconds ago Up 2 seconds 80/tcp distracted_fermat +c3f279d17e0a ubuntu:12.04 /bin/bash 7 days ago Up 25 hours desperate_dubinsky +197387f1b436 ubuntu:12.04 /bin/bash 7 days ago Up 25 hours focused_hamilton +``` diff --git a/components/engine/docs/reference/commandline/container.md b/components/engine/docs/reference/commandline/container.md new file mode 100644 index 0000000000..5eefbf2c3e --- /dev/null +++ b/components/engine/docs/reference/commandline/container.md @@ -0,0 +1,61 @@ + +--- +title: "container" +description: "The container command description and usage" +keywords: "container" +--- + + + +# container + +```markdown +Usage: docker container COMMAND + +Manage containers + +Options: + --help Print usage + +Commands: + attach Attach to a running container + commit Create a new image from a container's changes + cp Copy files/folders between a container and the local filesystem + create Create a new container + diff Inspect changes to files or directories on a container's filesystem + exec Run a command in a running container + export Export a container's filesystem as a tar archive + inspect Display detailed information on one or more containers + kill Kill one or more running containers + logs Fetch the logs of a container + ls List containers + pause Pause all processes within one or more containers + port List port mappings or a specific mapping for the container + prune Remove all stopped containers + rename Rename a container + restart Restart one or more containers + rm Remove one or more containers + run Run a command in a new container + 
start       Start one or more stopped containers
+  stats       Display a live stream of container(s) resource usage statistics
+  stop        Stop one or more running containers
+  top         Display the running processes of a container
+  unpause     Unpause all processes within one or more containers
+  update      Update configuration of one or more containers
+  wait        Block until one or more containers stop, then print their exit codes
+
+Run 'docker container COMMAND --help' for more information on a command.
+
+```
+
+## Description
+
+Manage containers.
+
diff --git a/components/engine/docs/reference/commandline/container_prune.md b/components/engine/docs/reference/commandline/container_prune.md
new file mode 100644
index 0000000000..72488901ed
--- /dev/null
+++ b/components/engine/docs/reference/commandline/container_prune.md
@@ -0,0 +1,126 @@
+---
+title: "container prune"
+description: "Remove all stopped containers"
+keywords: container, prune, delete, remove
+---
+
+
+
+# container prune
+
+```markdown
+Usage:  docker container prune [OPTIONS]
+
+Remove all stopped containers
+
+Options:
+      --filter filter   Provide filter values (e.g. 'until=<timestamp>')
+  -f, --force           Do not prompt for confirmation
+      --help            Print usage
+```
+
+## Description
+
+Removes all stopped containers.
+
+## Examples
+
+### Prune containers
+
+```bash
+$ docker container prune
+WARNING! This will remove all stopped containers.
+Are you sure you want to continue? [y/N] y
+Deleted Containers:
+4a7f7eebae0f63178aff7eb0aa39cd3f0627a203ab2df258c1a00b456cf20063
+f98f9c2aa1eaf727e4ec9c0283bc7d4aa4762fbdba7f26191f26c97f64090360
+
+Total reclaimed space: 212 B
+```
+
+### Filtering
+
+The filtering flag (`--filter`) format is of "key=value". If there is more
+than one filter, then pass multiple flags (e.g., `--filter "foo=bar" --filter "bif=baz"`)
+
+The currently supported filters are:
+
+* until (`<timestamp>`) - only remove containers created before given timestamp
+* label (`label=<key>`, `label=<key>=<value>`, `label!=<key>`, or `label!=<key>=<value>`) - only remove containers with (or without, in case `label!=...` is used) the specified labels.
+
+The `until` filter can be Unix timestamps, date formatted
+timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed
+relative to the daemon machine's time. Supported formats for date
+formatted time stamps include RFC3339Nano, RFC3339, `2006-01-02T15:04:05`,
+`2006-01-02T15:04:05.999999999`, `2006-01-02Z07:00`, and `2006-01-02`. The local
+timezone on the daemon will be used if you do not provide either a `Z` or a
+`+-00:00` timezone offset at the end of the timestamp. When providing Unix
+timestamps enter seconds[.nanoseconds], where seconds is the number of seconds
+that have elapsed since January 1, 1970 (midnight UTC/GMT), not counting leap
+seconds (aka Unix epoch or Unix time), and the optional .nanoseconds field is a
+fraction of a second no more than nine digits long.
+
+The `label` filter accepts two formats. One is the `label=...` (`label=<key>` or `label=<key>=<value>`),
+which removes containers with the specified labels. The other
+format is the `label!=...` (`label!=<key>` or `label!=<key>=<value>`), which removes
+containers without the specified labels.
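+
+For instance, a minimal sketch using the label filter (the `deprecated` label
+is hypothetical and assumed to have been set when the containers were created):
+
+```bash
+$ docker container prune --force --filter "label=deprecated"
+```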
+ +The following removes containers created more than 5 minutes ago: + +```bash +$ docker ps -a --format 'table {{.ID}}\t{{.Image}}\t{{.Command}}\t{{.CreatedAt}}\t{{.Status}}' + +CONTAINER ID IMAGE COMMAND CREATED AT STATUS +61b9efa71024 busybox "sh" 2017-01-04 13:23:33 -0800 PST Exited (0) 41 seconds ago +53a9bc23a516 busybox "sh" 2017-01-04 13:11:59 -0800 PST Exited (0) 12 minutes ago + +$ docker container prune --force --filter "until=5m" + +Deleted Containers: +53a9bc23a5168b6caa2bfbefddf1b30f93c7ad57f3dec271fd32707497cb9369 + +Total reclaimed space: 25 B + +$ docker ps -a --format 'table {{.ID}}\t{{.Image}}\t{{.Command}}\t{{.CreatedAt}}\t{{.Status}}' + +CONTAINER ID IMAGE COMMAND CREATED AT STATUS +61b9efa71024 busybox "sh" 2017-01-04 13:23:33 -0800 PST Exited (0) 44 seconds ago +``` + +The following removes containers created before `2017-01-04T13:10:00`: + +```bash +$ docker ps -a --format 'table {{.ID}}\t{{.Image}}\t{{.Command}}\t{{.CreatedAt}}\t{{.Status}}' + +CONTAINER ID IMAGE COMMAND CREATED AT STATUS +53a9bc23a516 busybox "sh" 2017-01-04 13:11:59 -0800 PST Exited (0) 7 minutes ago +4a75091a6d61 busybox "sh" 2017-01-04 13:09:53 -0800 PST Exited (0) 9 minutes ago + +$ docker container prune --force --filter "until=2017-01-04T13:10:00" + +Deleted Containers: +4a75091a6d618526fcd8b33ccd6e5928ca2a64415466f768a6180004b0c72c6c + +Total reclaimed space: 27 B + +$ docker ps -a --format 'table {{.ID}}\t{{.Image}}\t{{.Command}}\t{{.CreatedAt}}\t{{.Status}}' + +CONTAINER ID IMAGE COMMAND CREATED AT STATUS +53a9bc23a516 busybox "sh" 2017-01-04 13:11:59 -0800 PST Exited (0) 9 minutes ago +``` + +## Related commands + +* [system df](system_df.md) +* [volume prune](volume_prune.md) +* [image prune](image_prune.md) +* [network prune](network_prune.md) +* [system prune](system_prune.md) diff --git a/components/engine/docs/reference/commandline/cp.md b/components/engine/docs/reference/commandline/cp.md new file mode 100644 index 0000000000..5cbbee25ae --- /dev/null +++ b/components/engine/docs/reference/commandline/cp.md @@ -0,0 +1,115 @@ +--- +title: "cp" +description: "The cp command description and usage" +keywords: "copy, container, files, folders" +--- + + + +# cp + +```markdown +Usage: docker cp [OPTIONS] CONTAINER:SRC_PATH DEST_PATH|- + docker cp [OPTIONS] SRC_PATH|- CONTAINER:DEST_PATH + +Copy files/folders between a container and the local filesystem + +Use '-' as the source to read a tar archive from stdin +and extract it to a directory destination in a container. +Use '-' as the destination to stream a tar archive of a +container source to stdout. + +Options: + -L, --follow-link Always follow symbol link in SRC_PATH + --help Print usage +``` + +## Description + +The `docker cp` utility copies the contents of `SRC_PATH` to the `DEST_PATH`. +You can copy from the container's file system to the local machine or the +reverse, from the local filesystem to the container. If `-` is specified for +either the `SRC_PATH` or `DEST_PATH`, you can also stream a tar archive from +`STDIN` or to `STDOUT`. The `CONTAINER` can be a running or stopped container. +The `SRC_PATH` or `DEST_PATH` can be a file or directory. + +The `docker cp` command assumes container paths are relative to the container's +`/` (root) directory. This means supplying the initial forward slash is optional; +The command sees `compassionate_darwin:/tmp/foo/myfile.txt` and +`compassionate_darwin:tmp/foo/myfile.txt` as identical. Local machine paths can +be an absolute or relative value. 
The command interprets a local machine's +relative paths as relative to the current working directory where `docker cp` is +run. + +The `cp` command behaves like the Unix `cp -a` command in that directories are +copied recursively with permissions preserved if possible. Ownership is set to +the user and primary group at the destination. For example, files copied to a +container are created with `UID:GID` of the root user. Files copied to the local +machine are created with the `UID:GID` of the user which invoked the `docker cp` +command. If you specify the `-L` option, `docker cp` follows any symbolic link +in the `SRC_PATH`. `docker cp` does *not* create parent directories for +`DEST_PATH` if they do not exist. + +Assuming a path separator of `/`, a first argument of `SRC_PATH` and second +argument of `DEST_PATH`, the behavior is as follows: + +- `SRC_PATH` specifies a file + - `DEST_PATH` does not exist + - the file is saved to a file created at `DEST_PATH` + - `DEST_PATH` does not exist and ends with `/` + - Error condition: the destination directory must exist. + - `DEST_PATH` exists and is a file + - the destination is overwritten with the source file's contents + - `DEST_PATH` exists and is a directory + - the file is copied into this directory using the basename from + `SRC_PATH` +- `SRC_PATH` specifies a directory + - `DEST_PATH` does not exist + - `DEST_PATH` is created as a directory and the *contents* of the source + directory are copied into this directory + - `DEST_PATH` exists and is a file + - Error condition: cannot copy a directory to a file + - `DEST_PATH` exists and is a directory + - `SRC_PATH` does not end with `/.` (that is: _slash_ followed by _dot_) + - the source directory is copied into this directory + - `SRC_PATH` does end with `/.` (that is: _slash_ followed by _dot_) + - the *content* of the source directory is copied into this + directory + +The command requires `SRC_PATH` and `DEST_PATH` to exist according to the above +rules. If `SRC_PATH` is local and is a symbolic link, the symbolic link, not +the target, is copied by default. To copy the link target and not the link, specify +the `-L` option. + +A colon (`:`) is used as a delimiter between `CONTAINER` and its path. You can +also use `:` when specifying paths to a `SRC_PATH` or `DEST_PATH` on a local +machine, for example `file:name.txt`. If you use a `:` in a local machine path, +you must be explicit with a relative or absolute path, for example: + + `/path/to/file:name.txt` or `./file:name.txt` + +It is not possible to copy certain system files such as resources under +`/proc`, `/sys`, `/dev`, [tmpfs](run.md#mount-tmpfs-tmpfs), and mounts created by +the user in the container. However, you can still copy such files by manually +running `tar` in `docker exec`. Both of the following examples do the same thing +in different ways (consider `SRC_PATH` and `DEST_PATH` are directories): + +```bash +$ docker exec foo tar Ccf $(dirname SRC_PATH) - $(basename SRC_PATH) | tar Cxf DEST_PATH - +``` + +```bash +$ tar Ccf $(dirname SRC_PATH) - $(basename SRC_PATH) | docker exec -i foo tar Cxf DEST_PATH - +``` + +Using `-` as the `SRC_PATH` streams the contents of `STDIN` as a tar archive. +The command extracts the content of the tar to the `DEST_PATH` in container's +filesystem. In this case, `DEST_PATH` must specify a directory. Using `-` as +the `DEST_PATH` streams the contents of the resource as a tar archive to `STDOUT`. 
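+
+As a simple illustration of both directions (the container name and paths here
+are hypothetical):
+
+```bash
+# Copy a file from the local machine into the container
+$ docker cp ./config.conf mycontainer:/etc/config.conf
+
+# Copy a directory from the container back to the local machine
+$ docker cp mycontainer:/var/log ./container-logs
+```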
diff --git a/components/engine/docs/reference/commandline/create.md b/components/engine/docs/reference/commandline/create.md new file mode 100644 index 0000000000..8a57f2ffe9 --- /dev/null +++ b/components/engine/docs/reference/commandline/create.md @@ -0,0 +1,260 @@ +--- +title: "create" +description: "The create command description and usage" +keywords: "docker, create, container" +--- + + + +# create + +Creates a new container. + +```markdown +Usage: docker create [OPTIONS] IMAGE [COMMAND] [ARG...] + +Create a new container + +Options: + --add-host value Add a custom host-to-IP mapping (host:ip) (default []) + -a, --attach value Attach to STDIN, STDOUT or STDERR (default []) + --blkio-weight value Block IO (relative weight), between 10 and 1000 + --blkio-weight-device value Block IO weight (relative device weight) (default []) + --cap-add value Add Linux capabilities (default []) + --cap-drop value Drop Linux capabilities (default []) + --cgroup-parent string Optional parent cgroup for the container + --cidfile string Write the container ID to the file + --cpu-count int The number of CPUs available for execution by the container. + Windows daemon only. On Windows Server containers, this is + approximated as a percentage of total CPU usage. + --cpu-percent int CPU percent (Windows only) + --cpu-period int Limit CPU CFS (Completely Fair Scheduler) period + --cpu-quota int Limit CPU CFS (Completely Fair Scheduler) quota + -c, --cpu-shares int CPU shares (relative weight) + --cpus NanoCPUs Number of CPUs (default 0.000) + --cpu-rt-period int Limit the CPU real-time period in microseconds + --cpu-rt-runtime int Limit the CPU real-time runtime in microseconds + --cpuset-cpus string CPUs in which to allow execution (0-3, 0,1) + --cpuset-mems string MEMs in which to allow execution (0-3, 0,1) + --device value Add a host device to the container (default []) + --device-cgroup-rule value Add a rule to the cgroup allowed devices list + --device-read-bps value Limit read rate (bytes per second) from a device (default []) + --device-read-iops value Limit read rate (IO per second) from a device (default []) + --device-write-bps value Limit write rate (bytes per second) to a device (default []) + --device-write-iops value Limit write rate (IO per second) to a device (default []) + --disable-content-trust Skip image verification (default true) + --dns value Set custom DNS servers (default []) + --dns-option value Set DNS options (default []) + --dns-search value Set custom DNS search domains (default []) + --entrypoint string Overwrite the default ENTRYPOINT of the image + -e, --env value Set environment variables (default []) + --env-file value Read in a file of environment variables (default []) + --expose value Expose a port or a range of ports (default []) + --group-add value Add additional groups to join (default []) + --health-cmd string Command to run to check health + --health-interval duration Time between running the check (ns|us|ms|s|m|h) (default 0s) + --health-retries int Consecutive failures needed to report unhealthy + --health-timeout duration Maximum time to allow one check to run (ns|us|ms|s|m|h) (default 0s) + --health-start-period duration Start period for the container to initialize before counting retries towards unstable (ns|us|ms|s|m|h) (default 0s) + --help Print usage + -h, --hostname string Container host name + --init Run an init inside the container that forwards signals and reaps processes + -i, --interactive Keep STDIN open even if not attached + --io-maxbandwidth string 
Maximum IO bandwidth limit for the system drive (Windows only)
+ --io-maxiops uint Maximum IOps limit for the system drive (Windows only)
+ --ip string IPv4 address (e.g., 172.30.100.104)
+ --ip6 string IPv6 address (e.g., 2001:db8::33)
+ --ipc string IPC namespace to use
+ --isolation string Container isolation technology
+ --kernel-memory string Kernel memory limit
+ -l, --label value Set meta data on a container (default [])
+ --label-file value Read in a line delimited file of labels (default [])
+ --link value Add link to another container (default [])
+ --link-local-ip value Container IPv4/IPv6 link-local addresses (default [])
+ --log-driver string Logging driver for the container
+ --log-opt value Log driver options (default [])
+ --mac-address string Container MAC address (e.g., 92:d0:c6:0a:29:33)
+ -m, --memory string Memory limit
+ --memory-reservation string Memory soft limit
+ --memory-swap string Swap limit equal to memory plus swap: '-1' to enable unlimited swap
+ --memory-swappiness int Tune container memory swappiness (0 to 100) (default -1)
+ --mount value Attach a filesystem mount to the container (default [])
+ --name string Assign a name to the container
+ --network-alias value Add network-scoped alias for the container (default [])
+ --network string Connect a container to a network (default "default")
+ 'bridge': create a network stack on the default Docker bridge
+ 'none': no networking
+ 'container:<name|id>': reuse another container's network stack
+ 'host': use the Docker host network stack
+ '<network-name>|<network-id>': connect to a user-defined network
+ --no-healthcheck Disable any container-specified HEALTHCHECK
+ --oom-kill-disable Disable OOM Killer
+ --oom-score-adj int Tune host's OOM preferences (-1000 to 1000)
+ --pid string PID namespace to use
+ --pids-limit int Tune container pids limit (set -1 for unlimited), kernel >= 4.3
+ --privileged Give extended privileges to this container
+ -p, --publish value Publish a container's port(s) to the host (default [])
+ -P, --publish-all Publish all exposed ports to random ports
+ --read-only Mount the container's root filesystem as read only
+ --restart string Restart policy to apply when a container exits (default "no")
+ Possible values are: no, on-failure[:max-retry], always, unless-stopped
+ --rm Automatically remove the container when it exits
+ --runtime string Runtime to use for this container
+ --security-opt value Security Options (default [])
+ --shm-size bytes Size of /dev/shm
+ The format is `<number><unit>`. `number` must be greater than `0`.
+ Unit is optional and can be `b` (bytes), `k` (kilobytes), `m` (megabytes),
+ or `g` (gigabytes). If you omit the unit, the system uses bytes.
+ --stop-signal string Signal to stop a container (default "SIGTERM")
+ --stop-timeout=10 Timeout (in seconds) to stop a container
+ --storage-opt value Storage driver options for the container (default [])
+ --sysctl value Sysctl options (default map[])
+ --tmpfs value Mount a tmpfs directory (default [])
+ -t, --tty Allocate a pseudo-TTY
+ --ulimit value Ulimit options (default [])
+ -u, --user string Username or UID (format: <name|uid>[:<group|gid>])
+ --userns string User namespace to use
+ 'host': Use the Docker host user namespace
+ '': Use the Docker daemon user namespace specified by `--userns-remap` option.
+ --uts string UTS namespace to use
+ -v, --volume value Bind mount a volume (default []). The format
+ is `[host-src:]container-dest[:<options>]`.
+ The comma-delimited `options` are [rw|ro],
+ [z|Z], [[r]shared|[r]slave|[r]private],
+ [delegated|cached|consistent], and
+ [nocopy]. 
The 'host-src' is an absolute path
+ or a name value.
+ --volume-driver string Optional volume driver for the container
+ --volumes-from value Mount volumes from the specified container(s) (default [])
+ -w, --workdir string Working directory inside the container
+```
+## Description
+
+The `docker create` command creates a writeable container layer over the
+specified image and prepares it for running the specified command. The
+container ID is then printed to `STDOUT`. This is similar to `docker run -d`
+except the container is never started. You can then use the
+`docker start <container_id>` command to start the container at any point.
+
+This is useful when you want to set up a container configuration ahead of time
+so that it is ready to start when you need it. The initial status of the
+new container is `created`.
+
+Please see the [run command](run.md) section and the [Docker run reference](../run.md) for more details.
+
+## Examples
+
+### Create and start a container
+
+```bash
+$ docker create -t -i fedora bash
+
+6d8af538ec541dd581ebc2a24153a28329acb5268abe5ef868c1f1a261221752
+
+$ docker start -a -i 6d8af538ec5
+
+bash-4.2#
+```
+
+### Initialize volumes
+
+As of v1.4.0 container volumes are initialized during the `docker create` phase
+(i.e., `docker run` too). For example, this allows you to `create` the `data`
+volume container, and then use it from another container:
+
+```bash
+$ docker create -v /data --name data ubuntu
+
+240633dfbb98128fa77473d3d9018f6123b99c454b3251427ae190a7d951ad57
+
+$ docker run --rm --volumes-from data ubuntu ls -la /data
+
+total 8
+drwxr-xr-x 2 root root 4096 Dec 5 04:10 .
+drwxr-xr-x 48 root root 4096 Dec 5 04:11 ..
+```
+
+Similarly, `create` a host directory bind mounted volume container, which can
+then be used from the subsequent container:
+
+```bash
+$ docker create -v /home/docker:/docker --name docker ubuntu
+
+9aa88c08f319cd1e4515c3c46b0de7cc9aa75e878357b1e96f91e2c773029f03
+
+$ docker run --rm --volumes-from docker ubuntu ls -la /docker
+
+total 20
+drwxr-sr-x 5 1000 staff 180 Dec 5 04:00 .
+drwxr-xr-x 48 root root 4096 Dec 5 04:13 ..
+-rw-rw-r-- 1 1000 staff 3833 Dec 5 04:01 .ash_history
+-rw-r--r-- 1 1000 staff 446 Nov 28 11:51 .ashrc
+-rw-r--r-- 1 1000 staff 25 Dec 5 04:00 .gitconfig
+drwxr-sr-x 3 1000 staff 60 Dec 1 03:28 .local
+-rw-r--r-- 1 1000 staff 920 Nov 28 11:51 .profile
+drwx--S--- 2 1000 staff 460 Dec 5 00:51 .ssh
+drwxr-xr-x 32 1000 staff 1140 Dec 5 04:01 docker
+```
+
+### Set storage driver options per container
+
+```bash
+$ docker create -it --storage-opt size=120G fedora /bin/bash
+```
+
+This (`size`) allows the container rootfs size to be set to 120G at creation time.
+This option is only available for the `devicemapper`, `btrfs`, `overlay2`,
+`windowsfilter` and `zfs` graph drivers.
+For the `devicemapper`, `btrfs`, `windowsfilter` and `zfs` graph drivers,
+the user cannot pass a size less than the Default BaseFS Size.
+For the `overlay2` storage driver, the size option is only available if the
+backing fs is `xfs` and mounted with the `pquota` mount option.
+Under these conditions, the user can pass any size less than the backing fs size.
+
+### Specify isolation technology for container (--isolation)
+
+This option is useful in situations where you are running Docker containers on
+Windows. The `--isolation=<value>` option sets a container's isolation
+technology. On Linux, the only supported value is `default`, which uses
+Linux namespaces. 
On Microsoft Windows, you can specify these values:
+
+
+| Value | Description |
+|-----------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `default` | Use the value specified by the Docker daemon's `--exec-opt`. If the daemon does not specify an isolation technology, Microsoft Windows uses `process` as its default value if the daemon is running on Windows server, or `hyperv` if running on Windows client. |
+| `process` | Namespace isolation only. |
+| `hyperv` | Hyper-V hypervisor partition-based isolation. |
+
+Specifying the `--isolation` flag without a value is the same as setting `--isolation="default"`.
+
+### Dealing with dynamically created devices (--device-cgroup-rule)
+
+Devices available to a container are assigned at creation time. The
+assigned devices will both be added to the cgroup.allow file and
+created in the container once it is run. This poses a problem when
+a new device needs to be added to a running container.
+
+One solution is to add a more permissive rule to a container,
+allowing it access to a wider range of devices. For example, supposing
+our container needs access to a character device with major `42` and
+any minor number (added as new devices appear), the
+following rule would be added:
+
+```bash
+$ docker create --device-cgroup-rule='c 42:* rmw' --name my-container my-image
+```
+
+Then, a user could ask `udev` to execute a script that would `docker exec my-container mknod newDevX c 42 <minor>`
+the required device when it is added.
+
+> **Note**: Initially present devices still need to be explicitly added to
+> the `create`/`run` command.
diff --git a/components/engine/docs/reference/commandline/deploy.md b/components/engine/docs/reference/commandline/deploy.md
new file mode 100644
index 0000000000..a86b2b4b45
--- /dev/null
+++ b/components/engine/docs/reference/commandline/deploy.md
@@ -0,0 +1,112 @@
+---
+title: "deploy"
+description: "The deploy command description and usage"
+keywords: "stack, deploy"
+advisory: "experimental"
+---
+
+
+
+# deploy (experimental)
+
+An alias for `stack deploy`.
+
+```markdown
+Usage: docker deploy [OPTIONS] STACK
+
+Deploy a new stack or update an existing stack
+
+Aliases:
+ deploy, up
+
+Options:
+ --bundle-file string Path to a Distributed Application Bundle file
+ --compose-file string Path to a Compose file
+ --help Print usage
+ --prune Prune services that are no longer referenced
+ --with-registry-auth Send registry authentication details to Swarm agents
+```
+
+## Description
+
+Create and update a stack from a `compose` or a `dab` file on the swarm. This command
+must be run against a manager node.
+
+## Examples
+
+### Compose file
+
+The `deploy` command supports compose file version `3.0` and above. 
+
+```bash
+$ docker stack deploy --compose-file docker-compose.yml vossibility
+
+Ignoring unsupported options: links
+
+Creating network vossibility_vossibility
+Creating network vossibility_default
+Creating service vossibility_nsqd
+Creating service vossibility_logstash
+Creating service vossibility_elasticsearch
+Creating service vossibility_kibana
+Creating service vossibility_ghollector
+Creating service vossibility_lookupd
+```
+
+You can verify that the services were correctly created:
+
+```bash
+$ docker service ls
+
+ID NAME MODE REPLICAS IMAGE
+29bv0vnlm903 vossibility_lookupd replicated 1/1 nsqio/nsq@sha256:eeba05599f31eba418e96e71e0984c3dc96963ceb66924dd37a47bf7ce18a662
+4awt47624qwh vossibility_nsqd replicated 1/1 nsqio/nsq@sha256:eeba05599f31eba418e96e71e0984c3dc96963ceb66924dd37a47bf7ce18a662
+4tjx9biia6fs vossibility_elasticsearch replicated 1/1 elasticsearch@sha256:12ac7c6af55d001f71800b83ba91a04f716e58d82e748fa6e5a7359eed2301aa
+7563uuzr9eys vossibility_kibana replicated 1/1 kibana@sha256:6995a2d25709a62694a937b8a529ff36da92ebee74bafd7bf00e6caf6db2eb03
+9gc5m4met4he vossibility_logstash replicated 1/1 logstash@sha256:2dc8bddd1bb4a5a34e8ebaf73749f6413c101b2edef6617f2f7713926d2141fe
+axqh55ipl40h vossibility_vossibility-collector replicated 1/1 icecrime/vossibility-collector@sha256:f03f2977203ba6253988c18d04061c5ec7aab46bca9dfd89a9a1fa4500989fba
+```
+
+### DAB file
+
+```bash
+$ docker stack deploy --bundle-file vossibility-stack.dab vossibility
+
+Loading bundle from vossibility-stack.dab
+Creating service vossibility_elasticsearch
+Creating service vossibility_kibana
+Creating service vossibility_logstash
+Creating service vossibility_lookupd
+Creating service vossibility_nsqd
+Creating service vossibility_vossibility-collector
+```
+
+You can verify that the services were correctly created:
+
+```bash
+$ docker service ls
+
+ID NAME MODE REPLICAS IMAGE
+29bv0vnlm903 vossibility_lookupd replicated 1/1 nsqio/nsq@sha256:eeba05599f31eba418e96e71e0984c3dc96963ceb66924dd37a47bf7ce18a662
+4awt47624qwh vossibility_nsqd replicated 1/1 nsqio/nsq@sha256:eeba05599f31eba418e96e71e0984c3dc96963ceb66924dd37a47bf7ce18a662
+4tjx9biia6fs vossibility_elasticsearch replicated 1/1 elasticsearch@sha256:12ac7c6af55d001f71800b83ba91a04f716e58d82e748fa6e5a7359eed2301aa
+7563uuzr9eys vossibility_kibana replicated 1/1 kibana@sha256:6995a2d25709a62694a937b8a529ff36da92ebee74bafd7bf00e6caf6db2eb03
+9gc5m4met4he vossibility_logstash replicated 1/1 logstash@sha256:2dc8bddd1bb4a5a34e8ebaf73749f6413c101b2edef6617f2f7713926d2141fe
+axqh55ipl40h vossibility_vossibility-collector replicated 1/1 icecrime/vossibility-collector@sha256:f03f2977203ba6253988c18d04061c5ec7aab46bca9dfd89a9a1fa4500989fba
+```
+
+## Related commands
+
+* [stack config](stack_config.md)
+* [stack deploy](stack_deploy.md)
+* [stack ls](stack_ls.md)
+* [stack ps](stack_ps.md)
+* [stack rm](stack_rm.md)
+* [stack services](stack_services.md)
diff --git a/components/engine/docs/reference/commandline/diff.md b/components/engine/docs/reference/commandline/diff.md
new file mode 100644
index 0000000000..e6e12cef80
--- /dev/null
+++ b/components/engine/docs/reference/commandline/diff.md
@@ -0,0 +1,67 @@
+---
+title: "diff"
+description: "The diff command description and usage"
+keywords: "list, changed, files, container"
+---
+
+
+
+# diff
+
+```markdown
+Usage: docker diff CONTAINER
+
+Inspect changes to files or directories on a container's filesystem
+
+Options:
+ --help Print usage
+```
+
+## Description
+
+List the changed files and 
directories in a container's filesystem since the
+container was created. Three different types of change are tracked:
+
+| Symbol | Description |
+|--------|---------------------------------|
+| `A` | A file or directory was added |
+| `D` | A file or directory was deleted |
+| `C` | A file or directory was changed |
+
+You can use the full or shortened container ID, or the container name set using
+the `docker run --name` option.
+
+## Examples
+
+Inspect the changes to an `nginx` container:
+
+```bash
+$ docker diff 1fdfd1f54c1b
+
+C /dev
+C /dev/console
+C /dev/core
+C /dev/stdout
+C /dev/fd
+C /dev/ptmx
+C /dev/stderr
+C /dev/stdin
+C /run
+A /run/nginx.pid
+C /var/lib/nginx/tmp
+A /var/lib/nginx/tmp/client_body
+A /var/lib/nginx/tmp/fastcgi
+A /var/lib/nginx/tmp/proxy
+A /var/lib/nginx/tmp/scgi
+A /var/lib/nginx/tmp/uwsgi
+C /var/log/nginx
+A /var/log/nginx/access.log
+A /var/log/nginx/error.log
+```
diff --git a/components/engine/docs/reference/commandline/dockerd.md b/components/engine/docs/reference/commandline/dockerd.md
new file mode 100644
index 0000000000..93774c841b
--- /dev/null
+++ b/components/engine/docs/reference/commandline/dockerd.md
@@ -0,0 +1,1469 @@
+---
+title: "dockerd"
+aliases: ["/engine/reference/commandline/daemon/"]
+description: "The daemon command description and usage"
+keywords: "container, daemon, runtime"
+---
+
+
+
+# daemon
+
+```markdown
+Usage: dockerd COMMAND
+
+A self-sufficient runtime for containers.
+
+Options:
+ --add-runtime runtime Register an additional OCI compatible runtime (default [])
+ --allow-nondistributable-artifacts list Push nondistributable artifacts to specified registries (default [])
+ --api-cors-header string Set CORS headers in the Engine API
+ --authorization-plugin list Authorization plugins to load (default [])
+ --bip string Specify network bridge IP
+ -b, --bridge string Attach containers to a network bridge
+ --cgroup-parent string Set parent cgroup for all containers
+ --cluster-advertise string Address or interface name to advertise
+ --cluster-store string URL of the distributed storage backend
+ --cluster-store-opt map Set cluster store options (default map[])
+ --config-file string Daemon configuration file (default "/etc/docker/daemon.json")
+ --containerd string Path to containerd socket
+ --cpu-rt-period int Limit the CPU real-time period in microseconds
+ --cpu-rt-runtime int Limit the CPU real-time runtime in microseconds
+ --data-root string Root directory of persistent Docker state (default "/var/lib/docker")
+ -D, --debug Enable debug mode
+ --default-gateway ip Container default gateway IPv4 address
+ --default-gateway-v6 ip Container default gateway IPv6 address
+ --default-runtime string Default OCI runtime for containers (default "runc")
+ --default-ulimit ulimit Default ulimits for containers (default [])
+ --disable-legacy-registry Disable contacting legacy registries
+ --dns list DNS server to use (default [])
+ --dns-opt list DNS options to use (default [])
+ --dns-search list DNS search domains to use (default [])
+ --exec-opt list Runtime execution options (default [])
+ --exec-root string Root directory for execution state files (default "/var/run/docker")
+ --experimental Enable experimental features
+ --fixed-cidr string IPv4 subnet for fixed IPs
+ --fixed-cidr-v6 string IPv6 subnet for fixed IPs
+ -G, --group string Group for the unix socket (default "docker")
+ --help Print usage
+ -H, --host list Daemon socket(s) to connect to (default [])
+ --icc Enable inter-container communication (default 
true)
+ --init Run an init in the container to forward signals and reap processes
+ --init-path string Path to the docker-init binary
+ --insecure-registry list Enable insecure registry communication (default [])
+ --ip ip Default IP when binding container ports (default 0.0.0.0)
+ --ip-forward Enable net.ipv4.ip_forward (default true)
+ --ip-masq Enable IP masquerading (default true)
+ --iptables Enable addition of iptables rules (default true)
+ --ipv6 Enable IPv6 networking
+ --label list Set key=value labels to the daemon (default [])
+ --live-restore Enable live restore of docker when containers are still running
+ --log-driver string Default driver for container logs (default "json-file")
+ -l, --log-level string Set the logging level ("debug", "info", "warn", "error", "fatal") (default "info")
+ --log-opt map Default log driver options for containers (default map[])
+ --max-concurrent-downloads int Set the max concurrent downloads for each pull (default 3)
+ --max-concurrent-uploads int Set the max concurrent uploads for each push (default 5)
+ --metrics-addr string Set default address and port to serve the metrics api on
+ --mtu int Set the containers network MTU
+ --no-new-privileges Set no-new-privileges by default for new containers
+ --oom-score-adjust int Set the oom_score_adj for the daemon (default -500)
+ -p, --pidfile string Path to use for daemon PID file (default "/var/run/docker.pid")
+ --raw-logs Full timestamps without ANSI coloring
+ --registry-mirror list Preferred Docker registry mirror (default [])
+ --seccomp-profile string Path to seccomp profile
+ --selinux-enabled Enable selinux support
+ --shutdown-timeout int Set the default shutdown timeout (default 15)
+ -s, --storage-driver string Storage driver to use
+ --storage-opt list Storage driver options (default [])
+ --swarm-default-advertise-addr string Set default address or interface for swarm advertised address
+ --tls Use TLS; implied by --tlsverify
+ --tlscacert string Trust certs signed only by this CA (default "~/.docker/ca.pem")
+ --tlscert string Path to TLS certificate file (default "~/.docker/cert.pem")
+ --tlskey string Path to TLS key file (default "~/.docker/key.pem")
+ --tlsverify Use TLS and verify the remote
+ --userland-proxy Use userland proxy for loopback traffic (default true)
+ --userland-proxy-path string Path to the userland proxy binary
+ --userns-remap string User/Group setting for user namespaces
+ -v, --version Print version information and quit
+```
+
+Options with [] may be specified multiple times.
+
+## Description
+
+`dockerd` is the persistent process that manages containers. Docker
+uses different binaries for the daemon and client. To run the daemon you
+type `dockerd`.
+
+To run the daemon with debug output, use `dockerd -D` or add `"debug": true` to
+the `daemon.json` file.
+
+> **Note**: In Docker 1.13 and higher, enable experimental features by starting
+> `dockerd` with the `--experimental` flag or adding `"experimental": true` to the
+> `daemon.json` file. In earlier Docker versions, a different build was required
+> to enable experimental features.
+
+## Examples
+
+### Daemon socket option
+
+The Docker daemon can listen for [Docker Engine API](../api/)
+requests via three different types of socket: `unix`, `tcp`, and `fd`.
+
+By default, a `unix` domain socket (or IPC socket) is created at
+`/var/run/docker.sock`, requiring either `root` permission, or `docker` group
+membership.
+
+If you need to access the Docker daemon remotely, you need to enable the `tcp`
+socket. 
Beware that the default setup provides un-encrypted and
+un-authenticated direct access to the Docker daemon - and should be secured
+either using the [built in HTTPS encrypted socket](https://docs.docker.com/engine/security/https/), or by
+putting a secure web proxy in front of it. You can listen on port `2375` on all
+network interfaces with `-H tcp://0.0.0.0:2375`, or on a particular network
+interface using its IP address: `-H tcp://192.168.59.103:2375`. It is
+conventional to use port `2375` for un-encrypted, and port `2376` for encrypted
+communication with the daemon.
+
+> **Note**: If you're using an HTTPS encrypted socket, keep in mind that only
+> TLS1.0 and greater are supported. Protocols SSLv3 and under are not
+> supported anymore for security reasons.
+
+On Systemd based systems, you can communicate with the daemon via
+[Systemd socket activation](http://0pointer.de/blog/projects/socket-activation.html);
+to do so, use `dockerd -H fd://`. Using `fd://` will work perfectly for most setups but
+you can also specify individual sockets: `dockerd -H fd://3`. If the
+specified socket-activated files aren't found, then Docker will exit. You can
+find examples of using Systemd socket activation with Docker and Systemd in the
+[Docker source tree](https://github.com/docker/docker/tree/master/contrib/init/systemd/).
+
+You can configure the Docker daemon to listen to multiple sockets at the same
+time using multiple `-H` options:
+
+```bash
+# listen using the default unix socket, and on 2 specific IP addresses on this host.
+
+$ sudo dockerd -H unix:///var/run/docker.sock -H tcp://192.168.59.106 -H tcp://10.10.10.2
+```
+
+The Docker client will honor the `DOCKER_HOST` environment variable to set the
+`-H` flag for the client. Use **one** of the following commands:
+
+```bash
+$ docker -H tcp://0.0.0.0:2375 ps
+```
+
+```bash
+$ export DOCKER_HOST="tcp://0.0.0.0:2375"
+
+$ docker ps
+```
+
+Setting the `DOCKER_TLS_VERIFY` environment variable to any value other than
+the empty string is equivalent to setting the `--tlsverify` flag. The following
+are equivalent:
+
+```bash
+$ docker --tlsverify ps
+# or
+$ export DOCKER_TLS_VERIFY=1
+$ docker ps
+```
+
+The Docker client will honor the `HTTP_PROXY`, `HTTPS_PROXY`, and `NO_PROXY`
+environment variables (or the lowercase versions thereof). `HTTPS_PROXY` takes
+precedence over `HTTP_PROXY`.
+
+#### Bind Docker to another host/port or a Unix socket
+
+> **Warning**:
+> Changing the default `docker` daemon binding to a
+> TCP port or Unix *docker* user group will increase your security risks
+> by allowing non-root users to gain *root* access on the host. Make sure
+> you control access to `docker`. If you are binding
+> to a TCP port, anyone with access to that port has full Docker access;
+> so it is not advisable on an open network.
+
+With `-H` it is possible to make the Docker daemon listen on a
+specific IP and port. By default, it will listen on
+`unix:///var/run/docker.sock` to allow only local connections by the
+*root* user. You *could* set it to `0.0.0.0:2375` or a specific host IP
+to give access to everybody, but that is **not recommended** because
+then it is trivial for someone to gain root access to the host where the
+daemon is running.
+
+Similarly, the Docker client can use `-H` to connect to a custom port.
+The Docker client will default to connecting to `unix:///var/run/docker.sock`
+on Linux, and `tcp://127.0.0.1:2376` on Windows. 
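+
+The daemon's listen addresses can also be set in the `daemon.json`
+configuration file instead of on the command line. A minimal sketch, assuming
+the default configuration file location and local-only TCP access; adjust the
+addresses for your environment:
+
+```json
+{
+  "hosts": ["unix:///var/run/docker.sock", "tcp://127.0.0.1:2375"]
+}
+```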
+
+`-H` accepts host and port assignment in the following format:
+
+ tcp://[host]:[port][path] or unix://path
+
+For example:
+
+- `tcp://` -> TCP connection to `127.0.0.1` on either port `2376` when TLS encryption
+ is on, or port `2375` when communication is in plain text.
+- `tcp://host:2375` -> TCP connection on
+ host:2375
+- `tcp://host:2375/path` -> TCP connection on
+ host:2375 and prepend path to all requests
+- `unix://path/to/socket` -> Unix socket located
+ at `path/to/socket`
+
+`-H`, when empty, will default to the same value as
+when no `-H` was passed in.
+
+`-H` also accepts short forms for TCP bindings: `host:` or `host:port` or `:port`
+
+Run Docker in daemon mode:
+
+```bash
+$ sudo <path to>/dockerd -H 0.0.0.0:5555 &
+```
+
+Download an `ubuntu` image:
+
+```bash
+$ docker -H :5555 pull ubuntu
+```
+
+You can use multiple `-H`, for example, if you want to listen on both
+TCP and a Unix socket:
+
+```bash
+# Run docker in daemon mode
+$ sudo <path to>/dockerd -H tcp://127.0.0.1:2375 -H unix:///var/run/docker.sock &
+# Download an ubuntu image, use default Unix socket
+$ docker pull ubuntu
+# OR use the TCP port
+$ docker -H tcp://127.0.0.1:2375 pull ubuntu
+```
+
+### Daemon storage-driver
+
+The Docker daemon has support for several different image layer storage
+drivers: `aufs`, `devicemapper`, `btrfs`, `zfs`, `overlay` and `overlay2`.
+
+The `aufs` driver is the oldest, but is based on a Linux kernel patch-set that
+is unlikely to be merged into the main kernel. It is also known to cause
+some serious kernel crashes. However `aufs` allows containers to share
+executable and shared library memory, so is a useful choice when running
+thousands of containers with the same program or libraries.
+
+The `devicemapper` driver uses thin provisioning and Copy on Write (CoW)
+snapshots. For each devicemapper graph location – typically
+`/var/lib/docker/devicemapper` – a thin pool is created based on two block
+devices, one for data and one for metadata. By default, these block devices
+are created automatically by using loopback mounts of automatically created
+sparse files. Refer to [Options per storage driver](#options-per-storage-driver) below
+for how to customize this setup. The
+[~jpetazzo/Resizing Docker containers with the Device Mapper plugin](http://jpetazzo.github.io/2014/01/29/docker-device-mapper-resize/)
+article explains how to tune your existing setup without the use of options.
+
+The `btrfs` driver is very fast for `docker build` - but like `devicemapper`
+does not share executable memory between devices. Use
+`dockerd -s btrfs -g /mnt/btrfs_partition`.
+
+The `zfs` driver is probably not as fast as `btrfs` but has a longer track record
+on stability. Thanks to `Single Copy ARC`, shared blocks between clones will be
+cached only once. Use `dockerd -s zfs`. To select a different zfs filesystem,
+set the `zfs.fsname` option as described in [Options per storage driver](#options-per-storage-driver).
+
+`overlay` is a very fast union filesystem. It has been merged into the main
+Linux kernel as of [3.18.0](https://lkml.org/lkml/2014/10/26/137). `overlay`
+also supports page cache sharing: multiple containers accessing
+the same file can share a single page cache entry (or entries), which makes
+`overlay` as efficient with memory as the `aufs` driver. Call
+`dockerd -s overlay` to use it.
+
+> **Note**: As promising as `overlay` is, the feature is still quite young and
+> should not be used in production. 
Most notably, using `overlay` can cause
+> excessive inode consumption (especially as the number of images grows), as
+> well as being incompatible with the use of RPMs.
+
+`overlay2` uses the same fast union filesystem but takes advantage of
+[additional features](https://lkml.org/lkml/2015/2/11/106) added in Linux
+kernel 4.0 to avoid excessive inode consumption. Call `dockerd -s overlay2`
+to use it.
+
+> **Note**: Both `overlay` and `overlay2` are currently unsupported on `btrfs`
+> or any Copy on Write filesystem and should only be used over `ext4` partitions.
+
+### Options per storage driver
+
+A particular storage driver can be configured with options specified with
+`--storage-opt` flags. Options for `devicemapper` are prefixed with `dm`,
+options for `zfs` start with `zfs` and options for `btrfs` start with `btrfs`.
+
+#### Devicemapper options
+
+This is an example of the configuration file for devicemapper on Linux:
+
+```json
+{
+  "storage-driver": "devicemapper",
+  "storage-opts": [
+    "dm.thinpooldev=/dev/mapper/thin-pool",
+    "dm.use_deferred_deletion=true",
+    "dm.use_deferred_removal=true"
+  ]
+}
+```
+
+##### `dm.thinpooldev`
+
+Specifies a custom block storage device to use for the thin pool.
+
+If using a block device for device mapper storage, it is best to use `lvm`
+to create and manage the thin-pool volume. This volume is then handed to Docker
+to exclusively create snapshot volumes needed for images and containers.
+
+Managing the thin-pool outside of Engine makes for the most feature-rich
+method of having Docker utilize device mapper thin provisioning as the
+backing storage for Docker containers. The highlights of the lvm-based
+thin-pool management feature include: automatic or interactive thin-pool
+resize support, dynamically changing thin-pool features, automatic thinp
+metadata checking when lvm activates the thin-pool, etc.
+
+As a fallback if no thin pool is provided, loopback files are
+created. Loopback is very slow, but can be used without any
+pre-configuration of storage. It is strongly recommended that you do
+not use loopback in production. Ensure your Engine daemon has a
+`--storage-opt dm.thinpooldev` argument provided.
+
+###### Example
+
+```bash
+$ sudo dockerd --storage-opt dm.thinpooldev=/dev/mapper/thin-pool
+```
+
+##### `dm.directlvm_device`
+
+As an alternative to providing a thin pool as above, Docker can set up a block
+device for you.
+
+###### Example
+
+```bash
+$ sudo dockerd --storage-opt dm.directlvm_device=/dev/xvdf
+```
+
+##### `dm.thinp_percent`
+
+Sets the percentage of the passed-in block device to use for storage.
+
+###### Example
+
+```bash
+$ sudo dockerd --storage-opt dm.thinp_percent=95
+```
+
+##### `dm.thinp_metapercent`
+
+Sets the percentage of the passed-in block device to use for metadata storage. 
+
+###### Example
+
+```bash
+$ sudo dockerd --storage-opt dm.thinp_metapercent=1
+```
+
+##### `dm.thinp_autoextend_threshold`
+
+Sets the percentage of space used before `lvm` attempts to
+autoextend the available space. [100 = disabled]
+
+###### Example
+
+```bash
+$ sudo dockerd --storage-opt dm.thinp_autoextend_threshold=80
+```
+
+##### `dm.thinp_autoextend_percent`
+
+Sets the percentage by which to increase the thin pool when `lvm`
+attempts to autoextend the available space. [100 = disabled]
+
+###### Example
+
+```bash
+$ sudo dockerd --storage-opt dm.thinp_autoextend_percent=20
+```
+
+
+##### `dm.basesize`
+
+Specifies the size to use when creating the base device, which limits the
+size of images and containers. The default value is 10G. Note, thin devices
+are inherently "sparse", so a 10G device which is mostly empty doesn't use
+10 GB of space on the pool. However, the larger the device is, the more
+space the filesystem will use even in the empty case.
+
+The base device size can be increased at daemon restart, which will allow
+all future images and containers (based on those new images) to be of the
+new base device size.
+
+###### Examples
+
+```bash
+$ sudo dockerd --storage-opt dm.basesize=50G
+```
+
+This will increase the base device size to 50G. The Docker daemon will throw an
+error if the existing base device size is larger than 50G. A user can use
+this option to expand the base device size; however, shrinking is not permitted.
+
+This value affects the system-wide "base" empty filesystem
+that may already be initialized and inherited by pulled images. Typically,
+a change to this value requires additional steps to take effect:
+
+```bash
+$ sudo service docker stop
+
+$ sudo rm -rf /var/lib/docker
+
+$ sudo service docker start
+```
+
+
+##### `dm.loopdatasize`
+
+> **Note**: This option configures devicemapper loopback, which should not
+> be used in production.
+
+Specifies the size to use when creating the loopback file for the
+"data" device which is used for the thin pool. The default size is
+100G. The file is sparse, so it will not initially take up this
+much space.
+
+###### Example
+
+```bash
+$ sudo dockerd --storage-opt dm.loopdatasize=200G
+```
+
+##### `dm.loopmetadatasize`
+
+> **Note**: This option configures devicemapper loopback, which should not
+> be used in production.
+
+Specifies the size to use when creating the loopback file for the
+"metadata" device which is used for the thin pool. The default size
+is 2G. The file is sparse, so it will not initially take up
+this much space.
+
+###### Example
+
+```bash
+$ sudo dockerd --storage-opt dm.loopmetadatasize=4G
+```
+
+##### `dm.fs`
+
+Specifies the filesystem type to use for the base device. The supported
+options are "ext4" and "xfs". The default is "xfs".
+
+###### Example
+
+```bash
+$ sudo dockerd --storage-opt dm.fs=ext4
+```
+
+##### `dm.mkfsarg`
+
+Specifies extra mkfs arguments to be used when creating the base device.
+
+###### Example
+
+```bash
+$ sudo dockerd --storage-opt "dm.mkfsarg=-O ^has_journal"
+```
+
+##### `dm.mountopt`
+
+Specifies extra mount options used when mounting the thin devices.
+
+###### Example
+
+```bash
+$ sudo dockerd --storage-opt dm.mountopt=nodiscard
+```
+
+##### `dm.datadev`
+
+(Deprecated, use `dm.thinpooldev`)
+
+Specifies a custom blockdevice to use for data for the thin pool. 
+
+If using a block device for device mapper storage, ideally both `datadev` and
+`metadatadev` should be specified to completely avoid using the loopback
+device.
+
+###### Example
+
+```bash
+$ sudo dockerd \
+    --storage-opt dm.datadev=/dev/sdb1 \
+    --storage-opt dm.metadatadev=/dev/sdc1
+```
+
+##### `dm.metadatadev`
+
+(Deprecated, use `dm.thinpooldev`)
+
+Specifies a custom blockdevice to use for metadata for the thin pool.
+
+For best performance the metadata should be on a different spindle than the
+data, or even better on an SSD.
+
+If setting up a new metadata pool, it is required to be valid. This can be
+achieved by zeroing the first 4k to indicate empty metadata, like this:
+
+```bash
+$ dd if=/dev/zero of=$metadata_dev bs=4096 count=1
+```
+
+###### Example
+
+```bash
+$ sudo dockerd \
+    --storage-opt dm.datadev=/dev/sdb1 \
+    --storage-opt dm.metadatadev=/dev/sdc1
+```
+
+##### `dm.blocksize`
+
+Specifies a custom blocksize to use for the thin pool. The default
+blocksize is 64K.
+
+###### Example
+
+```bash
+$ sudo dockerd --storage-opt dm.blocksize=512K
+```
+
+##### `dm.blkdiscard`
+
+Enables or disables the use of `blkdiscard` when removing devicemapper
+devices. This is enabled by default (only) if using loopback devices and is
+required to resparsify the loopback file on image/container removal.
+
+Disabling this on loopback can lead to *much* faster container removal
+times, but will prevent the space used in the `/var/lib/docker` directory
+from being returned to the system for other use when containers are removed.
+
+###### Examples
+
+```bash
+$ sudo dockerd --storage-opt dm.blkdiscard=false
+```
+
+##### `dm.override_udev_sync_check`
+
+Overrides the `udev` synchronization checks between `devicemapper` and `udev`.
+`udev` is the device manager for the Linux kernel.
+
+To view the `udev` sync support of a Docker daemon that is using the
+`devicemapper` driver, run:
+
+```bash
+$ docker info
+[...]
+Udev Sync Supported: true
+[...]
+```
+
+When `udev` sync support is `true`, then `devicemapper` and udev can
+coordinate the activation and deactivation of devices for containers.
+
+When `udev` sync support is `false`, a race condition occurs between
+the `devicemapper` and `udev` during create and cleanup. The race condition
+results in errors and failures. (For information on these failures, see
+[docker#4036](https://github.com/docker/docker/issues/4036))
+
+To allow the `docker` daemon to start, regardless of `udev` sync not being
+supported, set `dm.override_udev_sync_check` to true:
+
+```bash
+$ sudo dockerd --storage-opt dm.override_udev_sync_check=true
+```
+
+When this value is `true`, the `devicemapper` continues and simply warns
+you that errors are happening.
+
+> **Note**: The ideal is to pursue a `docker` daemon and environment that does
+> support synchronizing with `udev`. For further discussion on this
+> topic, see [docker#4036](https://github.com/docker/docker/issues/4036).
+> Otherwise, set this flag for migrating existing Docker daemons to
+> a daemon with a supported environment.
+
+##### `dm.use_deferred_removal`
+
+Enables use of deferred device removal if `libdm` and the kernel driver
+support the mechanism.
+
+Deferred device removal means that if a device is busy when devices are
+being removed/deactivated, a deferred removal is scheduled on that
+device. Devices then go away automatically when the last user of the device
+exits.
+
+For example, when a container exits, its associated thin device is removed. 
+If that device has leaked into some other mount namespace and can't be
+removed, the container exit still succeeds and this option causes the
+system to schedule the device for deferred removal. It does not wait in a
+loop trying to remove a busy device.
+
+###### Example
+
+```bash
+$ sudo dockerd --storage-opt dm.use_deferred_removal=true
+```
+
+##### `dm.use_deferred_deletion`
+
+Enables use of deferred device deletion for thin pool devices. By default,
+thin pool device deletion is synchronous. Before a container is deleted,
+the Docker daemon removes any associated devices. If the storage driver
+can not remove a device, the container deletion fails and the daemon
+returns an error:
+
+```none
+Error deleting container: Error response from daemon: Cannot destroy container
+```
+
+To avoid this failure, enable both deferred device deletion and deferred
+device removal on the daemon.
+
+```bash
+$ sudo dockerd \
+    --storage-opt dm.use_deferred_deletion=true \
+    --storage-opt dm.use_deferred_removal=true
+```
+
+With these two options enabled, if a device is busy when the driver is
+deleting a container, the driver marks the device as deleted. Later, when
+the device isn't in use, the driver deletes it.
+
+In general it should be safe to enable this option by default. It will help
+when mount points unintentionally leak across multiple mount
+namespaces.
+
+##### `dm.min_free_space`
+
+Specifies the minimum free space percent required in a thin pool for new device
+creation to succeed. This check applies to both free data space and
+free metadata space. Valid values are from 0% to 99%. A value of 0% disables
+free space checking logic. If the user does not specify a value for this option,
+the Engine uses a default value of 10%.
+
+Whenever a new thin pool device is created (during `docker pull` or during
+container creation), the Engine checks if the minimum free space is
+available. If sufficient space is unavailable, then device creation fails
+and any relevant `docker` operation fails.
+
+To recover from this error, you must create more free space in the thin pool.
+You can create free space by deleting some images
+and containers from the thin pool. You can also add more storage to the thin
+pool.
+
+To add more space to an LVM (logical volume management) thin pool, just add
+more storage to the volume group containing the thin pool; this should automatically
+resolve any errors. If your configuration uses loop devices, then stop the
+Engine daemon, grow the size of the loop files and restart the daemon to resolve
+the issue.
+
+###### Example
+
+```bash
+$ sudo dockerd --storage-opt dm.min_free_space=10%
+```
+
+##### `dm.xfs_nospace_max_retries`
+
+Specifies the maximum number of retries XFS should attempt to complete
+IO when an ENOSPC (no space) error is returned by the underlying storage device.
+
+By default XFS retries infinitely for IO to finish and this can result
+in an unkillable process. To change this behavior, set
+`dm.xfs_nospace_max_retries` to, for example, 0; XFS will then not retry IO
+after getting ENOSPC and will shut down the filesystem.
+
+###### Example
+
+```bash
+$ sudo dockerd --storage-opt dm.xfs_nospace_max_retries=0
+```
+
+#### ZFS options
+
+##### `zfs.fsname`
+
+Sets the zfs filesystem under which docker will create its own datasets.
+By default docker will pick up the zfs filesystem where the docker graph
+(`/var/lib/docker`) is located. 
+
+###### Example
+
+```bash
+$ sudo dockerd -s zfs --storage-opt zfs.fsname=zroot/docker
+```
+
+#### Btrfs options
+
+##### `btrfs.min_space`
+
+Specifies the minimum size to use when creating the subvolume which is used
+for containers. If the user uses disk quota for btrfs when creating or running
+a container with the **--storage-opt size** option, docker ensures that
+**size** cannot be smaller than **btrfs.min_space**.
+
+###### Example
+
+```bash
+$ sudo dockerd -s btrfs --storage-opt btrfs.min_space=10G
+```
+
+#### Overlay2 options
+
+##### `overlay2.override_kernel_check`
+
+Overrides the Linux kernel version check allowing overlay2. Support for
+specifying multiple lower directories needed by overlay2 was added to the
+Linux kernel in 4.0.0. However, some older kernel versions may be patched
+to add multiple lower directory support for OverlayFS. This option should
+only be used after verifying this support exists in the kernel. Applying
+this option on a kernel without this support will cause failures on mount.
+
+### Docker runtime execution options
+
+The Docker daemon relies on an
+[OCI](https://github.com/opencontainers/runtime-spec) compliant runtime
+(invoked via the `containerd` daemon) as its interface to the Linux
+kernel `namespaces`, `cgroups`, and `SELinux`.
+
+By default, the Docker daemon automatically starts `containerd`. If you want to
+control `containerd` startup, manually start `containerd` and pass the path to
+the `containerd` socket using the `--containerd` flag. For example:
+
+```bash
+$ sudo dockerd --containerd /var/run/dev/docker-containerd.sock
+```
+
+Runtimes can be registered with the daemon either via the
+configuration file or using the `--add-runtime` command line argument.
+
+The following is an example adding 2 runtimes via the configuration:
+
+```json
+{
+  "default-runtime": "runc",
+  "runtimes": {
+    "runc": {
+      "path": "runc"
+    },
+    "custom": {
+      "path": "/usr/local/bin/my-runc-replacement",
+      "runtimeArgs": [
+        "--debug"
+      ]
+    }
+  }
+}
+```
+
+This is the same example via the command line:
+
+```bash
+$ sudo dockerd --add-runtime runc=runc --add-runtime custom=/usr/local/bin/my-runc-replacement
+```
+
+> **Note**: Defining runtime arguments via the command line is not supported.
+
+#### Options for the runtime
+
+You can configure the runtime using options specified
+with the `--exec-opt` flag. All the flag's options have the `native` prefix. A
+single `native.cgroupdriver` option is available.
+
+The `native.cgroupdriver` option specifies the management of the container's
+cgroups. You can only specify `cgroupfs` or `systemd`. If you specify
+`systemd` and it is not available, the system errors out. If you omit the
+`native.cgroupdriver` option, `cgroupfs` is used.
+
+This example sets the `cgroupdriver` to `systemd`:
+
+```bash
+$ sudo dockerd --exec-opt native.cgroupdriver=systemd
+```
+
+Setting this option applies to all containers the daemon launches.
+
+The Windows daemon also uses `--exec-opt` for a special purpose: a Docker user
+can specify the default container isolation technology with it. For example:
+
+```console
+> dockerd --exec-opt isolation=hyperv
+```
+
+This will make `hyperv` the default isolation technology on Windows. If no isolation
+value is specified on daemon start, on Windows client, the default is
+`hyperv`, and on Windows server, the default is `process`. 
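+
+For reference, runtime execution options can also be placed in the `daemon.json`
+configuration file. A minimal sketch, reusing the cgroup driver example from
+above (the `exec-opts` key takes a list):
+
+```json
+{
+  "exec-opts": ["native.cgroupdriver=systemd"]
+}
+```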
+
+#### Daemon DNS options
+
+To set the DNS server for all Docker containers, use:
+
+```bash
+$ sudo dockerd --dns 8.8.8.8
+```
+
+To set the DNS search domain for all Docker containers, use:
+
+```bash
+$ sudo dockerd --dns-search example.com
+```
+
+#### Allow push of nondistributable artifacts
+
+Some images (e.g., Windows base images) contain artifacts whose distribution is
+restricted by license. When these images are pushed to a registry, restricted
+artifacts are not included.
+
+To override this behavior for specific registries, use the
+`--allow-nondistributable-artifacts` option in one of the following forms:
+
+* `--allow-nondistributable-artifacts myregistry:5000` tells the Docker daemon
+ to push nondistributable artifacts to myregistry:5000.
+* `--allow-nondistributable-artifacts 10.1.0.0/16` tells the Docker daemon to
+ push nondistributable artifacts to all registries whose resolved IP address
+ is within the subnet described by the CIDR syntax.
+
+This option can be used multiple times.
+
+This option is useful when pushing images containing nondistributable artifacts
+to a registry on an air-gapped network so hosts on that network can pull the
+images without connecting to another server.
+
+> **Warning**: Nondistributable artifacts typically have restrictions on how
+> and where they can be distributed and shared. Only use this feature to push
+> artifacts to private registries and ensure that you are in compliance with
+> any terms that cover redistributing nondistributable artifacts.
+
+#### Insecure registries
+
+Docker considers a private registry either secure or insecure. In the rest of
+this section, *registry* is used for *private registry*, and `myregistry:5000`
+is a placeholder example for a private registry.
+
+A secure registry uses TLS and a copy of its CA certificate is placed on the
+Docker host at `/etc/docker/certs.d/myregistry:5000/ca.crt`. An insecure
+registry is either not using TLS (i.e., listening on plain text HTTP), or is
+using TLS with a CA certificate not known by the Docker daemon. The latter can
+happen when the certificate was not found under
+`/etc/docker/certs.d/myregistry:5000/`, or if the certificate verification
+failed (i.e., wrong CA).
+
+By default, Docker assumes all registries to be secure, except for local
+registries (see below). Communicating with an insecure registry is not possible
+if Docker assumes that registry is secure. In order to communicate with an
+insecure registry, the Docker daemon requires `--insecure-registry` in one of
+the following two forms:
+
+* `--insecure-registry myregistry:5000` tells the Docker daemon that
+ myregistry:5000 should be considered insecure.
+* `--insecure-registry 10.1.0.0/16` tells the Docker daemon that all registries
+ whose domain resolves to an IP address within the subnet described by the
+ CIDR syntax should be considered insecure.
+
+The flag can be used multiple times to allow multiple registries to be marked
+as insecure.
+
+If an insecure registry is not marked as insecure, `docker pull`,
+`docker push`, and `docker search` will result in an error message prompting
+the user to either secure or pass the `--insecure-registry` flag to the Docker
+daemon as described above.
+
+Local registries, whose IP address falls in the 127.0.0.0/8 range, are
+automatically marked as insecure as of Docker 1.3.2. It is not recommended to
+rely on this, as it may change in the future. 
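+
+For reference, registries can also be marked insecure in the `daemon.json`
+configuration file. A sketch reusing this section's placeholders:
+
+```json
+{
+  "insecure-registries": ["myregistry:5000", "10.1.0.0/16"]
+}
+```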
+
+Enabling `--insecure-registry`, i.e., allowing un-encrypted and/or untrusted
+communication, can be useful when running a local registry. However,
+because its use creates security vulnerabilities, it should ONLY be enabled for
+testing purposes. For increased security, users should add their CA to their
+system's list of trusted CAs instead of enabling `--insecure-registry`.
+
+##### Legacy Registries
+
+Enabling `--disable-legacy-registry` forces a docker daemon to only interact with registries which support the V2 protocol. Specifically, the daemon will not attempt `push`, `pull` and `login` to v1 registries. The exception to this is `search` which can still be performed on v1 registries.
+
+#### Running a Docker daemon behind an HTTPS_PROXY
+
+When running inside a LAN that uses an `HTTPS` proxy, the Docker Hub
+certificates will be replaced by the proxy's certificates. These certificates
+need to be added to your Docker host's configuration:
+
+1. Install the `ca-certificates` package for your distribution
+2. Ask your network admin for the proxy's CA certificate and append it to
+ `/etc/pki/tls/certs/ca-bundle.crt`
+3. Then start your Docker daemon with `HTTPS_PROXY=http://username:password@proxy:port/ dockerd`.
+ The `username:` and `password@` are optional - and are only needed if your
+ proxy is set up to require authentication.
+
+This will only add the proxy and authentication to the Docker daemon's requests -
+your `docker build`s and running containers will need extra configuration to
+use the proxy.
+
+#### Default `ulimit` settings
+
+`--default-ulimit` allows you to set the default `ulimit` options to use for
+all containers. It takes the same options as `--ulimit` for `docker run`. If
+these defaults are not set, `ulimit` settings will be inherited from the Docker
+daemon when not set on `docker run`. Any `--ulimit` options passed to
+`docker run` will overwrite these defaults.
+
+Be careful setting `nproc` with the `ulimit` flag as `nproc` is designed by Linux to
+set the maximum number of processes available to a user, not to a container. For details
+please check the [run](run.md) reference.
+
+#### Node discovery
+
+The `--cluster-advertise` option specifies the `host:port` or `interface:port`
+combination that this particular daemon instance should use when advertising
+itself to the cluster. The daemon is reached by remote hosts through this value.
+If you specify an interface, make sure it includes the IP address of the actual
+Docker host. For Engine installations created through `docker-machine`, the
+interface is typically `eth1`.
+
+The daemon uses [libkv](https://github.com/docker/libkv/) to advertise
+the node within the cluster. Some key-value backends support mutual
+TLS. The client TLS settings used by the daemon can be configured
+using the `--cluster-store-opt` flag, specifying the paths to PEM encoded
+files. For example:
+
+```bash
+$ sudo dockerd \
+    --cluster-advertise 192.168.1.2:2376 \
+    --cluster-store etcd://192.168.1.2:2379 \
+    --cluster-store-opt kv.cacertfile=/path/to/ca.pem \
+    --cluster-store-opt kv.certfile=/path/to/cert.pem \
+    --cluster-store-opt kv.keyfile=/path/to/key.pem
+```
+
+The currently supported cluster store options are:
+
+| Option | Description |
+|-----------------------|-------------|
+| `discovery.heartbeat` | Specifies the heartbeat timer in seconds which is used by the daemon as a `keepalive` mechanism to make sure the discovery module treats the node as alive in the cluster. 
If not configured, the default value is 20 seconds. |
+| `discovery.ttl` | Specifies the TTL (time-to-live) in seconds which is used by the discovery module to time out a node if a valid heartbeat is not received within the configured ttl value. If not configured, the default value is 60 seconds. |
+| `kv.cacertfile` | Specifies the path to a local file with PEM encoded CA certificates to trust. |
+| `kv.certfile` | Specifies the path to a local file with a PEM encoded certificate. This certificate is used as the client cert for communication with the Key/Value store. |
+| `kv.keyfile` | Specifies the path to a local file with a PEM encoded private key. This private key is used as the client key for communication with the Key/Value store. |
+| `kv.path` | Specifies the path in the Key/Value store. If not configured, the default value is 'docker/nodes'. |
+
+#### Access authorization
+
+Docker's access authorization can be extended by authorization plugins that your
+organization can purchase or build themselves. You can install one or more
+authorization plugins when you start the Docker `daemon` using the
+`--authorization-plugin=PLUGIN_ID` option.
+
+```bash
+$ sudo dockerd --authorization-plugin=plugin1 --authorization-plugin=plugin2,...
+```
+
+The `PLUGIN_ID` value is either the plugin's name or a path to its specification
+file. The plugin's implementation determines whether you can specify a name or
+path. Consult with your Docker administrator to get information about the
+plugins available to you.
+
+Once a plugin is installed, requests made to the `daemon` through the
+command line or Docker's Engine API are allowed or denied by the plugin.
+If you have multiple plugins installed, each plugin, in order, must
+allow the request for it to complete.
+
+For information about how to create an authorization plugin, see the [authorization
+plugin](../../extend/plugins_authorization.md) section in the Docker extend section of this documentation.
+
+
+#### Daemon user namespace options
+
+The Linux kernel [user namespace support](http://man7.org/linux/man-pages/man7/user_namespaces.7.html) provides additional security by enabling
+a process, and therefore a container, to have a unique range of user and
+group IDs which are outside the traditional user and group range utilized by
+the host system. Potentially the most important security improvement is that,
+by default, container processes running as the `root` user will have expected
+administrative privilege (with some restrictions) inside the container but will
+effectively be mapped to an unprivileged `uid` on the host.
+
+When user namespace support is enabled, Docker creates a single daemon-wide mapping
+for all containers running on the same engine instance. The mappings will
+utilize the existing subordinate user and group ID feature available on all modern
+Linux distributions.
+The [`/etc/subuid`](http://man7.org/linux/man-pages/man5/subuid.5.html) and
+[`/etc/subgid`](http://man7.org/linux/man-pages/man5/subgid.5.html) files will be
+read for the user, and optional group, specified to the `--userns-remap`
+parameter. If you do not wish to specify your own user and/or group, you can
+provide `default` as the value to this flag, and a user will be created on your behalf
+and provided subordinate uid and gid ranges. This default user will be named
+`dockremap`, and entries will be created for it in `/etc/passwd` and
+`/etc/group` using your distro's standard user and group creation tools. 
+

> **Note**: The single mapping per-daemon restriction is in place for now
> because Docker shares image layers from its local cache across all
> containers running on the engine instance. Since file ownership must be
> the same for all containers sharing the same layer content, the decision
> was made to map the file ownership on `docker pull` to the daemon's user and
> group mappings so that there is no delay for running containers once the
> content is downloaded. This design preserves the same performance for `docker
> pull`, `docker push`, and container startup as users expect with
> user namespaces disabled.

##### Start the daemon with user namespaces enabled

To enable user namespace support, start the daemon with the
`--userns-remap` flag, which accepts values in the following formats:

 - uid
 - uid:gid
 - username
 - username:groupname

If numeric IDs are provided, translation back to valid user or group names
will occur so that the subordinate uid and gid information can be read, given
these resources are name-based, not id-based. If the numeric ID information
provided does not exist as entries in `/etc/passwd` or `/etc/group`, daemon
startup will fail with an error message.

**Example: starting with default Docker user management:**

```bash
$ sudo dockerd --userns-remap=default
```

When `default` is provided, Docker will create - or find the existing - user and group
named `dockremap`. If the user is created, and the Linux distribution has
appropriate support, the `/etc/subuid` and `/etc/subgid` files will be populated
with a contiguous range of 65536 subordinate user and group IDs, starting
at an offset based on prior entries in those files. For example, Ubuntu will
create the following range, based on an existing user named `user1` already owning
the first 65536 range:

```bash
$ cat /etc/subuid
user1:100000:65536
dockremap:165536:65536
```

If you have a preferred/self-managed user with subordinate ID mappings already
configured, you can provide that username or uid to the `--userns-remap` flag.
If you have a group that doesn't match the username, you may provide the `gid`
or group name as well; otherwise the username will be used as the group name
when querying the system for the subordinate group ID range.

The output of `docker info` can be used to determine if the daemon is running
with user namespaces enabled or not. If the daemon is configured with user
namespaces, the Security Options entry in the response will list "userns" as
one of the enabled security features.

##### Behavior differences when user namespaces are enabled

When you start the Docker daemon with `--userns-remap`, Docker segregates the graph directory
where the images are stored by adding an extra directory with a name corresponding to the
remapped UID and GID. For example, if the remapped UID and GID begin with `165536`, all
images and containers running with that remap setting are located in `/var/lib/docker/165536.165536`
instead of `/var/lib/docker/`.

In addition, the files and directories within the new directory, which correspond to
images and container layers, are also owned by the new UID and GID. To set the ownership
correctly, you need to re-pull the images and restart the containers after starting the
daemon with `--userns-remap`.
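A quick way to confirm the segregated layout is to list the data directory and
check its ownership; the directory name below assumes the example remapping
starting at `165536` shown above:

```bash
# The remapped engine keeps its images and containers in a per-mapping
# directory, owned by the first UID/GID of the remapped range.
$ sudo ls -ld /var/lib/docker/165536.165536
```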
+

##### Detailed information on `subuid`/`subgid` ranges

Given potential advanced use of the subordinate ID ranges by power users, the
following paragraphs define how the Docker daemon currently uses the range entries
found within the subordinate range files.

The simplest case is that only one contiguous range is defined for the
provided user or group. In this case, Docker will use that entire contiguous
range for the mapping of host uids and gids to the container process. This
means that the first ID in the range will be the remapped root user, and the
IDs above that initial ID will map host ID 1 through the end of the range.

From the example `/etc/subuid` content shown above, the remapped root
user would be uid 165536.

If the system administrator has set up multiple ranges for a single user or
group, the Docker daemon will read all the available ranges and use the
following algorithm to create the mapping ranges:

1. The range segments found for the particular user will be sorted by *start ID* ascending.
2. Map segments will be created from each range in increasing value with a length matching the length of each segment. Therefore the range segment with the lowest numeric starting value will be equal to the remapped root, and continue up through host uid/gid equal to the range segment length. As an example, if the lowest segment starts at ID 1000 and has a length of 100, then a map of 1000 -> 0 (the remapped root) up through 1099 -> 99 will be created from this segment. If the next segment starts at ID 10000, then the next map will start with mapping 10000 -> 100 up to the length of this second segment. This will continue until no more segments are found in the subordinate files for this user.
3. If more than five range segments exist for a single user, only the first five will be utilized, matching the kernel's limitation of only five entries in `/proc/self/uid_map` and `/proc/self/gid_map`.

##### Disable user namespace for a container

If you enable user namespaces on the daemon, all containers are started
with user namespaces enabled. In some situations you might want to disable
this feature for a container, for example, to start a privileged container (see
[user namespace known restrictions](#user-namespace-known-restrictions)).
To enable those advanced features for a specific container, use `--userns=host`
in the `run/exec/create` command.
This option will completely disable user namespace mapping for the container's user.

##### User namespace known restrictions

The following standard Docker features are currently incompatible when
running a Docker daemon with user namespaces enabled:

 - sharing PID or NET namespaces with the host (`--pid=host` or `--net=host`)
 - Using the `--privileged` mode flag on `docker run` (unless also specifying `--userns=host`)

In general, user namespaces are an advanced feature and will require
coordination with other capabilities. For example, if volumes are mounted from
the host, file ownership will have to be pre-arranged if the user or
administrator wishes the containers to have expected access to the volume
contents. Note that when using external volume or graph driver plugins, those
external software programs must be made aware of user and group mapping ranges
if they are to work seamlessly with user namespace support.
+

Finally, while the `root` user inside a user namespaced container process has
many of the expected admin privileges that go along with being the superuser, the
Linux kernel has restrictions based on internal knowledge that this is a user namespaced
process. The most notable restriction that we are aware of at this time is the
inability to use `mknod`. Permission will be denied for device creation even as
container `root` inside a user namespace.

### Miscellaneous options

IP masquerading uses address translation to allow containers without a public
IP to talk to other machines on the Internet. This may interfere with some
network topologies and can be disabled with `--ip-masq=false`.

Docker supports soft links (symlinks) for the Docker data directory
(`/var/lib/docker`) and for `/var/lib/docker/tmp`. The `DOCKER_TMPDIR` and the
data directory can be set like this:

```bash
$ DOCKER_TMPDIR=/mnt/disk2/tmp /usr/local/bin/dockerd -D -g /var/lib/docker -H unix:// > /var/lib/docker-machine/docker.log 2>&1
# or
$ export DOCKER_TMPDIR=/mnt/disk2/tmp
$ /usr/local/bin/dockerd -D -g /var/lib/docker -H unix:// > /var/lib/docker-machine/docker.log 2>&1
```

#### Default cgroup parent

The `--cgroup-parent` option allows you to set the default cgroup parent
to use for containers. If this option is not set, it defaults to `/docker` for
the fs cgroup driver and `system.slice` for the systemd cgroup driver.

If the cgroup has a leading forward slash (`/`), the cgroup is created
under the root cgroup, otherwise the cgroup is created under the daemon
cgroup.

Assuming the daemon is running in cgroup `daemoncgroup`,
`--cgroup-parent=/foobar` creates a cgroup in
`/sys/fs/cgroup/memory/foobar`, whereas using `--cgroup-parent=foobar`
creates the cgroup in `/sys/fs/cgroup/memory/daemoncgroup/foobar`.

The systemd cgroup driver has different rules for `--cgroup-parent`. Systemd
represents hierarchy by slice and the name of the slice encodes the location in
the tree. So `--cgroup-parent` for systemd cgroups should be a slice name. A
name can consist of a dash-separated series of names, which describes the path
to the slice from the root slice. For example, `--cgroup-parent=user-a-b.slice`
means the memory cgroup for the container is created in
`/sys/fs/cgroup/memory/user.slice/user-a.slice/user-a-b.slice/docker-.scope`.

This setting can also be set per container, using the `--cgroup-parent`
option on `docker create` and `docker run`, and takes precedence over
the `--cgroup-parent` option on the daemon.

#### Daemon metrics

The `--metrics-addr` option takes a TCP address to serve the metrics API.
This feature is still experimental; therefore, the daemon must be running in
experimental mode for it to work.

To serve the metrics API on `localhost:1337`, you would specify `--metrics-addr 127.0.0.1:1337`,
allowing you to make requests on the API at `127.0.0.1:1337/metrics` to receive metrics in the
[Prometheus](https://prometheus.io/docs/instrumenting/exposition_formats/) format.

If you are running a Prometheus server, you can add this address to your scrape
configs to have Prometheus collect metrics from Docker. For more information
on Prometheus, see the [Prometheus website](https://prometheus.io/).

```none
scrape_configs:
  - job_name: 'docker'
    static_configs:
      - targets: ['127.0.0.1:1337']
```

Please note that this feature is still marked as experimental, as metrics and
metric names could change while the feature is experimental. Please provide
feedback on what you would like to see collected in the API.
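As a quick check that the endpoint is up, you can also query it directly with
any HTTP client; this is a minimal sketch assuming the
`--metrics-addr 127.0.0.1:1337` example above:

```bash
# Fetch the raw metrics in the Prometheus text exposition format.
$ curl http://127.0.0.1:1337/metrics
```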
#### Daemon configuration file

The `--config-file` option allows you to set any configuration option
for the daemon in a JSON format. This file uses the same flag names as keys,
except for flags that allow several entries, where it uses the plural
of the flag name, e.g., `labels` for the `label` flag.

The options set in the configuration file must not conflict with options set
via flags. The docker daemon fails to start if an option is duplicated between
the file and the flags, regardless of their value. We do this to avoid
silently ignoring changes introduced in configuration reloads.
For example, the daemon fails to start if you set daemon labels
in the configuration file and also set daemon labels via the `--label` flag.
Options that are not present in the file are ignored when the daemon starts.

##### On Linux

The default location of the configuration file on Linux is
`/etc/docker/daemon.json`. The `--config-file` flag can be used to specify a
non-default location.

This is a full example of the allowed configuration options on Linux:

```json
{
  "authorization-plugins": [],
  "data-root": "",
  "dns": [],
  "dns-opts": [],
  "dns-search": [],
  "exec-opts": [],
  "exec-root": "",
  "experimental": false,
  "storage-driver": "",
  "storage-opts": [],
  "labels": [],
  "live-restore": true,
  "log-driver": "",
  "log-opts": {},
  "mtu": 0,
  "pidfile": "",
  "cluster-store": "",
  "cluster-store-opts": {},
  "cluster-advertise": "",
  "max-concurrent-downloads": 3,
  "max-concurrent-uploads": 5,
  "default-shm-size": "64M",
  "shutdown-timeout": 15,
  "debug": true,
  "hosts": [],
  "log-level": "",
  "tls": true,
  "tlsverify": true,
  "tlscacert": "",
  "tlscert": "",
  "tlskey": "",
  "swarm-default-advertise-addr": "",
  "api-cors-header": "",
  "selinux-enabled": false,
  "userns-remap": "",
  "group": "",
  "cgroup-parent": "",
  "default-ulimits": {},
  "init": false,
  "init-path": "/usr/libexec/docker-init",
  "ipv6": false,
  "iptables": false,
  "ip-forward": false,
  "ip-masq": false,
  "userland-proxy": false,
  "userland-proxy-path": "/usr/libexec/docker-proxy",
  "ip": "0.0.0.0",
  "bridge": "",
  "bip": "",
  "fixed-cidr": "",
  "fixed-cidr-v6": "",
  "default-gateway": "",
  "default-gateway-v6": "",
  "icc": false,
  "raw-logs": false,
  "allow-nondistributable-artifacts": [],
  "registry-mirrors": [],
  "seccomp-profile": "",
  "insecure-registries": [],
  "disable-legacy-registry": false,
  "no-new-privileges": false,
  "default-runtime": "runc",
  "oom-score-adjust": -500,
  "runtimes": {
    "runc": {
      "path": "runc"
    },
    "custom": {
      "path": "/usr/local/bin/my-runc-replacement",
      "runtimeArgs": [
        "--debug"
      ]
    }
  }
}
```

> **Note:** You cannot set options in `daemon.json` that have already been set on
> daemon startup as a flag.
> On systems that use `systemd` to start the Docker daemon, `-H` is already set, so
> you cannot use the `hosts` key in `daemon.json` to add listening addresses.
> See https://docs.docker.com/engine/admin/systemd/#custom-docker-daemon-options for how
> to accomplish this task with a systemd drop-in file.

##### On Windows

The default location of the configuration file on Windows is
`%programdata%\docker\config\daemon.json`. The `--config-file` flag can be
used to specify a non-default location.
+

This is a full example of the allowed configuration options on Windows:

```json
{
  "authorization-plugins": [],
  "data-root": "",
  "dns": [],
  "dns-opts": [],
  "dns-search": [],
  "exec-opts": [],
  "experimental": false,
  "storage-driver": "",
  "storage-opts": [],
  "labels": [],
  "log-driver": "",
  "mtu": 0,
  "pidfile": "",
  "cluster-store": "",
  "cluster-advertise": "",
  "max-concurrent-downloads": 3,
  "max-concurrent-uploads": 5,
  "shutdown-timeout": 15,
  "debug": true,
  "hosts": [],
  "log-level": "",
  "tlsverify": true,
  "tlscacert": "",
  "tlscert": "",
  "tlskey": "",
  "swarm-default-advertise-addr": "",
  "group": "",
  "default-ulimits": {},
  "bridge": "",
  "fixed-cidr": "",
  "raw-logs": false,
  "allow-nondistributable-artifacts": [],
  "registry-mirrors": [],
  "insecure-registries": [],
  "disable-legacy-registry": false
}
```

#### Configuration reload behavior

Some options can be reconfigured while the daemon is running, without
restarting the process. We use the `SIGHUP` signal on Linux to trigger a
reload, and a global event on Windows with the key
`Global\docker-daemon-config-$PID`. The options can be modified in the
configuration file, but the daemon still checks for conflicts with the
provided flags. The daemon fails to reconfigure itself if there are conflicts,
but it won't stop execution.

The currently supported options that can be reconfigured are:

- `debug`: it changes the daemon to debug mode when set to true.
- `cluster-store`: it reloads the discovery store with the new address.
- `cluster-store-opts`: it uses the new options to reload the discovery store.
- `cluster-advertise`: it modifies the address advertised after reloading.
- `labels`: it replaces the daemon labels with a new set of labels.
- `live-restore`: Enables [keeping containers alive during daemon downtime](https://docs.docker.com/engine/admin/live-restore/).
- `max-concurrent-downloads`: it updates the max concurrent downloads for each pull.
- `max-concurrent-uploads`: it updates the max concurrent uploads for each push.
- `default-runtime`: it updates the runtime to be used if none is
  specified at container creation. It defaults to "default", which is
  the runtime shipped with the official docker packages.
- `runtimes`: it updates the list of available OCI runtimes that can
  be used to run containers.
- `authorization-plugin`: specifies the authorization plugins to use.
- `allow-nondistributable-artifacts`: Replaces the set of registries to which the daemon will push nondistributable artifacts with a new set of registries.
- `insecure-registries`: it replaces the daemon insecure registries with a new set of insecure registries. If some existing insecure registries in the daemon's configuration are not in the newly reloaded set, those existing ones will be removed from the daemon's config.
- `registry-mirrors`: it replaces the daemon registry mirrors with a new set of registry mirrors. If some existing registry mirrors in the daemon's configuration are not in the newly reloaded set, those existing ones will be removed from the daemon's config.

Updating and reloading the cluster configurations such as `--cluster-store`,
`--cluster-advertise` and `--cluster-store-opts` will take effect only if
these configurations were not previously configured. If `--cluster-store`
has been provided in flags and `cluster-advertise` not, `cluster-advertise`
can be added in the configuration file without being accompanied by `--cluster-store`.
Configuration reload will log a warning message if it detects a change in
previously configured cluster configurations.
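On Linux, a reload as described above is typically triggered like this (the
PID file path may differ depending on how your daemon was started):

```bash
# Ask a running daemon to re-read its configuration file.
$ sudo kill -SIGHUP $(cat /var/run/docker.pid)
```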
### Run multiple daemons

> **Note:** Running multiple daemons on a single host is considered
> "experimental". The user should be aware of unsolved problems. This solution
> may not work properly in some cases. Solutions are currently under
> development and will be delivered in the near future.

This section describes how to run multiple Docker daemons on a single host. To
run multiple daemons, you must configure each daemon so that it does not
conflict with other daemons on the same host. You can set these options either
by providing them as flags, or by using a [daemon configuration file](#daemon-configuration-file).

The following daemon options must be configured for each daemon:

```none
-b, --bridge=                          Attach containers to a network bridge
--exec-root=/var/run/docker            Root of the Docker execdriver
--data-root=/var/lib/docker            Root of persisted Docker data
-p, --pidfile=/var/run/docker.pid      Path to use for daemon PID file
-H, --host=[]                          Daemon socket(s) to connect to
--iptables=true                        Enable addition of iptables rules
--config-file=/etc/docker/daemon.json  Daemon configuration file
--tlscacert="~/.docker/ca.pem"         Trust certs signed only by this CA
--tlscert="~/.docker/cert.pem"         Path to TLS certificate file
--tlskey="~/.docker/key.pem"           Path to TLS key file
```

When your daemons use different values for these flags, you can run them on the same host without any problems.
It is very important to properly understand the meaning of those options and to use them correctly.

- The `-b, --bridge=` flag defaults to the `docker0` bridge network, which is created automatically when you install Docker.
If you are not using the default, you must create and configure the bridge manually, or simply set it to 'none': `--bridge=none`
- `--exec-root` is the path where the container state is stored. The default value is `/var/run/docker`. Specify the path for
your running daemon here.
- `--data-root` is the path where persisted data such as images, volumes, and
cluster state are stored. The default value is `/var/lib/docker`. To avoid any
conflict with other daemons, set this parameter separately for each daemon.
- `-p, --pidfile=/var/run/docker.pid` is the path where the process ID of the daemon is stored. Specify the path for your
pid file here.
- `--host=[]` specifies where the Docker daemon will listen for client connections. If unspecified, it defaults to `/var/run/docker.sock`.
- `--iptables=false` prevents the Docker daemon from adding iptables rules. If
multiple daemons manage iptables rules, they may overwrite rules set by another
daemon. Be aware that disabling this option requires you to manually add
iptables rules to expose container ports. If you prevent Docker from adding
iptables rules, Docker will also not add IP masquerading rules, even if you set
`--ip-masq` to `true`. Without IP masquerading rules, Docker containers will not be
able to connect to external hosts or the internet when using a network other than
the default bridge.
- `--config-file=/etc/docker/daemon.json` is the path where the configuration file is stored. You can use it instead of
daemon flags. Specify the path for each daemon.
- `--tls*` The Docker daemon supports a `--tlsverify` mode that enforces encrypted and authenticated remote connections.
The `--tls*` options enable use of specific certificates for individual daemons.
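Equivalently, the same isolation can be sketched in a per-daemon configuration
file, reusing the option names from the full `daemon.json` example above
(the paths here are illustrative and mirror the flag-based example that
follows):

```json
{
  "bridge": "none",
  "exec-root": "/var/run/docker-bootstrap",
  "data-root": "/var/lib/docker-bootstrap",
  "pidfile": "/var/run/docker-bootstrap.pid",
  "hosts": ["unix:///var/run/docker-bootstrap.sock"],
  "iptables": false,
  "ip-masq": false
}
```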
+ +Example script for a separate “bootstrap” instance of the Docker daemon without network: + +```bash +$ sudo dockerd \ + -H unix:///var/run/docker-bootstrap.sock \ + -p /var/run/docker-bootstrap.pid \ + --iptables=false \ + --ip-masq=false \ + --bridge=none \ + --data-root=/var/lib/docker-bootstrap \ + --exec-root=/var/run/docker-bootstrap +``` diff --git a/components/engine/docs/reference/commandline/events.md b/components/engine/docs/reference/commandline/events.md new file mode 100644 index 0000000000..71475b43ec --- /dev/null +++ b/components/engine/docs/reference/commandline/events.md @@ -0,0 +1,345 @@ +--- +title: "events" +description: "The events command description and usage" +keywords: "events, container, report" +--- + + + +# events + +```markdown +Usage: docker events [OPTIONS] + +Get real time events from the server + +Options: + -f, --filter value Filter output based on conditions provided (default []) + --format string Format the output using the given Go template + --help Print usage + --since string Show all events created since timestamp + --until string Stream events until this timestamp +``` + +## Description + +Use `docker events` to get real-time events from the server. These events differ +per Docker object type. + +### Object types + +#### Containers + +Docker containers report the following events: + +- `attach` +- `commit` +- `copy` +- `create` +- `destroy` +- `detach` +- `die` +- `exec_create` +- `exec_detach` +- `exec_start` +- `export` +- `health_status` +- `kill` +- `oom` +- `pause` +- `rename` +- `resize` +- `restart` +- `start` +- `stop` +- `top` +- `unpause` +- `update` + +#### Images + +Docker images report the following events: + +- `delete` +- `import` +- `load` +- `pull` +- `push` +- `save` +- `tag` +- `untag` + +#### Plugins + +Docker plugins report the following events: + +- `install` +- `enable` +- `disable` +- `remove` + +#### Volumes + +Docker volumes report the following events: + +- `create` +- `mount` +- `unmount` +- `destroy` + +#### Networks + +Docker networks report the following events: + +- `create` +- `connect` +- `disconnect` +- `destroy` + +#### Daemons + +Docker daemons report the following events: + +- `reload` + +### Limiting, filtering, and formatting the output + +#### Limit events by time + +The `--since` and `--until` parameters can be Unix timestamps, date formatted +timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed +relative to the client machine’s time. If you do not provide the `--since` option, +the command returns only new and/or live events. Supported formats for date +formatted time stamps include RFC3339Nano, RFC3339, `2006-01-02T15:04:05`, +`2006-01-02T15:04:05.999999999`, `2006-01-02Z07:00`, and `2006-01-02`. The local +timezone on the client will be used if you do not provide either a `Z` or a +`+-00:00` timezone offset at the end of the timestamp. When providing Unix +timestamps enter seconds[.nanoseconds], where seconds is the number of seconds +that have elapsed since January 1, 1970 (midnight UTC/GMT), not counting leap +seconds (aka Unix epoch or Unix time), and the optional .nanoseconds field is a +fraction of a second no more than nine digits long. + +#### Filtering + +The filtering flag (`-f` or `--filter`) format is of "key=value". 
If you would
like to use multiple filters, pass multiple flags (e.g.,
`--filter "foo=bar" --filter "bif=baz"`).

Using the same filter multiple times will be handled as an *OR*; for example,
`--filter container=588a23dac085 --filter container=a8f7720b8c22` will display
events for container 588a23dac085 *OR* container a8f7720b8c22.

Using multiple filters will be handled as an *AND*; for example,
`--filter container=588a23dac085 --filter event=start` will display events for
container 588a23dac085 where the event type is *start*.

The currently supported filters are:

* container (`container=<name or id>`)
* daemon (`daemon=<name or id>`)
* event (`event=<event action>`)
* image (`image=<tag or id>`)
* label (`label=<key>` or `label=<key>=<value>`)
* network (`network=<name or id>`)
* plugin (`plugin=<name or id>`)
* type (`type=<container or image or volume or network or daemon or plugin>`)
* volume (`volume=<name or id>`)

#### Format

If a format (`--format`) is specified, the given template will be executed
instead of the default
format. Go's [text/template](http://golang.org/pkg/text/template/) package
describes all the details of the format.

If a format is set to `{{json .}}`, the events are streamed as valid JSON
Lines. For information about JSON Lines, please refer to http://jsonlines.org/.

## Examples

### Basic example

You'll need two shells for this example.

**Shell 1: Listening for events:**

```bash
$ docker events
```

**Shell 2: Start and Stop containers:**

```bash
$ docker create --name test alpine:latest top
$ docker start test
$ docker stop test
```

**Shell 1: (Again, now showing events):**

```none
2017-01-05T00:35:58.859401177+08:00 container create 0fdb48addc82871eb34eb23a847cfd033dedd1a0a37bef2e6d9eb3870fc7ff37 (image=alpine:latest, name=test)
2017-01-05T00:36:04.703631903+08:00 network connect e2e1f5ceda09d4300f3a846f0acfaa9a8bb0d89e775eb744c5acecd60e0529e2 (container=0fdb...ff37, name=bridge, type=bridge)
2017-01-05T00:36:04.795031609+08:00 container start 0fdb...ff37 (image=alpine:latest, name=test)
2017-01-05T00:36:09.830268747+08:00 container kill 0fdb...ff37 (image=alpine:latest, name=test, signal=15)
2017-01-05T00:36:09.840186338+08:00 container die 0fdb...ff37 (exitCode=143, image=alpine:latest, name=test)
2017-01-05T00:36:09.880113663+08:00 network disconnect e2e...29e2 (container=0fdb...ff37, name=bridge, type=bridge)
2017-01-05T00:36:09.890214053+08:00 container stop 0fdb...ff37 (image=alpine:latest, name=test)
```

To exit the `docker events` command, use `CTRL+C`.
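If you want a bounded replay instead of an open-ended stream, you can combine
`--since` with `--until`; this sketch (the durations are illustrative) prints
the events recorded between one hour and thirty minutes ago, then exits:

```bash
$ docker events --since '1h' --until '30m'
```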
+ +### Filter events by time + +You can filter the output by an absolute timestamp or relative time on the host +machine, using the following different time syntaxes: + +```bash +$ docker events --since 1483283804 +2017-01-05T00:35:41.241772953+08:00 volume create testVol (driver=local) +2017-01-05T00:35:58.859401177+08:00 container create d9cd...4d70 (image=alpine:latest, name=test) +2017-01-05T00:36:04.703631903+08:00 network connect e2e1...29e2 (container=0fdb...ff37, name=bridge, type=bridge) +2017-01-05T00:36:04.795031609+08:00 container start 0fdb...ff37 (image=alpine:latest, name=test) +2017-01-05T00:36:09.830268747+08:00 container kill 0fdb...ff37 (image=alpine:latest, name=test, signal=15) +2017-01-05T00:36:09.840186338+08:00 container die 0fdb...ff37 (exitCode=143, image=alpine:latest, name=test) +2017-01-05T00:36:09.880113663+08:00 network disconnect e2e...29e2 (container=0fdb...ff37, name=bridge, type=bridge) +2017-01-05T00:36:09.890214053+08:00 container stop 0fdb...ff37 (image=alpine:latest, name=test) + +$ docker events --since '2017-01-05' +2017-01-05T00:35:41.241772953+08:00 volume create testVol (driver=local) +2017-01-05T00:35:58.859401177+08:00 container create d9cd...4d70 (image=alpine:latest, name=test) +2017-01-05T00:36:04.703631903+08:00 network connect e2e1...29e2 (container=0fdb...ff37, name=bridge, type=bridge) +2017-01-05T00:36:04.795031609+08:00 container start 0fdb...ff37 (image=alpine:latest, name=test) +2017-01-05T00:36:09.830268747+08:00 container kill 0fdb...ff37 (image=alpine:latest, name=test, signal=15) +2017-01-05T00:36:09.840186338+08:00 container die 0fdb...ff37 (exitCode=143, image=alpine:latest, name=test) +2017-01-05T00:36:09.880113663+08:00 network disconnect e2e...29e2 (container=0fdb...ff37, name=bridge, type=bridge) +2017-01-05T00:36:09.890214053+08:00 container stop 0fdb...ff37 (image=alpine:latest, name=test) + +$ docker events --since '2013-09-03T15:49:29' +2017-01-05T00:35:41.241772953+08:00 volume create testVol (driver=local) +2017-01-05T00:35:58.859401177+08:00 container create d9cd...4d70 (image=alpine:latest, name=test) +2017-01-05T00:36:04.703631903+08:00 network connect e2e1...29e2 (container=0fdb...ff37, name=bridge, type=bridge) +2017-01-05T00:36:04.795031609+08:00 container start 0fdb...ff37 (image=alpine:latest, name=test) +2017-01-05T00:36:09.830268747+08:00 container kill 0fdb...ff37 (image=alpine:latest, name=test, signal=15) +2017-01-05T00:36:09.840186338+08:00 container die 0fdb...ff37 (exitCode=143, image=alpine:latest, name=test) +2017-01-05T00:36:09.880113663+08:00 network disconnect e2e...29e2 (container=0fdb...ff37, name=bridge, type=bridge) +2017-01-05T00:36:09.890214053+08:00 container stop 0fdb...ff37 (image=alpine:latest, name=test) + +$ docker events --since '10m' +2017-01-05T00:35:41.241772953+08:00 volume create testVol (driver=local) +2017-01-05T00:35:58.859401177+08:00 container create d9cd...4d70 (image=alpine:latest, name=test) +2017-01-05T00:36:04.703631903+08:00 network connect e2e1...29e2 (container=0fdb...ff37, name=bridge, type=bridge) +2017-01-05T00:36:04.795031609+08:00 container start 0fdb...ff37 (image=alpine:latest, name=test) +2017-01-05T00:36:09.830268747+08:00 container kill 0fdb...ff37 (image=alpine:latest, name=test, signal=15) +2017-01-05T00:36:09.840186338+08:00 container die 0fdb...ff37 (exitCode=143, image=alpine:latest, name=test) +2017-01-05T00:36:09.880113663+08:00 network disconnect e2e...29e2 (container=0fdb...ff37, name=bridge, type=bridge) +2017-01-05T00:36:09.890214053+08:00 
container stop 0fdb...ff37 (image=alpine:latest, name=test)
```

### Filter events by criteria

The following commands show several different ways to filter the `docker events`
output.

```bash
$ docker events --filter 'event=stop'

2017-01-05T00:40:22.880175420+08:00 container stop 0fdb...ff37 (image=alpine:latest, name=test)
2017-01-05T00:41:17.888104182+08:00 container stop 2a8f...4e78 (image=alpine, name=kickass_brattain)

$ docker events --filter 'image=alpine'

2017-01-05T00:41:55.784240236+08:00 container create d9cd...4d70 (image=alpine, name=happy_meitner)
2017-01-05T00:41:55.913156783+08:00 container start d9cd...4d70 (image=alpine, name=happy_meitner)
2017-01-05T00:42:01.106875249+08:00 container kill d9cd...4d70 (image=alpine, name=happy_meitner, signal=15)
2017-01-05T00:42:11.111934041+08:00 container kill d9cd...4d70 (image=alpine, name=happy_meitner, signal=9)
2017-01-05T00:42:11.119578204+08:00 container die d9cd...4d70 (exitCode=137, image=alpine, name=happy_meitner)
2017-01-05T00:42:11.173276611+08:00 container stop d9cd...4d70 (image=alpine, name=happy_meitner)

$ docker events --filter 'container=test'

2017-01-05T00:43:00.139719934+08:00 container start 0fdb...ff37 (image=alpine:latest, name=test)
2017-01-05T00:43:09.259951086+08:00 container kill 0fdb...ff37 (image=alpine:latest, name=test, signal=15)
2017-01-05T00:43:09.270102715+08:00 container die 0fdb...ff37 (exitCode=143, image=alpine:latest, name=test)
2017-01-05T00:43:09.312556440+08:00 container stop 0fdb...ff37 (image=alpine:latest, name=test)

$ docker events --filter 'container=test' --filter 'container=d9cdb1525ea8'

2017-01-05T00:44:11.517071981+08:00 container start 0fdb...ff37 (image=alpine:latest, name=test)
2017-01-05T00:44:17.685870901+08:00 container start d9cd...4d70 (image=alpine, name=happy_meitner)
2017-01-05T00:44:29.757658470+08:00 container kill 0fdb...ff37 (image=alpine:latest, name=test, signal=9)
2017-01-05T00:44:29.767718510+08:00 container die 0fdb...ff37 (exitCode=137, image=alpine:latest, name=test)
2017-01-05T00:44:29.815798344+08:00 container destroy 0fdb...ff37 (image=alpine:latest, name=test)

$ docker events --filter 'container=test' --filter 'event=stop'

2017-01-05T00:46:13.664099505+08:00 container stop a9d1...e130 (image=alpine, name=test)

$ docker events --filter 'type=volume'

2015-12-23T21:05:28.136212689Z volume create test-event-volume-local (driver=local)
2015-12-23T21:05:28.383462717Z volume mount test-event-volume-local (read/write=true, container=562f...5025, destination=/foo, driver=local, propagation=rprivate)
2015-12-23T21:05:28.650314265Z volume unmount test-event-volume-local (container=562f...5025, driver=local)
2015-12-23T21:05:28.716218405Z volume destroy test-event-volume-local (driver=local)

$ docker events --filter 'type=network'

2015-12-23T21:38:24.705709133Z network create 8b11...2c5b (name=test-event-network-local, type=bridge)
2015-12-23T21:38:25.119625123Z network connect 8b11...2c5b (name=test-event-network-local, container=b4be...c54e, type=bridge)

$ docker events --filter 'container=container_1' --filter 'container=container_2'

2014-09-03T15:49:29.999999999Z07:00 container die 4386fb97867d (image=ubuntu-1:14.04)
2014-05-10T17:42:14.999999999Z07:00 container stop 4386fb97867d (image=ubuntu-1:14.04)
2014-05-10T17:42:14.999999999Z07:00 container die 7805c1d35632 (image=redis:2.8)
2014-09-03T15:49:29.999999999Z07:00 container stop 7805c1d35632 (image=redis:2.8)

$ docker events --filter 'type=plugin'

2016-07-25T17:30:14.825557616Z plugin pull ec7b87f2ce84330fe076e666f17dfc049d2d7ae0b8190763de94e1f2d105993f (name=tiborvass/sample-volume-plugin:latest)
2016-07-25T17:30:14.888127370Z plugin enable ec7b87f2ce84330fe076e666f17dfc049d2d7ae0b8190763de94e1f2d105993f (name=tiborvass/sample-volume-plugin:latest)
```

### Format the output

```bash
$ docker events --filter 'type=container' --format 'Type={{.Type}} Status={{.Status}} ID={{.ID}}'

Type=container Status=create ID=2ee349dac409e97974ce8d01b70d250b85e0ba8189299c126a87812311951e26
Type=container Status=attach ID=2ee349dac409e97974ce8d01b70d250b85e0ba8189299c126a87812311951e26
Type=container Status=start ID=2ee349dac409e97974ce8d01b70d250b85e0ba8189299c126a87812311951e26
Type=container Status=resize ID=2ee349dac409e97974ce8d01b70d250b85e0ba8189299c126a87812311951e26
Type=container Status=die ID=2ee349dac409e97974ce8d01b70d250b85e0ba8189299c126a87812311951e26
Type=container Status=destroy ID=2ee349dac409e97974ce8d01b70d250b85e0ba8189299c126a87812311951e26
```

#### Format as JSON

```none
$ docker events --format '{{json .}}'

{"status":"create","id":"196016a57679bf42424484918746a9474cd905dd993c4d0f4..
{"status":"attach","id":"196016a57679bf42424484918746a9474cd905dd993c4d0f4..
{"Type":"network","Action":"connect","Actor":{"ID":"1b50a5bf755f6021dfa78e..
{"status":"start","id":"196016a57679bf42424484918746a9474cd905dd993c4d0f42..
{"status":"resize","id":"196016a57679bf42424484918746a9474cd905dd993c4d0f4..
```
diff --git a/components/engine/docs/reference/commandline/exec.md b/components/engine/docs/reference/commandline/exec.md
new file mode 100644
index 0000000000..e2e5d607b9
--- /dev/null
+++ b/components/engine/docs/reference/commandline/exec.md
@@ -0,0 +1,99 @@
---
title: "exec"
description: "The exec command description and usage"
keywords: "command, container, run, execute"
---

# exec

```markdown
Usage: docker exec [OPTIONS] CONTAINER COMMAND [ARG...]
+

Run a command in a running container

Options:
  -d, --detach         Detached mode: run command in the background
      --detach-keys    Override the key sequence for detaching a container
  -e, --env=[]         Set environment variables
      --help           Print usage
  -i, --interactive    Keep STDIN open even if not attached
      --privileged     Give extended privileges to the command
  -t, --tty            Allocate a pseudo-TTY
  -u, --user           Username or UID (format: <name|uid>[:<group|gid>])
```

## Description

The `docker exec` command runs a new command in a running container.

The command started using `docker exec` only runs while the container's primary
process (`PID 1`) is running, and it is not restarted if the container is
restarted.

COMMAND will run in the default directory of the container. If the
underlying image has a custom directory specified with the WORKDIR directive
in its Dockerfile, this will be used instead.

COMMAND should be an executable; a chained or a quoted command
will not work. Example: `docker exec -ti my_container "echo a && echo b"` will
not work, but `docker exec -ti my_container sh -c "echo a && echo b"` will.

## Examples

### Run `docker exec` on a running container

First, start a container.

```bash
$ docker run --name ubuntu_bash --rm -i -t ubuntu bash
```

This will create a container named `ubuntu_bash` and start a Bash session.

Next, execute a command on the container.

```bash
$ docker exec -d ubuntu_bash touch /tmp/execWorks
```

This will create a new file `/tmp/execWorks` inside the running container
`ubuntu_bash`, in the background.

Next, execute an interactive `bash` shell on the container.

```bash
$ docker exec -it ubuntu_bash bash
```

This will create a new Bash session in the container `ubuntu_bash`.

### Try to run `docker exec` on a paused container

If the container is paused, then the `docker exec` command will fail with an error:

```bash
$ docker pause test

test

$ docker ps

CONTAINER ID        IMAGE               COMMAND             CREATED             STATUS                   PORTS               NAMES
1ae3b36715d2        ubuntu:latest       "bash"              17 seconds ago      Up 16 seconds (Paused)                       test

$ docker exec test ls

FATA[0000] Error response from daemon: Container test is paused, unpause the container before exec

$ echo $?
1
```
diff --git a/components/engine/docs/reference/commandline/export.md b/components/engine/docs/reference/commandline/export.md
new file mode 100644
index 0000000000..9de509714a
--- /dev/null
+++ b/components/engine/docs/reference/commandline/export.md
@@ -0,0 +1,48 @@
---
title: "export"
description: "The export command description and usage"
keywords: "export, file, system, container"
---

# export

```markdown
Usage: docker export [OPTIONS] CONTAINER

Export a container's filesystem as a tar archive

Options:
      --help            Print usage
  -o, --output string   Write to a file, instead of STDOUT
```

## Description

The `docker export` command does not export the contents of volumes associated
with the container. If a volume is mounted on top of an existing directory in
the container, `docker export` will export the contents of the *underlying*
directory, not the contents of the volume.

Refer to [Backup, restore, or migrate data volumes](https://docs.docker.com/engine/tutorials/dockervolumes/#backup-restore-or-migrate-data-volumes)
in the user guide for examples on exporting data in a volume.

## Examples

Each of these commands has the same result.
+

```bash
$ docker export red_panda > latest.tar
```

```bash
$ docker export --output="latest.tar" red_panda
```
diff --git a/components/engine/docs/reference/commandline/history.md b/components/engine/docs/reference/commandline/history.md
new file mode 100644
index 0000000000..014ceddaf3
--- /dev/null
+++ b/components/engine/docs/reference/commandline/history.md
@@ -0,0 +1,93 @@
---
title: "history"
description: "The history command description and usage"
keywords: "docker, image, history"
---

# history

```markdown
Usage: docker history [OPTIONS] IMAGE

Show the history of an image

Options:
      --format string   Pretty-print history output using a Go template
      --help            Print usage
  -H, --human           Print sizes and dates in human readable format (default true)
      --no-trunc        Don't truncate output
  -q, --quiet           Only show numeric IDs
```


## Examples

To see how the `docker:latest` image was built:

```bash
$ docker history docker

IMAGE          CREATED       CREATED BY                                       SIZE       COMMENT
3e23a5875458   8 days ago    /bin/sh -c #(nop) ENV LC_ALL=C.UTF-8             0 B
8578938dd170   8 days ago    /bin/sh -c dpkg-reconfigure locales && loc       1.245 MB
be51b77efb42   8 days ago    /bin/sh -c apt-get update && apt-get install     338.3 MB
4b137612be55   6 weeks ago   /bin/sh -c #(nop) ADD jessie.tar.xz in /         121 MB
750d58736b4b   6 weeks ago   /bin/sh -c #(nop) MAINTAINER Tianon Gravi <admwiggin@gmail.com>   0 B
```

### Format the output

The following example uses a template to print only the `ID` and
`CreatedSince` entries for each layer, separated by a colon (`<missing>`
denotes intermediate layers that have no image ID of their own):

```bash
$ docker history --format "{{.ID}}: {{.CreatedSince}}" docker

3e23a5875458: 2 weeks ago
<missing>: 2 weeks ago
<missing>: 2 weeks ago
<missing>: 2 weeks ago
<missing>: 3 weeks ago
<missing>: 3 weeks ago
<missing>: 3 weeks ago
```
diff --git a/components/engine/docs/reference/commandline/image.md b/components/engine/docs/reference/commandline/image.md
new file mode 100644
index 0000000000..fef161aa99
--- /dev/null
+++ b/components/engine/docs/reference/commandline/image.md
@@ -0,0 +1,47 @@

---
title: "image"
description: "The image command description and usage"
keywords: "image"
---

# image

```markdown
Usage: docker image COMMAND

Manage images

Options:
      --help   Print usage

Commands:
  build       Build an image from a Dockerfile
  history     Show the history of an image
  import      Import the contents from a tarball to create a filesystem image
  inspect     Display detailed information on one or more images
  load        Load an image from a tar archive or STDIN
  ls          List images
  prune       Remove unused images
  pull        Pull an image or a repository from a registry
  push        Push an image or a repository to a registry
  rm          Remove one or more images
  save        Save one or more images to a tar archive (streamed to STDOUT by default)
  tag         Create a tag TARGET_IMAGE that refers to SOURCE_IMAGE

Run 'docker image COMMAND --help' for more information on a command.

```

## Description

Manage images.
diff --git a/components/engine/docs/reference/commandline/image_prune.md b/components/engine/docs/reference/commandline/image_prune.md
new file mode 100644
index 0000000000..f3b1545407
--- /dev/null
+++ b/components/engine/docs/reference/commandline/image_prune.md
@@ -0,0 +1,171 @@
---
title: "image prune"
description: "Remove unused images"
keywords: "image, prune, delete, remove"
---

# image prune

```markdown
Usage: docker image prune [OPTIONS]

Remove unused images

Options:
  -a, --all             Remove all unused images, not just dangling ones
      --filter filter   Provide filter values (e.g. 'until=<timestamp>')
  -f, --force           Do not prompt for confirmation
      --help            Print usage
```

## Description

Remove all dangling images. If `-a` is specified, it will also remove all images not referenced by any container.
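In its simplest form, the command interactively asks for confirmation before
removing dangling images (pass `-f`/`--force` to skip the prompt):

```bash
$ docker image prune

WARNING! This will remove all dangling images.
Are you sure you want to continue? [y/N] y
```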
+

## Examples

Example output:

```bash
$ docker image prune -a

WARNING! This will remove all images without at least one container associated to them.
Are you sure you want to continue? [y/N] y
Deleted Images:
untagged: alpine:latest
untagged: alpine@sha256:3dcdb92d7432d56604d4545cbd324b14e647b313626d99b889d0626de158f73a
deleted: sha256:4e38e38c8ce0b8d9041a9c4fefe786631d1416225e13b0bfe8cfa2321aec4bba
deleted: sha256:4fe15f8d0ae69e169824f25f1d4da3015a48feeeeebb265cd2e328e15c6a869f
untagged: alpine:3.3
untagged: alpine@sha256:4fa633f4feff6a8f02acfc7424efd5cb3e76686ed3218abf4ca0fa4a2a358423
untagged: my-jq:latest
deleted: sha256:ae67841be6d008a374eff7c2a974cde3934ffe9536a7dc7ce589585eddd83aff
deleted: sha256:34f6f1261650bc341eb122313372adc4512b4fceddc2a7ecbb84f0958ce5ad65
deleted: sha256:cf4194e8d8db1cb2d117df33f2c75c0369c3a26d96725efb978cc69e046b87e7
untagged: my-curl:latest
deleted: sha256:b2789dd875bf427de7f9f6ae001940073b3201409b14aba7e5db71f408b8569e
deleted: sha256:96daac0cb203226438989926fc34dd024f365a9a8616b93e168d303cfe4cb5e9
deleted: sha256:5cbd97a14241c9cd83250d6b6fc0649833c4a3e84099b968dd4ba403e609945e
deleted: sha256:a0971c4015c1e898c60bf95781c6730a05b5d8a2ae6827f53837e6c9d38efdec
deleted: sha256:d8359ca3b681cc5396a4e790088441673ed3ce90ebc04de388bfcd31a0716b06
deleted: sha256:83fc9ba8fb70e1da31dfcc3c88d093831dbd4be38b34af998df37e8ac538260c
deleted: sha256:ae7041a4cc625a9c8e6955452f7afe602b401f662671cea3613f08f3d9343b35
deleted: sha256:35e0f43a37755b832f0bbea91a2360b025ee351d7309dae0d9737bc96b6d0809
deleted: sha256:0af941dd29f00e4510195dd00b19671bc591e29d1495630e7e0f7c44c1e6a8c0
deleted: sha256:9fc896fc2013da84f84e45b3096053eb084417b42e6b35ea0cce5a3529705eac
deleted: sha256:47cf20d8c26c46fff71be614d9f54997edacfe8d46d51769706e5aba94b16f2b
deleted: sha256:2c675ee9ed53425e31a13e3390bf3f539bf8637000e4bcfbb85ee03ef4d910a1

Total reclaimed space: 16.43 MB
```

### Filtering

The filtering flag (`-f` or `--filter`) format is of "key=value". If there is more
than one filter, then pass multiple flags (e.g., `--filter "foo=bar" --filter "bif=baz"`).

The currently supported filters are:

* until (`<timestamp>`) - only remove images created before given timestamp
* label (`label=<key>`, `label=<key>=<value>`, `label!=<key>`, or `label!=<key>=<value>`) - only remove images with (or without, in case `label!=...` is used) the specified labels.

The `until` filter can be Unix timestamps, date formatted
timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed
relative to the daemon machine’s time. Supported formats for date
formatted time stamps include RFC3339Nano, RFC3339, `2006-01-02T15:04:05`,
`2006-01-02T15:04:05.999999999`, `2006-01-02Z07:00`, and `2006-01-02`. The local
timezone on the daemon will be used if you do not provide either a `Z` or a
`+-00:00` timezone offset at the end of the timestamp. When providing Unix
timestamps enter seconds[.nanoseconds], where seconds is the number of seconds
that have elapsed since January 1, 1970 (midnight UTC/GMT), not counting leap
seconds (aka Unix epoch or Unix time), and the optional .nanoseconds field is a
fraction of a second no more than nine digits long.

The `label` filter accepts two formats. One is the `label=...` (`label=<key>` or `label=<key>=<value>`),
which removes images with the specified labels. The other
format is the `label!=...` (`label!=<key>` or `label!=<key>=<value>`), which removes
images without the specified labels.
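For instance, this is a sketch of the label form, using a hypothetical
`deprecated` label that your images would need to carry:

```bash
# Remove unused images carrying the (hypothetical) label "deprecated",
# without prompting for confirmation.
$ docker image prune --all --force --filter "label=deprecated"
```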
+ +The following removes images created before `2017-01-04T00:00:00`: + +```bash +$ docker images --format 'table {{.Repository}}\t{{.Tag}}\t{{.ID}}\t{{.CreatedAt}}\t{{.Size}}' +REPOSITORY TAG IMAGE ID CREATED AT SIZE +foo latest 2f287ac753da 2017-01-04 13:42:23 -0800 PST 3.98 MB +alpine latest 88e169ea8f46 2016-12-27 10:17:25 -0800 PST 3.98 MB +busybox latest e02e811dd08f 2016-10-07 14:03:58 -0700 PDT 1.09 MB + +$ docker image prune -a --force --filter "until=2017-01-04T00:00:00" + +Deleted Images: +untagged: alpine:latest +untagged: alpine@sha256:dfbd4a3a8ebca874ebd2474f044a0b33600d4523d03b0df76e5c5986cb02d7e8 +untagged: busybox:latest +untagged: busybox@sha256:29f5d56d12684887bdfa50dcd29fc31eea4aaf4ad3bec43daf19026a7ce69912 +deleted: sha256:e02e811dd08fd49e7f6032625495118e63f597eb150403d02e3238af1df240ba +deleted: sha256:e88b3f82283bc59d5e0df427c824e9f95557e661fcb0ea15fb0fb6f97760f9d9 + +Total reclaimed space: 1.093 MB + +$ docker images --format 'table {{.Repository}}\t{{.Tag}}\t{{.ID}}\t{{.CreatedAt}}\t{{.Size}}' + +REPOSITORY TAG IMAGE ID CREATED AT SIZE +foo latest 2f287ac753da 2017-01-04 13:42:23 -0800 PST 3.98 MB +``` + +The following removes images created more than 10 days (`240h`) ago: + +```bash +$ docker images + +REPOSITORY TAG IMAGE ID CREATED SIZE +foo latest 2f287ac753da 14 seconds ago 3.98 MB +alpine latest 88e169ea8f46 8 days ago 3.98 MB +debian jessie 7b0a06c805e8 2 months ago 123 MB +busybox latest e02e811dd08f 2 months ago 1.09 MB +golang 1.7.0 138c2e655421 4 months ago 670 MB + +$ docker image prune -a --force --filter "until=240h" + +Deleted Images: +untagged: golang:1.7.0 +untagged: golang@sha256:6765038c2b8f407fd6e3ecea043b44580c229ccfa2a13f6d85866cf2b4a9628e +deleted: sha256:138c2e6554219de65614d88c15521bfb2da674cbb0bf840de161f89ff4264b96 +deleted: sha256:ec353c2e1a673f456c4b78906d0d77f9d9456cfb5229b78c6a960bfb7496b76a +deleted: sha256:fe22765feaf3907526b4921c73ea6643ff9e334497c9b7e177972cf22f68ee93 +deleted: sha256:ff845959c80148421a5c3ae11cc0e6c115f950c89bc949646be55ed18d6a2912 +deleted: sha256:a4320831346648c03db64149eafc83092e2b34ab50ca6e8c13112388f25899a7 +deleted: sha256:4c76020202ee1d9709e703b7c6de367b325139e74eebd6b55b30a63c196abaf3 +deleted: sha256:d7afd92fb07236c8a2045715a86b7d5f0066cef025018cd3ca9a45498c51d1d6 +deleted: sha256:9e63c5bce4585dd7038d830a1f1f4e44cb1a1515b00e620ac718e934b484c938 +untagged: debian:jessie +untagged: debian@sha256:c1af755d300d0c65bb1194d24bce561d70c98a54fb5ce5b1693beb4f7988272f +deleted: sha256:7b0a06c805e8f23807fb8856621c60851727e85c7bcb751012c813f122734c8d +deleted: sha256:f96222d75c5563900bc4dd852179b720a0885de8f7a0619ba0ac76e92542bbc8 + +Total reclaimed space: 792.6 MB + +$ docker images + +REPOSITORY TAG IMAGE ID CREATED SIZE +foo latest 2f287ac753da About a minute ago 3.98 MB +alpine latest 88e169ea8f46 8 days ago 3.98 MB +busybox latest e02e811dd08f 2 months ago 1.09 MB +``` + +## Related commands + +* [system df](system_df.md) +* [container prune](container_prune.md) +* [volume prune](volume_prune.md) +* [network prune](network_prune.md) +* [system prune](system_prune.md) diff --git a/components/engine/docs/reference/commandline/images.md b/components/engine/docs/reference/commandline/images.md new file mode 100644 index 0000000000..86a32aed27 --- /dev/null +++ b/components/engine/docs/reference/commandline/images.md @@ -0,0 +1,343 @@ +--- +title: "images" +description: "The images command description and usage" +keywords: "list, docker, images" +--- + + + +# images + +```markdown +Usage: docker images [OPTIONS] 
[REPOSITORY[:TAG]]

List images

Options:
  -a, --all             Show all images (default hides intermediate images)
      --digests         Show digests
  -f, --filter value    Filter output based on conditions provided (default [])
                        - dangling=(true|false)
                        - label=<key> or label=<key>=<value>
                        - before=(<image-name>[:tag]|<image-id>|<image@digest>)
                        - since=(<image-name>[:tag]|<image-id>|<image@digest>)
                        - reference=(pattern of an image reference)
      --format string   Pretty-print images using a Go template
      --help            Print usage
      --no-trunc        Don't truncate output
  -q, --quiet           Only show numeric IDs
```

## Description

The default `docker images` will show all top level
images, their repository and tags, and their size.

Docker images have intermediate layers that increase reusability,
decrease disk usage, and speed up `docker build` by
allowing each step to be cached. These intermediate layers are not shown
by default.

The `SIZE` is the cumulative space taken up by the image and all
its parent images. This is also the disk space used by the contents of the
tar file created when you `docker save` an image.

An image will be listed more than once if it has multiple repository names
or tags. This single image (identifiable by its matching `IMAGE ID`)
uses up the `SIZE` listed only once.

## Examples

### List the most recently created images

```bash
$ docker images

REPOSITORY                TAG                 IMAGE ID            CREATED             SIZE
<none>                    <none>              77af4d6b9913        19 hours ago        1.089 GB
committ                   latest              b6fa739cedf5        19 hours ago        1.089 GB
<none>                    <none>              78a85c484f71        19 hours ago        1.089 GB
docker                    latest              30557a29d5ab        20 hours ago        1.089 GB
<none>                    <none>              5ed6274db6ce        24 hours ago        1.089 GB
postgres                  9                   746b819f315e        4 days ago          213.4 MB
postgres                  9.3                 746b819f315e        4 days ago          213.4 MB
postgres                  9.3.5               746b819f315e        4 days ago          213.4 MB
postgres                  latest              746b819f315e        4 days ago          213.4 MB
```

### List images by name and tag

The `docker images` command takes an optional `[REPOSITORY[:TAG]]` argument
that restricts the list to images that match the argument. If you specify
`REPOSITORY` but no `TAG`, the `docker images` command lists all images in the
given repository.

For example, to list all images in the "java" repository, run this command:

```bash
$ docker images java

REPOSITORY          TAG                 IMAGE ID            CREATED             SIZE
java                8                   308e519aac60        6 days ago          824.5 MB
java                7                   493d82594c15        3 months ago        656.3 MB
java                latest              2711b1d6f3aa        5 months ago        603.9 MB
```

The `[REPOSITORY[:TAG]]` value must be an "exact match". This means that, for example,
`docker images jav` does not match the image `java`.

If both `REPOSITORY` and `TAG` are provided, only images matching that
repository and tag are listed. To find all local images in the "java"
repository with tag "8" you can use:

```bash
$ docker images java:8

REPOSITORY          TAG                 IMAGE ID            CREATED             SIZE
java                8                   308e519aac60        6 days ago          824.5 MB
```

If nothing matches `REPOSITORY[:TAG]`, the list is empty.
+

```bash
$ docker images java:0

REPOSITORY          TAG                 IMAGE ID            CREATED             SIZE
```

### List the full length image IDs

```bash
$ docker images --no-trunc

REPOSITORY                    TAG                 IMAGE ID                                                                  CREATED             SIZE
<none>                        <none>              sha256:77af4d6b9913e693e8d0b4b294fa62ade6054e6b2f1ffb617ac955dd63fb0182   19 hours ago        1.089 GB
committest                    latest              sha256:b6fa739cedf5ea12a620a439402b6004d057da800f91c7524b5086a5e4749c9f   19 hours ago        1.089 GB
<none>                        <none>              sha256:78a85c484f71509adeaace20e72e941f6bdd2b25b4c75da8693efd9f61a37921   19 hours ago        1.089 GB
docker                        latest              sha256:30557a29d5abc51e5f1d5b472e79b7e296f595abcf19fe6b9199dbbc809c6ff4   20 hours ago        1.089 GB
<none>                        <none>              sha256:0124422dd9f9cf7ef15c0617cda3931ee68346455441d66ab8bdc5b05e9fdce5   20 hours ago        1.089 GB
<none>                        <none>              sha256:18ad6fad340262ac2a636efd98a6d1f0ea775ae3d45240d3418466495a19a81b   22 hours ago        1.082 GB
<none>                        <none>              sha256:f9f1e26352f0a3ba6a0ff68167559f64f3e21ff7ada60366e2d44a04befd1d3a   23 hours ago        1.089 GB
tryout                        latest              sha256:2629d1fa0b81b222fca63371ca16cbf6a0772d07759ff80e8d1369b926940074   23 hours ago        131.5 MB
<none>                        <none>              sha256:5ed6274db6ceb2397844896966ea239290555e74ef307030ebb01ff91b1914df   24 hours ago        1.089 GB
```

### List image digests

Images that use the v2 or later format have a content-addressable identifier
called a `digest`. As long as the input used to generate the image is
unchanged, the digest value is predictable. To list image digest values, use
the `--digests` flag:

```bash
$ docker images --digests
REPOSITORY                     TAG                 DIGEST                                                                    IMAGE ID            CREATED             SIZE
localhost:5000/test/busybox    <none>              sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf   4986bf8c1536        9 weeks ago         2.43 MB
```

When pushing or pulling to a 2.0 registry, the `push` or `pull` command
output includes the image digest. You can `pull` using a digest value. You can
also reference by digest in `create`, `run`, and `rmi` commands, as well as the
`FROM` image reference in a Dockerfile.

### Filtering

The filtering flag (`-f` or `--filter`) format is of "key=value". If there is more
than one filter, then pass multiple flags (e.g., `--filter "foo=bar" --filter "bif=baz"`).

The currently supported filters are:

* dangling (boolean - true or false)
* label (`label=<key>` or `label=<key>=<value>`)
* before (`<image-name>[:<tag>]`, `<image id>` or `<image@digest>`) - filter images created before given id or references
* since (`<image-name>[:<tag>]`, `<image id>` or `<image@digest>`) - filter images created since given id or references
* reference (pattern of an image reference) - filter images whose reference matches the specified pattern

#### Show untagged images (dangling)

```bash
$ docker images --filter "dangling=true"

REPOSITORY          TAG                 IMAGE ID            CREATED             SIZE
<none>              <none>              8abc22fbb042        4 weeks ago         0 B
<none>              <none>              48e5f45168b9        4 weeks ago         2.489 MB
<none>              <none>              bf747efa0e2f        4 weeks ago         0 B
<none>              <none>              980fe10e5736        12 weeks ago        101.4 MB
<none>              <none>              dea752e4e117        12 weeks ago        101.4 MB
<none>              <none>              511136ea3c5a        8 months ago        0 B
```

This will display untagged images that are the leaves of the images tree (not
intermediary layers). These images occur when a new build of an image takes the
`repo:tag` away from the image ID, leaving it as `<none>:<none>` or untagged.
A warning will be issued if trying to remove an image when a container is presently
using it. By having this flag it allows for batch cleanup.

You can use this in conjunction with `docker rmi ...`:

```bash
$ docker rmi $(docker images -f "dangling=true" -q)

8abc22fbb042
48e5f45168b9
bf747efa0e2f
980fe10e5736
dea752e4e117
511136ea3c5a
```

> **Note**: Docker warns you if any containers exist that are using these
> untagged images.
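Alternatively, the `docker image prune` command described earlier in this
reference removes dangling images in a single step:

```bash
# Equivalent cleanup of dangling images, with a confirmation prompt.
$ docker image prune
```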
+
+### Filtering
+
+The filtering flag (`-f` or `--filter`) format is of "key=value". If there is more
+than one filter, then pass multiple flags (e.g., `--filter "foo=bar" --filter "bif=baz"`).
+
+The currently supported filters are:
+
+* dangling (boolean - true or false)
+* label (`label=<key>` or `label=<key>=<value>`)
+* before (`<image-name>[:<tag>]`, `<image id>` or `<image@digest>`) - filter images created before the given id or reference
+* since (`<image-name>[:<tag>]`, `<image id>` or `<image@digest>`) - filter images created since the given id or reference
+* reference (pattern of an image reference) - filter images whose reference matches the specified pattern
+
+#### Show untagged images (dangling)
+
+```bash
+$ docker images --filter "dangling=true"
+
+REPOSITORY          TAG                 IMAGE ID            CREATED             SIZE
+<none>              <none>              8abc22fbb042        4 weeks ago         0 B
+<none>              <none>              48e5f45168b9        4 weeks ago         2.489 MB
+<none>              <none>              bf747efa0e2f        4 weeks ago         0 B
+<none>              <none>              980fe10e5736        12 weeks ago        101.4 MB
+<none>              <none>              dea752e4e117        12 weeks ago        101.4 MB
+<none>              <none>              511136ea3c5a        8 months ago        0 B
+```
+
+This will display untagged images that are the leaves of the images tree (not
+intermediary layers). These images occur when a new build of an image takes the
+`repo:tag` away from the image ID, leaving it as `<none>:<none>` or untagged.
+A warning is issued if you try to remove an image while a container is presently
+using it. This flag makes batch cleanup possible.
+
+You can use this in conjunction with `docker rmi ...`:
+
+```bash
+$ docker rmi $(docker images -f "dangling=true" -q)
+
+8abc22fbb042
+48e5f45168b9
+bf747efa0e2f
+980fe10e5736
+dea752e4e117
+511136ea3c5a
+```
+
+> **Note**: Docker warns you if any containers exist that are using these
+> untagged images.
+
+#### Show images with a given label
+
+The `label` filter matches images based on the presence of a `label` alone or a `label` and a
+value.
+
+The following filter matches images with the `com.example.version` label regardless of its value.
+
+```bash
+$ docker images --filter "label=com.example.version"
+
+REPOSITORY          TAG                 IMAGE ID            CREATED              SIZE
+match-me-1          latest              eeae25ada2aa        About a minute ago   188.3 MB
+match-me-2          latest              dea752e4e117        About a minute ago   188.3 MB
+```
+
+The following filter matches images with the `com.example.version` label with the `1.0` value.
+
+```bash
+$ docker images --filter "label=com.example.version=1.0"
+
+REPOSITORY          TAG                 IMAGE ID            CREATED              SIZE
+match-me            latest              511136ea3c5a        About a minute ago   188.3 MB
+```
+
+The following filter, with the `0.1` value, returns an empty set because no matches were found.
+
+```bash
+$ docker images --filter "label=com.example.version=0.1"
+REPOSITORY          TAG                 IMAGE ID            CREATED             SIZE
+```
+
+#### Filter images by time
+
+The `before` filter shows only images created before the image with
+a given id or reference. For example, having these images:
+
+```bash
+$ docker images
+
+REPOSITORY          TAG                 IMAGE ID            CREATED             SIZE
+image1              latest              eeae25ada2aa        4 minutes ago       188.3 MB
+image2              latest              dea752e4e117        9 minutes ago       188.3 MB
+image3              latest              511136ea3c5a        25 minutes ago      188.3 MB
+```
+
+Filtering with `before` would give:
+
+```bash
+$ docker images --filter "before=image1"
+
+REPOSITORY          TAG                 IMAGE ID            CREATED             SIZE
+image2              latest              dea752e4e117        9 minutes ago       188.3 MB
+image3              latest              511136ea3c5a        25 minutes ago      188.3 MB
+```
+
+Filtering with `since` would give:
+
+```bash
+$ docker images --filter "since=image3"
+REPOSITORY          TAG                 IMAGE ID            CREATED             SIZE
+image1              latest              eeae25ada2aa        4 minutes ago       188.3 MB
+image2              latest              dea752e4e117        9 minutes ago       188.3 MB
+```
+
+#### Filter images by reference
+
+The `reference` filter shows only images whose reference matches
+the specified pattern.
+
+```bash
+$ docker images
+
+REPOSITORY          TAG                 IMAGE ID            CREATED             SIZE
+busybox             latest              e02e811dd08f        5 weeks ago         1.09 MB
+busybox             uclibc              e02e811dd08f        5 weeks ago         1.09 MB
+busybox             musl                733eb3059dce        5 weeks ago         1.21 MB
+busybox             glibc               21c16b6787c6        5 weeks ago         4.19 MB
+```
+
+Filtering with `reference` would give:
+
+```bash
+$ docker images --filter=reference='busy*:*libc'
+
+REPOSITORY          TAG                 IMAGE ID            CREATED             SIZE
+busybox             uclibc              e02e811dd08f        5 weeks ago         1.09 MB
+busybox             glibc               21c16b6787c6        5 weeks ago         4.19 MB
+```
+
+### Format the output
+
+The formatting option (`--format`) will pretty-print image output
+using a Go template.
+
+Valid placeholders for the Go template are listed below:
+
+| Placeholder     | Description                              |
+| --------------- | ---------------------------------------- |
+| `.ID`           | Image ID                                 |
+| `.Repository`   | Image repository                         |
+| `.Tag`          | Image tag                                |
+| `.Digest`       | Image digest                             |
+| `.CreatedSince` | Elapsed time since the image was created |
+| `.CreatedAt`    | Time when the image was created          |
+| `.Size`         | Image disk size                          |
+
+When using the `--format` option, the `images` command will either
+output the data exactly as the template declares or, when using the
+`table` directive, will include column headers as well.
+
+The following example uses a template without headers and outputs the
+`ID` and `Repository` entries separated by a colon for all images:
+
+```bash
+$ docker images --format "{{.ID}}: {{.Repository}}"
+
+77af4d6b9913: <none>
+b6fa739cedf5: committ
+78a85c484f71: <none>
+30557a29d5ab: docker
+5ed6274db6ce: <none>
+746b819f315e: postgres
+746b819f315e: postgres
+746b819f315e: postgres
+746b819f315e: postgres
+```
+
+To list all images with their repository and tag in a table format you
+can use:
+
+```bash
+$ docker images --format "table {{.ID}}\t{{.Repository}}\t{{.Tag}}"
+
+IMAGE ID            REPOSITORY                TAG
+77af4d6b9913        <none>                    <none>
+b6fa739cedf5        committ                   latest
+78a85c484f71        <none>                    <none>
+30557a29d5ab        docker                    latest
+5ed6274db6ce        <none>                    <none>
+746b819f315e        postgres                  9
+746b819f315e        postgres                  9.3
+746b819f315e        postgres                  9.3.5
+746b819f315e        postgres                  latest
+```
diff --git a/components/engine/docs/reference/commandline/import.md b/components/engine/docs/reference/commandline/import.md
new file mode 100644
index 0000000000..57edf650c9
--- /dev/null
+++ b/components/engine/docs/reference/commandline/import.md
@@ -0,0 +1,89 @@
+---
+title: "import"
+description: "The import command description and usage"
+keywords: "import, file, system, container"
+---
+
+
+
+# import
+
+```markdown
+Usage:  docker import [OPTIONS] file|URL|- [REPOSITORY[:TAG]]
+
+Import the contents from a tarball to create a filesystem image
+
+Options:
+  -c, --change value     Apply Dockerfile instruction to the created image (default [])
+      --help             Print usage
+  -m, --message string   Set commit message for imported image
+```
+
+## Description
+
+You can specify a `URL` or `-` (dash) to take data directly from `STDIN`. The
+`URL` can point to an archive (.tar, .tar.gz, .tgz, .bzip, .tar.xz, or .txz)
+containing a filesystem or to an individual file on the Docker host. If you
+specify an archive, Docker untars it in the container relative to the `/`
+(root). If you specify an individual file, you must specify the full path within
+the host. To import from a remote location, specify a `URI` that begins with the
+`http://` or `https://` protocol.
+
+The `--change` option will apply `Dockerfile` instructions to the image
+that is created.
+Supported `Dockerfile` instructions:
+`CMD`|`ENTRYPOINT`|`ENV`|`EXPOSE`|`ONBUILD`|`USER`|`VOLUME`|`WORKDIR`
+
+## Examples
+
+### Import from a remote location
+
+This will create a new untagged image.
+
+```bash
+$ docker import http://example.com/exampleimage.tgz
+```
+
+### Import from a local file
+
+- Import to docker via pipe and `STDIN`.
+
+  ```bash
+  $ cat exampleimage.tgz | docker import - exampleimagelocal:new
+  ```
+
+- Import with a commit message.
+
+  ```bash
+  $ cat exampleimage.tgz | docker import --message "New image imported from tarball" - exampleimagelocal:new
+  ```
+
+- Import to docker from a local archive.
+
+  ```bash
+  $ docker import /path/to/exampleimage.tgz
+  ```
+
+### Import from a local directory
+
+```bash
+$ sudo tar -c . | docker import - exampleimagedir
+```
+
+### Import from a local directory with new configurations
+
+```bash
+$ sudo tar -c . | docker import --change "ENV DEBUG true" - exampleimagedir
+```
+
+Note the `sudo` in this example – you must preserve
+the ownership of the files (especially root ownership) during the
+archiving with tar. If you are not root (or not using `sudo`) when you
+create the tar archive, the ownership might not be preserved.
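+
+### Import from a local directory with multiple new configurations
+
+Because `--change` can be repeated, several `Dockerfile` instructions can be
+applied in a single import. A sketch, reusing the directory import above (the
+`WORKDIR` and `CMD` values are illustrative):
+
+```bash
+$ sudo tar -c . | docker import \
+    --change "ENV DEBUG true" \
+    --change "WORKDIR /app" \
+    --change 'CMD ["/bin/sh"]' \
+    - exampleimagedir
+```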
diff --git a/components/engine/docs/reference/commandline/index.md b/components/engine/docs/reference/commandline/index.md
new file mode 100644
index 0000000000..c000082fab
--- /dev/null
+++ b/components/engine/docs/reference/commandline/index.md
@@ -0,0 +1,184 @@
+---
+title: "Docker commands"
+description: "Docker's CLI command description and usage"
+keywords: "Docker, Docker documentation, CLI, command line"
+identifier: "smn_cli_guide"
+---
+
+
+
+# The Docker commands
+
+This section contains reference information on using Docker's command line
+client. Each command has a reference page along with samples. If you are
+unfamiliar with the command line, you should start by reading about how to
+[Use the Docker command line](cli.md).
+
+You start the Docker daemon with the command line. How you start the daemon
+affects your Docker containers. For that reason you should also make sure to
+read the [`dockerd`](dockerd.md) reference page.
+
+## Commands by object
+
+### Docker management commands
+
+| Command | Description |
+|:--------|:-------------------------------------------------------------------|
+| [dockerd](dockerd.md) | Launch the Docker daemon |
+| [info](info.md) | Display system-wide information |
+| [inspect](inspect.md) | Return low-level information on a container or image |
+| [version](version.md) | Show the Docker version information |
+
+
+### Image commands
+
+| Command | Description |
+|:--------|:-------------------------------------------------------------------|
+| [build](build.md) | Build an image from a Dockerfile |
+| [commit](commit.md) | Create a new image from a container's changes |
+| [history](history.md) | Show the history of an image |
+| [images](images.md) | List images |
+| [import](import.md) | Import the contents from a tarball to create a filesystem image |
+| [load](load.md) | Load an image from a tar archive or STDIN |
+| [image prune](image_prune.md) | Remove unused images |
+| [rmi](rmi.md) | Remove one or more images |
+| [save](save.md) | Save images to a tar archive |
+| [tag](tag.md) | Tag an image into a repository |
+
+### Container commands
+
+| Command | Description |
+|:--------|:-------------------------------------------------------------------|
+| [attach](attach.md) | Attach to a running container |
+| [container prune](container_prune.md) | Remove all stopped containers |
+| [cp](cp.md) | Copy files/folders from a container to a HOSTDIR or to STDOUT |
+| [create](create.md) | Create a new container |
+| [diff](diff.md) | Inspect changes on a container's filesystem |
+| [events](events.md) | Get real time events from the server |
+| [exec](exec.md) | Run a command in a running container |
+| [export](export.md) | Export a container's filesystem as a tar archive |
+| [kill](kill.md) | Kill a running container |
+| [logs](logs.md) | Fetch the logs of a container |
+| [pause](pause.md) | Pause all processes within a container |
+| [port](port.md) | List port mappings or a specific mapping for the container |
+| [ps](ps.md) | List containers |
+| [rename](rename.md) | Rename a container |
+| [restart](restart.md) | Restart a running container |
+| [rm](rm.md) | Remove one or more containers |
+| [run](run.md) | Run a command in a new container |
+| [start](start.md) | Start one or more stopped containers |
+| [stats](stats.md) | Display a live stream of container(s) resource usage statistics |
+| [stop](stop.md) | Stop a running container |
+| [top](top.md) | Display the running processes of a container |
+| [unpause](unpause.md) | Unpause all processes within a container |
+| [update](update.md) | Update configuration of one or more containers |
+| [wait](wait.md) | Block until a container stops, then print its exit code |
+
+### Hub and registry commands
+
+| Command | Description |
+|:--------|:-------------------------------------------------------------------|
+| [login](login.md) | Register or log in to a Docker registry |
+| [logout](logout.md) | Log out from a Docker registry |
+| [pull](pull.md) | Pull an image or a repository from a Docker registry |
+| [push](push.md) | Push an image or a repository to a Docker registry |
+| [search](search.md) | Search the Docker Hub for images |
+
+### Network and connectivity commands
+
+| Command | Description |
+|:--------|:-------------------------------------------------------------------|
+| [network connect](network_connect.md) | Connect a container to a network |
+| [network create](network_create.md) | Create a new network |
+| [network disconnect](network_disconnect.md) | Disconnect a container from a network |
+| [network inspect](network_inspect.md) | Display information about a network |
+| [network ls](network_ls.md) | Lists all the networks the Engine `daemon` knows about |
+| [network prune](network_prune.md) | Remove all unused networks |
+| [network rm](network_rm.md) | Removes one or more networks |
+
+### Shared data volume commands
+
+| Command | Description |
+|:--------|:-------------------------------------------------------------------|
+| [volume create](volume_create.md) | Creates a new volume where containers can consume and store data |
+| [volume inspect](volume_inspect.md) | Display information about a volume |
+| [volume ls](volume_ls.md) | Lists all the volumes Docker knows about |
+| [volume prune](volume_prune.md) | Remove all unused volumes |
+| [volume rm](volume_rm.md) | Remove one or more volumes |
+
+### Swarm node commands
+
+| Command | Description |
+|:--------|:-------------------------------------------------------------------|
+| [node demote](node_demote.md) | Demotes an existing manager so that it is no longer a manager |
+| [node inspect](node_inspect.md) | Inspect a node in the swarm |
+| [node ls](node_ls.md) | List nodes in the swarm |
+| [node promote](node_promote.md) | Promote a node that is pending a promotion to manager |
+| [node ps](node_ps.md) | List tasks running on one or more nodes |
+| [node rm](node_rm.md) | Remove one or more nodes from the swarm |
+| [node update](node_update.md) | Update attributes for a node |
+
+### Swarm management commands
+
+| Command | Description |
+|:--------|:-------------------------------------------------------------------|
+| [swarm init](swarm_init.md) | Initialize a swarm |
+| [swarm join](swarm_join.md) | Join a swarm as a manager node or worker node |
+| [swarm leave](swarm_leave.md) | Remove the current node from the swarm |
+| [swarm join-token](swarm_join_token.md) | Display or rotate join tokens |
+| [swarm unlock](swarm_unlock.md) | Unlock swarm |
+| [swarm unlock-key](swarm_unlock_key.md) | Manage the unlock key |
+| [swarm update](swarm_update.md) | Update attributes of a swarm |
+
+### Swarm service commands
+
+| Command | Description |
+|:--------|:-------------------------------------------------------------------|
+| [service create](service_create.md) | Create a new service |
+| [service inspect](service_inspect.md) | Inspect a service |
+| [service logs](service_logs.md) | Fetch the logs of a service or task |
+| [service ls](service_ls.md) | List services in the swarm |
+| [service ps](service_ps.md) | List the tasks of a service |
+| [service rm](service_rm.md) | Remove a service from the swarm |
+| [service scale](service_scale.md) | Set the number of replicas for the desired state of the service |
+| [service update](service_update.md) | Update the attributes of a service |
+
+### Swarm secret commands
+
+| Command | Description |
+|:--------|:-------------------------------------------------------------------|
+| [secret create](secret_create.md) | Create a secret from a file or STDIN as content |
+| [secret inspect](secret_inspect.md) | Inspect the specified secret |
+| [secret ls](secret_ls.md) | List secrets in the swarm |
+| [secret rm](secret_rm.md) | Remove the specified secrets from the swarm |
+
+### Swarm stack commands
+
+| Command | Description |
+|:--------|:-------------------------------------------------------------------|
+| [stack deploy](stack_deploy.md) | Deploy a new stack or update an existing stack |
+| [stack ls](stack_ls.md) | List stacks in the swarm |
+| [stack ps](stack_ps.md) | List the tasks in the stack |
+| [stack rm](stack_rm.md) | Remove the stack from the swarm |
+| [stack services](stack_services.md) | List the services in the stack |
+
+### Plugin commands
+
+| Command | Description |
+|:--------|:-------------------------------------------------------------------|
+| [plugin create](plugin_create.md) | Create a plugin from a rootfs and configuration |
+| [plugin disable](plugin_disable.md) | Disable a plugin |
+| [plugin enable](plugin_enable.md) | Enable a plugin |
+| [plugin inspect](plugin_inspect.md) | Display detailed information on a plugin |
+| [plugin install](plugin_install.md) | Install a plugin |
+| [plugin ls](plugin_ls.md) | List plugins |
+| [plugin push](plugin_push.md) | Push a plugin to a registry |
+| [plugin rm](plugin_rm.md) | Remove a plugin |
+| [plugin set](plugin_set.md) | Change settings for a plugin |
diff --git a/components/engine/docs/reference/commandline/info.md b/components/engine/docs/reference/commandline/info.md
new file mode 100644
index 0000000000..9929c04af3
--- /dev/null
+++ b/components/engine/docs/reference/commandline/info.md
@@ -0,0 +1,245 @@
+---
+title: "info"
+description: "The info command description and usage"
+keywords: "display, docker, information"
+---
+
+
+
+# info
+
+```markdown
+Usage:  docker info [OPTIONS]
+
+Display system-wide information
+
+Options:
+  -f, --format string   Format the output using the given Go template
+      --help            Print usage
+```
+
+## Description
+
+This command displays system-wide information regarding the Docker installation.
+Information displayed includes the kernel version, number of containers and images.
+The number of images shown is the number of unique images. The same image tagged
+under different names is counted only once.
+
+If a format is specified, the given template will be executed instead of the
+default format. Go's [text/template](http://golang.org/pkg/text/template/) package
+describes all the details of the format.
+
+Depending on the storage driver in use, additional information can be shown, such
+as pool name, data file, metadata file, data space used, total data space, metadata
+space used, and total metadata space.
+
+The data file is where the images are stored and the metadata file is where the
+metadata regarding those images is stored.
+When run for the first time, Docker allocates a certain amount of data space
+and metadata space from the space available on the volume where
+`/var/lib/docker` is mounted.
+
+## Examples
+
+### Show output
+
+The example below shows the output for a daemon running on Red Hat Enterprise Linux,
+using the `devicemapper` storage driver. As can be seen in the output, additional
+information about the `devicemapper` storage driver is shown:
+
+```bash
+$ docker info
+
+Containers: 14
+ Running: 3
+ Paused: 1
+ Stopped: 10
+Images: 52
+Server Version: 1.10.3
+Storage Driver: devicemapper
+ Pool Name: docker-202:2-25583803-pool
+ Pool Blocksize: 65.54 kB
+ Base Device Size: 10.74 GB
+ Backing Filesystem: xfs
+ Data file: /dev/loop0
+ Metadata file: /dev/loop1
+ Data Space Used: 1.68 GB
+ Data Space Total: 107.4 GB
+ Data Space Available: 7.548 GB
+ Metadata Space Used: 2.322 MB
+ Metadata Space Total: 2.147 GB
+ Metadata Space Available: 2.145 GB
+ Udev Sync Supported: true
+ Deferred Removal Enabled: false
+ Deferred Deletion Enabled: false
+ Deferred Deleted Device Count: 0
+ Data loop file: /var/lib/docker/devicemapper/devicemapper/data
+ Metadata loop file: /var/lib/docker/devicemapper/devicemapper/metadata
+ Library Version: 1.02.107-RHEL7 (2015-12-01)
+Execution Driver: native-0.2
+Logging Driver: json-file
+Plugins:
+ Volume: local
+ Network: null host bridge
+Kernel Version: 3.10.0-327.el7.x86_64
+Operating System: Red Hat Enterprise Linux Server 7.2 (Maipo)
+OSType: linux
+Architecture: x86_64
+CPUs: 1
+Total Memory: 991.7 MiB
+Name: ip-172-30-0-91.ec2.internal
+ID: I54V:OLXT:HVMM:TPKO:JPHQ:CQCD:JNLC:O3BZ:4ZVJ:43XJ:PFHZ:6N2S
+Docker Root Dir: /var/lib/docker
+Debug mode (client): false
+Debug mode (server): false
+Username: gordontheturtle
+Registry: https://index.docker.io/v1/
+Insecure registries:
+ myinsecurehost:5000
+ 127.0.0.0/8
+```
+
+### Show debugging output
+
+Here is a sample output for a daemon running on Ubuntu, using the overlay2
+storage driver and a node that is part of a 2-node swarm:
+
+```bash
+$ docker -D info
+
+Containers: 14
+ Running: 3
+ Paused: 1
+ Stopped: 10
+Images: 52
+Server Version: 1.13.0
+Storage Driver: overlay2
+ Backing Filesystem: extfs
+ Supports d_type: true
+ Native Overlay Diff: false
+Logging Driver: json-file
+Cgroup Driver: cgroupfs
+Plugins:
+ Volume: local
+ Network: bridge host macvlan null overlay
+Swarm: active
+ NodeID: rdjq45w1op418waxlairloqbm
+ Is Manager: true
+ ClusterID: te8kdyw33n36fqiz74bfjeixd
+ Managers: 1
+ Nodes: 2
+ Orchestration:
+  Task History Retention Limit: 5
+ Raft:
+  Snapshot Interval: 10000
+  Number of Old Snapshots to Retain: 0
+  Heartbeat Tick: 1
+  Election Tick: 3
+ Dispatcher:
+  Heartbeat Period: 5 seconds
+ CA Configuration:
+  Expiry Duration: 3 months
+ Root Rotation In Progress: false
+ Node Address: 172.16.66.128 172.16.66.129
+ Manager Addresses:
+  172.16.66.128:2477
+Runtimes: runc
+Default Runtime: runc
+Init Binary: docker-init
+containerd version: 8517738ba4b82aff5662c97ca4627e7e4d03b531
+runc version: ac031b5bf1cc92239461125f4c1ffb760522bbf2
+init version: N/A (expected: v0.13.0)
+Security Options:
+ apparmor
+ seccomp
+  Profile: default
+Kernel Version: 4.4.0-31-generic
+Operating System: Ubuntu 16.04.1 LTS
+OSType: linux
+Architecture: x86_64
+CPUs: 2
+Total Memory: 1.937 GiB
+Name: ubuntu
+ID: H52R:7ZR6:EIIA:76JG:ORIY:BVKF:GSFU:HNPG:B5MK:APSC:SZ3Q:N326
+Docker Root Dir: /var/lib/docker
+Debug Mode (client): true
+Debug Mode (server): true
+ File Descriptors: 30
+ Goroutines: 123
+ System Time: 2016-11-12T17:24:37.955404361-08:00
+ EventsListeners: 0
+Http Proxy: http://test:test@proxy.example.com:8080
+Https Proxy: https://test:test@proxy.example.com:8080
+No Proxy: localhost,127.0.0.1,docker-registry.somecorporation.com
+Registry: https://index.docker.io/v1/
+WARNING: No swap limit support
+Labels:
+ storage=ssd
+ staging=true
+Experimental: false
+Insecure Registries:
+ 127.0.0.0/8
+Registry Mirrors:
+ http://192.168.1.2/
+ http://registry-mirror.example.com:5000/
+Live Restore Enabled: false
+```
+
+The global `-D` option causes all `docker` commands to output debug information.
+
+### Format the output
+
+You can also specify the output format:
+
+```bash
+$ docker info --format '{{json .}}'
+
+{"ID":"I54V:OLXT:HVMM:TPKO:JPHQ:CQCD:JNLC:O3BZ:4ZVJ:43XJ:PFHZ:6N2S","Containers":14, ...}
+```
+
+### Run `docker info` on Windows
+
+Here is a sample output for a daemon running on Windows Server 2016:
+
+```none
+E:\docker>docker info
+
+Containers: 1
+ Running: 0
+ Paused: 0
+ Stopped: 1
+Images: 17
+Server Version: 1.13.0
+Storage Driver: windowsfilter
+ Windows:
+Logging Driver: json-file
+Plugins:
+ Volume: local
+ Network: nat null overlay
+Swarm: inactive
+Default Isolation: process
+Kernel Version: 10.0 14393 (14393.206.amd64fre.rs1_release.160912-1937)
+Operating System: Windows Server 2016 Datacenter
+OSType: windows
+Architecture: x86_64
+CPUs: 8
+Total Memory: 3.999 GiB
+Name: WIN-V0V70C0LU5P
+ID: NYMS:B5VK:UMSL:FVDZ:EWB5:FKVK:LPFL:FJMQ:H6FT:BZJ6:L2TD:XH62
+Docker Root Dir: C:\control
+Debug Mode (client): false
+Debug Mode (server): false
+Registry: https://index.docker.io/v1/
+Insecure Registries:
+ 127.0.0.0/8
+Registry Mirrors:
+ http://192.168.1.2/
+ http://registry-mirror.example.com:5000/
+Live Restore Enabled: false
+```
diff --git a/components/engine/docs/reference/commandline/inspect.md b/components/engine/docs/reference/commandline/inspect.md
new file mode 100644
index 0000000000..9ce4f5f517
--- /dev/null
+++ b/components/engine/docs/reference/commandline/inspect.md
@@ -0,0 +1,122 @@
+---
+title: "inspect"
+description: "The inspect command description and usage"
+keywords: "inspect, container, json"
+---
+
+
+
+# inspect
+
+```markdown
+Usage:  docker inspect [OPTIONS] NAME|ID [NAME|ID...]
+
+Return low-level information on Docker object(s) (e.g. container, image, volume,
+network, node, service, or task) identified by name or ID
+
+Options:
+  -f, --format   Format the output using the given Go template
+      --help     Print usage
+  -s, --size     Display total file sizes if the type is container
+      --type     Return JSON for specified type
+```
+
+## Description
+
+Docker inspect provides detailed information on constructs controlled by Docker.
+
+By default, `docker inspect` will render results in a JSON array.
+
+## Request a custom response format (--format)
+
+If a format is specified, the given template will be executed for each result.
+
+Go's [text/template](http://golang.org/pkg/text/template/) package
+describes all the details of the format.
+
+## Specify target type (--type)
+
+`--type container|image|node|network|secret|service|volume|task|plugin`
+
+The `docker inspect` command matches any type of object by either ID or name.
+In some cases multiple types of objects (for example, a container and a volume)
+exist with the same name, making the result ambiguous.
+
+To restrict `docker inspect` to a specific type of object, use the `--type`
+option.
+
+The following example inspects a _volume_ named "myvolume"
+
+```bash
+$ docker inspect --type=volume myvolume
+```
+
+## Examples
+
+### Get an instance's IP address
+
+For the most part, you can pick out any field from the JSON in a fairly
+straightforward manner.
+
+```bash
+$ docker inspect --format='{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' $INSTANCE_ID
+```
+
+### Get an instance's MAC address
+
+```bash
+$ docker inspect --format='{{range .NetworkSettings.Networks}}{{.MacAddress}}{{end}}' $INSTANCE_ID
+```
+
+### Get an instance's log path
+
+```bash
+$ docker inspect --format='{{.LogPath}}' $INSTANCE_ID
+```
+
+### Get an instance's image name
+
+```bash
+$ docker inspect --format='{{.Config.Image}}' $INSTANCE_ID
+```
+
+### List all port bindings
+
+You can loop over arrays and maps in the results to produce simple text
+output:
+
+```bash
+$ docker inspect --format='{{range $p, $conf := .NetworkSettings.Ports}} {{$p}} -> {{(index $conf 0).HostPort}} {{end}}' $INSTANCE_ID
+```
+
+### Find a specific port mapping
+
+The `.Field` syntax doesn't work when the field name begins with a
+number, but the template language's `index` function does. The
+`.NetworkSettings.Ports` section contains a map of the internal port
+mappings to a list of external address/port objects. To grab just the
+numeric public port, you use `index` to find the specific port map, and
+then `index` 0 contains the first object inside of that. Then you ask for
+the `HostPort` field to get the public address.
+
+```bash
+$ docker inspect --format='{{(index (index .NetworkSettings.Ports "8787/tcp") 0).HostPort}}' $INSTANCE_ID
+```
+
+### Get a subsection in JSON format
+
+If you request a field which is itself a structure containing other
+fields, by default you get a Go-style dump of the inner values.
+Docker adds a template function, `json`, which can be applied to get
+results in JSON format.
+
+```bash
+$ docker inspect --format='{{json .Config}}' $INSTANCE_ID
+```
diff --git a/components/engine/docs/reference/commandline/kill.md b/components/engine/docs/reference/commandline/kill.md
new file mode 100644
index 0000000000..97b15add9b
--- /dev/null
+++ b/components/engine/docs/reference/commandline/kill.md
@@ -0,0 +1,35 @@
+---
+title: "kill"
+description: "The kill command description and usage"
+keywords: "container, kill, signal"
+---
+
+
+
+# kill
+
+```markdown
+Usage:  docker kill [OPTIONS] CONTAINER [CONTAINER...]
+
+Kill one or more running containers
+
+Options:
+      --help            Print usage
+  -s, --signal string   Signal to send to the container (default "KILL")
+```
+
+## Description
+
+The main process inside the container will be sent `SIGKILL`, or any
+signal specified with option `--signal`.
+
+> **Note**: `ENTRYPOINT` and `CMD` in the *shell* form run as a subcommand of
+> `/bin/sh -c`, which does not pass signals. This means that the executable is
+> not the container’s PID 1 and does not receive Unix signals.
diff --git a/components/engine/docs/reference/commandline/load.md b/components/engine/docs/reference/commandline/load.md
new file mode 100644
index 0000000000..3ce6c19e24
--- /dev/null
+++ b/components/engine/docs/reference/commandline/load.md
@@ -0,0 +1,62 @@
+---
+title: "load"
+description: "The load command description and usage"
+keywords: "stdin, tarred, repository"
+---
+
+
+
+# load
+
+```markdown
+Usage:  docker load [OPTIONS]
+
+Load an image from a tar archive or STDIN
+
+Options:
+      --help           Print usage
+  -i, --input string   Read from tar archive file, instead of STDIN.
+                       The tarball may be compressed with gzip, bzip, or xz
+  -q, --quiet          Suppress the load output but still output the imported images
+```
+
+## Description
+
+`docker load` loads a tarred repository from a file or the standard input stream.
+It restores both images and tags.
+
+## Examples
+
+```bash
+$ docker images
+
+REPOSITORY          TAG                 IMAGE ID            CREATED             SIZE
+
+$ docker load < busybox.tar.gz
+
+Loaded image: busybox:latest
+$ docker images
+REPOSITORY          TAG                 IMAGE ID            CREATED             SIZE
+busybox             latest              769b9341d937        7 weeks ago         2.489 MB
+
+$ docker load --input fedora.tar
+
+Loaded image: fedora:rawhide
+
+Loaded image: fedora:20
+
+$ docker images
+
+REPOSITORY          TAG                 IMAGE ID            CREATED             SIZE
+busybox             latest              769b9341d937        7 weeks ago         2.489 MB
+fedora              rawhide             0d20aec6529d        7 weeks ago         387 MB
+fedora              20                  58394af37342        7 weeks ago         385.5 MB
+fedora              heisenbug           58394af37342        7 weeks ago         385.5 MB
+fedora              latest              58394af37342        7 weeks ago         385.5 MB
+```
diff --git a/components/engine/docs/reference/commandline/login.md b/components/engine/docs/reference/commandline/login.md
new file mode 100644
index 0000000000..0b8e697281
--- /dev/null
+++ b/components/engine/docs/reference/commandline/login.md
@@ -0,0 +1,158 @@
+---
+title: "login"
+description: "The login command description and usage"
+keywords: "registry, login, image"
+---
+
+
+
+# login
+
+```markdown
+Usage:  docker login [OPTIONS] [SERVER]
+
+Log in to a Docker registry.
+If no server is specified, the default is defined by the daemon.
+
+Options:
+      --help              Print usage
+  -p, --password string   Password
+  -u, --username string   Username
+```
+
+## Description
+
+Log in to a registry.
+
+### Log in to a self-hosted registry
+
+If you want to log in to a self-hosted registry you can specify this by
+adding the server name.
+
+```bash
+$ docker login localhost:8080
+```
+
+### Privileged user requirement
+
+`docker login` requires the user to use `sudo` or be `root`, except when:
+
+1. connecting to a remote daemon, such as a `docker-machine` provisioned `docker engine`.
+2. the user is added to the `docker` group. This will impact the security of your system; the `docker` group is `root` equivalent. See [Docker Daemon Attack Surface](https://docs.docker.com/security/security/#docker-daemon-attack-surface) for details.
+
+You can log into any public or private repository for which you have
+credentials. When you log in, the command stores encoded credentials in
+`$HOME/.docker/config.json` on Linux or `%USERPROFILE%/.docker/config.json` on Windows.
+
+### Credentials store
+
+The Docker Engine can keep user credentials in an external credentials store,
+such as the native keychain of the operating system. Using an external store
+is more secure than storing credentials in the Docker configuration file.
+
+To use a credentials store, you need an external helper program to interact
+with a specific keychain or external store. Docker requires the helper
+program to be in the client's host `$PATH`.
+
+This is the list of currently available credentials helpers and where
+you can download them from:
+
+- D-Bus Secret Service: https://github.com/docker/docker-credential-helpers/releases
+- Apple macOS keychain: https://github.com/docker/docker-credential-helpers/releases
+- Microsoft Windows Credential Manager: https://github.com/docker/docker-credential-helpers/releases
+
+You need to specify the credentials store in `$HOME/.docker/config.json`
+to tell the docker engine to use it. The value of the config property should be
+the suffix of the program to use (i.e.
everything after `docker-credential-`).
+For example, to use `docker-credential-osxkeychain`:
+
+```json
+{
+  "credsStore": "osxkeychain"
+}
+```
+
+If you are currently logged in, run `docker logout` to remove
+the credentials from the file and run `docker login` again.
+
+### Credential helper protocol
+
+Credential helpers can be any program or script that follows a very simple protocol.
+This protocol is heavily inspired by Git, but it differs in the information shared.
+
+The helpers always use the first argument in the command to identify the action.
+There are only three possible values for that argument: `store`, `get`, and `erase`.
+
+The `store` command takes a JSON payload from the standard input. That payload carries
+the server address, to identify the credential, the user name, and either a password
+or an identity token.
+
+```json
+{
+  "ServerURL": "https://index.docker.io/v1",
+  "Username": "david",
+  "Secret": "passw0rd1"
+}
+```
+
+If the secret being stored is an identity token, the Username should be set to
+`<token>`.
+
+The `store` command can write error messages to `STDOUT` that the docker engine
+will show if there was an issue.
+
+The `get` command takes a string payload from the standard input. That payload carries
+the server address that the docker engine needs credentials for. This is
+an example of that payload: `https://index.docker.io/v1`.
+
+The `get` command writes a JSON payload to `STDOUT`. Docker reads the user name
+and password from this payload:
+
+```json
+{
+  "Username": "david",
+  "Secret": "passw0rd1"
+}
+```
+
+The `erase` command takes a string payload from `STDIN`. That payload carries
+the server address that the docker engine wants to remove credentials for. This is
+an example of that payload: `https://index.docker.io/v1`.
+
+The `erase` command can write error messages to `STDOUT` that the docker engine
+will show if there was an issue.
+
+### Credential helpers
+
+Credential helpers are similar to the credential store above, but act as the
+designated programs to handle credentials for *specific registries*. The default
+credential store (`credsStore` or the config file itself) will not be used for
+operations concerning credentials of the specified registries.
+
+Credential helpers are specified in a similar way to `credsStore`, but
+allow for multiple helpers to be configured at a time. Keys specify the
+registry domain, and values specify the suffix of the program to use
+(i.e. everything after `docker-credential-`).
+For example:
+
+```json
+{
+  "credHelpers": {
+    "registry.example.com": "registryhelper",
+    "awesomereg.example.org": "hip-star",
+    "unicorn.example.io": "vcbait"
+  }
+}
+```
+
+### Logging out
+
+If you are currently logged in, run `docker logout` to remove
+the credentials from the default store.
diff --git a/components/engine/docs/reference/commandline/logout.md b/components/engine/docs/reference/commandline/logout.md
new file mode 100644
index 0000000000..1e150eb848
--- /dev/null
+++ b/components/engine/docs/reference/commandline/logout.md
@@ -0,0 +1,32 @@
+---
+title: "logout"
+description: "The logout command description and usage"
+keywords: "logout, docker, registry"
+---
+
+
+
+# logout
+
+```markdown
+Usage:  docker logout [SERVER]
+
+Log out from a Docker registry.
+If no server is specified, the default is defined by the daemon.
+
+Options:
+      --help   Print usage
+```
+
+## Examples
+
+```bash
+$ docker logout localhost:8080
+```
diff --git a/components/engine/docs/reference/commandline/logs.md b/components/engine/docs/reference/commandline/logs.md
new file mode 100644
index 0000000000..75f25f7657
--- /dev/null
+++ b/components/engine/docs/reference/commandline/logs.md
@@ -0,0 +1,68 @@
+---
+title: "logs"
+description: "The logs command description and usage"
+keywords: "logs, retrieve, docker"
+---
+
+
+
+# logs
+
+```markdown
+Usage:  docker logs [OPTIONS] CONTAINER
+
+Fetch the logs of a container
+
+Options:
+      --details        Show extra details provided to logs
+  -f, --follow         Follow log output
+      --help           Print usage
+      --since string   Show logs since timestamp (e.g. 2013-01-02T13:23:37) or relative (e.g. 42m for 42 minutes)
+      --tail string    Number of lines to show from the end of the logs (default "all")
+  -t, --timestamps     Show timestamps
+```
+
+## Description
+
+The `docker logs` command batch-retrieves logs present at the time of execution.
+
+> **Note**: this command is only functional for containers that are started with
+> the `json-file` or `journald` logging driver.
+
+For more information about selecting and configuring logging drivers, refer to
+[Configure logging drivers](https://docs.docker.com/engine/admin/logging/overview/).
+
+The `docker logs --follow` command will continue streaming the new output from
+the container's `STDOUT` and `STDERR`.
+
+Passing a negative number or a non-integer to `--tail` is invalid and the
+value is set to `all` in that case.
+
+The `docker logs --timestamps` command will add an
+[RFC3339Nano timestamp](https://golang.org/pkg/time/#pkg-constants), for example
+`2014-09-16T06:17:46.000000000Z`, to each log entry. To ensure that the
+timestamps are aligned, the nanosecond part of the timestamp will be padded
+with zero when necessary.
+
+The `docker logs --details` command will add on extra attributes, such as
+environment variables and labels, provided to `--log-opt` when creating the
+container.
+
+The `--since` option shows only the container logs generated after
+a given date. You can specify the date as an RFC 3339 date, a UNIX
+timestamp, or a Go duration string (e.g. `1m30s`, `3h`). Besides RFC3339 date
+format you may also use RFC3339Nano, `2006-01-02T15:04:05`,
+`2006-01-02T15:04:05.999999999`, `2006-01-02Z07:00`, and `2006-01-02`. The local
+timezone on the client will be used if you do not provide either a `Z` or a
+`+-00:00` timezone offset at the end of the timestamp. When providing Unix
+timestamps enter seconds[.nanoseconds], where seconds is the number of seconds
+that have elapsed since January 1, 1970 (midnight UTC/GMT), not counting leap
+seconds (aka Unix epoch or Unix time), and the optional .nanoseconds field is a
+fraction of a second no more than nine digits long. You can combine the
+`--since` option with either or both of the `--follow` or `--tail` options.
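+
+For example, to see only the log lines from the last half hour and keep
+following new output, the options can be combined like this (a sketch; the
+container name `mycontainer` is illustrative):
+
+```bash
+$ docker logs --since 30m --tail 100 --follow mycontainer
+```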
diff --git a/components/engine/docs/reference/commandline/network.md b/components/engine/docs/reference/commandline/network.md
new file mode 100644
index 0000000000..4555740dad
--- /dev/null
+++ b/components/engine/docs/reference/commandline/network.md
@@ -0,0 +1,51 @@
+---
+title: "network"
+description: "The network command description and usage"
+keywords: "network"
+---
+
+
+
+# network
+
+```markdown
+Usage:  docker network COMMAND
+
+Manage networks
+
+Options:
+      --help   Print usage
+
+Commands:
+  connect      Connect a container to a network
+  create       Create a network
+  disconnect   Disconnect a container from a network
+  inspect      Display detailed information on one or more networks
+  ls           List networks
+  prune        Remove all unused networks
+  rm           Remove one or more networks
+
+Run 'docker network COMMAND --help' for more information on a command.
+```
+
+## Description
+
+Manage networks. You can use subcommands to create, inspect, list, remove,
+prune, connect, and disconnect networks.
+
+## Related commands
+
+* [network create](network_create.md)
+* [network inspect](network_inspect.md)
+* [network ls](network_ls.md)
+* [network rm](network_rm.md)
+* [network prune](network_prune.md)
+* [network connect](network_connect.md)
+* [network disconnect](network_disconnect.md)
diff --git a/components/engine/docs/reference/commandline/network_connect.md b/components/engine/docs/reference/commandline/network_connect.md
new file mode 100644
index 0000000000..ba01fc6add
--- /dev/null
+++ b/components/engine/docs/reference/commandline/network_connect.md
@@ -0,0 +1,117 @@
+---
+title: "network connect"
+description: "The network connect command description and usage"
+keywords: "network, connect, user-defined"
+---
+
+
+
+# network connect
+
+```markdown
+Usage:  docker network connect [OPTIONS] NETWORK CONTAINER
+
+Connect a container to a network
+
+Options:
+      --alias value           Add network-scoped alias for the container (default [])
+      --help                  Print usage
+      --ip string             IPv4 address (e.g., 172.30.100.104)
+      --ip6 string            IPv6 address (e.g., 2001:db8::33)
+      --link value            Add link to another container (default [])
+      --link-local-ip value   Add a link-local address for the container (default [])
+```
+
+## Description
+
+Connects a container to a network. You can connect a container by name
+or by ID. Once connected, the container can communicate with other containers in
+the same network.
+
+## Examples
+
+### Connect a running container to a network
+
+```bash
+$ docker network connect multi-host-network container1
+```
+
+### Connect a container to a network when it starts
+
+You can also use the `docker run --network=<network-name>` option to start a container and immediately connect it to a network.
+
+```bash
+$ docker run -itd --network=multi-host-network busybox
+```
+
+### Specify the IP address a container will use on a given network
+
+You can specify the IP address you want to be assigned to the container's interface.
+
+```bash
+$ docker network connect --ip 10.10.36.122 multi-host-network container2
+```
+
+### Use the legacy `--link` option
+
+You can use the `--link` option to link another container with a preferred alias.
+
+```bash
+$ docker network connect --link container1:c1 multi-host-network container2
+```
+
+### Create a network alias for a container
+
+The `--alias` option can be used to resolve the container by another name in the network
+being connected to.
+
+```bash
+$ docker network connect --alias db --alias mysql multi-host-network container2
+```
+
+### Network implications of stopping, pausing, or restarting containers
+
+You can pause, restart, and stop containers that are connected to a network.
+A container connects to its configured networks when it runs.
+
+If specified, the container's IP address(es) is reapplied when a stopped
+container is restarted. If the IP address is no longer available, the container
+fails to start. One way to guarantee that the IP address is available is
+to specify an `--ip-range` when creating the network, and choose the static IP
+address(es) from outside that range. This ensures that the IP address is not
+given to another container while this container is not on the network.
+
+```bash
+$ docker network create --subnet 172.20.0.0/16 --ip-range 172.20.240.0/20 multi-host-network
+```
+
+```bash
+$ docker network connect --ip 172.20.128.2 multi-host-network container2
+```
+
+To verify the container is connected, use the `docker network inspect` command. Use `docker network disconnect` to remove a container from the network.
+
+Once connected to a network, containers can communicate using only another
+container's IP address or name. For `overlay` networks or custom plugins that
+support multi-host connectivity, containers connected to the same multi-host
+network but launched from different Engines can also communicate in this way.
+
+You can connect a container to one or more networks. The networks need not be the same type. For example, you can connect a single container to both bridge and overlay networks.
+
+## Related commands
+
+* [network inspect](network_inspect.md)
+* [network create](network_create.md)
+* [network disconnect](network_disconnect.md)
+* [network ls](network_ls.md)
+* [network rm](network_rm.md)
+* [network prune](network_prune.md)
+* [Understand Docker container networks](https://docs.docker.com/engine/userguide/networking/)
+* [Work with networks](https://docs.docker.com/engine/userguide/networking/work-with-networks/)
diff --git a/components/engine/docs/reference/commandline/network_create.md b/components/engine/docs/reference/commandline/network_create.md
new file mode 100644
index 0000000000..099cc1439a
--- /dev/null
+++ b/components/engine/docs/reference/commandline/network_create.md
@@ -0,0 +1,227 @@
+---
+title: "network create"
+description: "The network create command description and usage"
+keywords: "network, create"
+---
+
+
+
+# network create
+
+```markdown
+Usage:  docker network create [OPTIONS] NETWORK
+
+Create a network
+
+Options:
+      --attachable           Enable manual container attachment
+      --ingress              Specify the network provides the routing-mesh
+      --aux-address value    Auxiliary IPv4 or IPv6 addresses used by Network
+                             driver (default map[])
+  -d, --driver string        Driver to manage the Network (default "bridge")
+      --gateway value        IPv4 or IPv6 Gateway for the master subnet (default [])
+      --help                 Print usage
+      --internal             Restrict external access to the network
+      --ip-range value       Allocate container ip from a sub-range (default [])
+      --ipam-driver string   IP Address Management Driver (default "default")
+      --ipam-opt value       Set IPAM driver specific options (default map[])
+      --ipv6                 Enable IPv6 networking
+      --label value          Set metadata on a network (default [])
+  -o, --opt value            Set driver specific options (default map[])
+      --subnet value         Subnet in CIDR format that represents a
+                             network segment (default [])
+      --scope value          Promote a network to swarm scope (value = [ local | swarm ])
+      --config-only          Create a configuration-only network
+      --config-from          The name of the network from which to copy the configuration
+```
+
+## Description
+
+Creates a new network. The `DRIVER` accepts `bridge` or `overlay` which are the
+built-in network drivers. If you have installed a third party or your own custom
+network driver you can specify that `DRIVER` here also. If you don't specify the
+`--driver` option, the command automatically creates a `bridge` network for you.
+When you install Docker Engine it creates a `bridge` network automatically. This
+network corresponds to the `docker0` bridge that Engine has traditionally relied
+on. When you launch a new container with `docker run` it automatically connects to
+this bridge network. You cannot remove this default bridge network, but you can
+create new ones using the `network create` command.
+
+```bash
+$ docker network create -d bridge my-bridge-network
+```
+
+Bridge networks are isolated networks on a single Engine installation. If you
+want to create a network that spans multiple Docker hosts each running an
+Engine, you must create an `overlay` network. Unlike `bridge` networks, overlay
+networks require some pre-existing conditions before you can create one. These
+conditions are:
+
+* Access to a key-value store. Engine supports Consul, Etcd, and ZooKeeper (Distributed store) key-value stores.
+* A cluster of hosts with connectivity to the key-value store.
+* A properly configured Engine `daemon` on each host in the cluster.
+
+The `dockerd` options that support the `overlay` network are:
+
+* `--cluster-store`
+* `--cluster-store-opt`
+* `--cluster-advertise`
+
+To read more about these options and how to configure them, see ["*Get started
+with multi-host network*"](https://docs.docker.com/engine/userguide/networking/get-started-overlay).
+
+While not required, it is a good idea to install Docker Swarm to
+manage the cluster that makes up your network. Swarm provides sophisticated
+discovery and server management tools that can assist your implementation.
+
+Once you have prepared the `overlay` network prerequisites you simply choose a
+Docker host in the cluster and issue the following to create the network:
+
+```bash
+$ docker network create -d overlay my-multihost-network
+```
+
+Network names must be unique. The Docker daemon attempts to identify naming
+conflicts but this is not guaranteed. It is the user's responsibility to avoid
+name conflicts.
+
+## Examples
+
+### Connect containers
+
+When you start a container, use the `--network` flag to connect it to a network.
+This example adds the `busybox` container to the `mynet` network:
+
+```bash
+$ docker run -itd --network=mynet busybox
+```
+
+If you want to add a container to a network after the container is already
+running, use the `docker network connect` subcommand.
+
+You can connect multiple containers to the same network. Once connected, the
+containers can communicate using only another container's IP address or name.
+For `overlay` networks or custom plugins that support multi-host connectivity,
+containers connected to the same multi-host network but launched from different
+Engines can also communicate in this way.
+
+You can disconnect a container from a network using the `docker network
+disconnect` command.
+
+### Specify advanced options
+
+When you create a network, Engine creates a non-overlapping subnetwork for the
+network by default. This subnetwork is not a subdivision of an existing
+network. It is purely for IP-addressing purposes.
+You can override this default
+and specify subnetwork values directly using the `--subnet` option. On a
+`bridge` network you can only create a single subnet:
+
+```bash
+$ docker network create --driver=bridge --subnet=192.168.0.0/16 br0
+```
+
+Additionally, you can also specify the `--gateway`, `--ip-range`, and `--aux-address`
+options.
+
+```bash
+$ docker network create \
+  --driver=bridge \
+  --subnet=172.28.0.0/16 \
+  --ip-range=172.28.5.0/24 \
+  --gateway=172.28.5.254 \
+  br0
+```
+
+If you omit the `--gateway` flag, the Engine selects one for you from inside a
+preferred pool. For `overlay` networks and for network driver plugins that
+support it you can create multiple subnetworks.
+
+```bash
+$ docker network create -d overlay \
+  --subnet=192.168.0.0/16 \
+  --subnet=192.170.0.0/16 \
+  --gateway=192.168.0.100 \
+  --gateway=192.170.0.100 \
+  --ip-range=192.168.1.0/24 \
+  --aux-address="my-router=192.168.1.5" --aux-address="my-switch=192.168.1.6" \
+  --aux-address="my-printer=192.170.1.5" --aux-address="my-nas=192.170.1.6" \
+  my-multihost-network
+```
+
+Be sure that your subnetworks do not overlap. If they do, the network creation
+fails and Engine returns an error.
+
+### Bridge driver options
+
+When creating a custom network, the default network driver (i.e. `bridge`) has
+additional options that can be passed. The following are those options and the
+equivalent docker daemon flags used for the docker0 bridge:
+
+| Option                                           | Equivalent  | Description                                            |
+|--------------------------------------------------|-------------|--------------------------------------------------------|
+| `com.docker.network.bridge.name`                 | -           | Bridge name to be used when creating the Linux bridge  |
+| `com.docker.network.bridge.enable_ip_masquerade` | `--ip-masq` | Enable IP masquerading                                 |
+| `com.docker.network.bridge.enable_icc`           | `--icc`     | Enable or Disable Inter Container Connectivity         |
+| `com.docker.network.bridge.host_binding_ipv4`    | `--ip`      | Default IP when binding container ports                |
+| `com.docker.network.driver.mtu`                  | `--mtu`     | Set the containers network MTU                         |
+
+The following arguments can be passed to `docker network create` for any
+network driver, again with their approximate equivalents to `docker daemon`.
+
+| Argument     | Equivalent     | Description                                |
+|--------------|----------------|--------------------------------------------|
+| `--gateway`  | -              | IPv4 or IPv6 Gateway for the master subnet |
+| `--ip-range` | `--fixed-cidr` | Allocate IPs from a range                  |
+| `--internal` | -              | Restrict external access to the network    |
+| `--ipv6`     | `--ipv6`       | Enable IPv6 networking                     |
+| `--subnet`   | `--bip`        | Subnet for network                         |
+
+For example, let's use the `-o` or `--opt` option to specify an IP address binding
+when publishing ports:
+
+```bash
+$ docker network create \
+    -o "com.docker.network.bridge.host_binding_ipv4"="172.19.0.1" \
+    simple-network
+```
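+
+Ports published by containers on this network will then bind to `172.19.0.1`
+by default. As a quick sketch of how that might be exercised (the `nginx`
+image and port mapping are illustrative only):
+
+```bash
+$ docker run -d --network=simple-network -p 80:80 nginx
+```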
+
+### Network internal mode
+
+By default, when you connect a container to an `overlay` network, Docker also
+connects a bridge network to it to provide external connectivity. If you want
+to create an externally isolated `overlay` network, you can specify the
+`--internal` option.
+
+### Network ingress mode
+
+You can create the network which will be used to provide the routing-mesh in the
+swarm cluster. You do so by specifying `--ingress` when creating the network. Only
+one ingress network can be created at a time. The network can be removed only
+if no services depend on it. Any option available when creating an overlay network
+is also available when creating the ingress network, except the `--attachable` option.
+
+```bash
+$ docker network create -d overlay \
+  --subnet=10.11.0.0/16 \
+  --ingress \
+  --opt com.docker.network.mtu=9216 \
+  --opt encrypted=true \
+  my-ingress-network
+```
+
+## Related commands
+
+* [network inspect](network_inspect.md)
+* [network connect](network_connect.md)
+* [network disconnect](network_disconnect.md)
+* [network ls](network_ls.md)
+* [network rm](network_rm.md)
+* [network prune](network_prune.md)
+* [Understand Docker container networks](https://docs.docker.com/engine/userguide/networking/)
diff --git a/components/engine/docs/reference/commandline/network_disconnect.md b/components/engine/docs/reference/commandline/network_disconnect.md
new file mode 100644
index 0000000000..e855894d27
--- /dev/null
+++ b/components/engine/docs/reference/commandline/network_disconnect.md
@@ -0,0 +1,48 @@
+---
+title: "network disconnect"
+description: "The network disconnect command description and usage"
+keywords: "network, disconnect, user-defined"
+---
+
+
+
+# network disconnect
+
+```markdown
+Usage:  docker network disconnect [OPTIONS] NETWORK CONTAINER
+
+Disconnect a container from a network
+
+Options:
+  -f, --force   Force the container to disconnect from a network
+      --help    Print usage
+```
+
+## Description
+
+Disconnects a container from a network. The container must be running to
+disconnect it from the network.
+
+## Examples
+
+```bash
+$ docker network disconnect multi-host-network container1
+```
+
+
+## Related commands
+
+* [network inspect](network_inspect.md)
+* [network connect](network_connect.md)
+* [network create](network_create.md)
+* [network ls](network_ls.md)
+* [network rm](network_rm.md)
+* [network prune](network_prune.md)
+* [Understand Docker container networks](https://docs.docker.com/engine/userguide/networking/)
diff --git a/components/engine/docs/reference/commandline/network_inspect.md b/components/engine/docs/reference/commandline/network_inspect.md
new file mode 100644
index 0000000000..1a856ddcb1
--- /dev/null
+++ b/components/engine/docs/reference/commandline/network_inspect.md
@@ -0,0 +1,307 @@
+---
+title: "network inspect"
+description: "The network inspect command description and usage"
+keywords: "network, inspect, user-defined"
+---
+
+
+
+# network inspect
+
+```markdown
+Usage:  docker network inspect [OPTIONS] NETWORK [NETWORK...]
+
+Display detailed information on one or more networks
+
+Options:
+  -f, --format string   Format the output using the given Go template
+      --help            Print usage
+```
+
+## Description
+
+Returns information about one or more networks. By default, this command renders
+all results in a JSON array.
+
+## Examples
+
+### Inspect the `bridge` network
+
+Connect two containers to the default `bridge` network:
+
+```bash
+$ sudo docker run -itd --name=container1 busybox
+f2870c98fd504370fb86e59f32cd0753b1ac9b69b7d80566ffc7192a82b3ed27
+
+$ sudo docker run -itd --name=container2 busybox
+bda12f8922785d1f160be70736f26c1e331ab8aaf8ed8d56728508f2e2fd4727
+```
+
+The `network inspect` command shows the containers, by ID, in its
+results. For networks backed by a multi-host network driver, such as Overlay,
+this command also shows the container endpoints in other hosts in the
+cluster. These endpoints are represented as "ep-{endpoint-id}" in the output.
+However, for swarm mode networks, only the endpoints that are local to the
+node are shown.
+
+You can specify an alternate format to execute a given
+template for each result. Go's
+[text/template](http://golang.org/pkg/text/template/) package describes all the
+details of the format.
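+
+For instance, to print just the driver of the default bridge network, a
+template like the following works (a sketch; `.Driver` is one of the top-level
+fields visible in the JSON output below):
+
+```bash
+$ docker network inspect --format '{{.Driver}}' bridge
+bridge
+```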
+
+```none
+$ sudo docker network inspect bridge
+
+[
+    {
+        "Name": "bridge",
+        "Id": "b2b1a2cba717161d984383fd68218cf70bbbd17d328496885f7c921333228b0f",
+        "Created": "2016-10-19T04:33:30.360899459Z",
+        "Scope": "local",
+        "Driver": "bridge",
+        "IPAM": {
+            "Driver": "default",
+            "Config": [
+                {
+                    "Subnet": "172.17.42.1/16",
+                    "Gateway": "172.17.42.1"
+                }
+            ]
+        },
+        "Internal": false,
+        "Containers": {
+            "bda12f8922785d1f160be70736f26c1e331ab8aaf8ed8d56728508f2e2fd4727": {
+                "Name": "container2",
+                "EndpointID": "0aebb8fcd2b282abe1365979536f21ee4ceaf3ed56177c628eae9f706e00e019",
+                "MacAddress": "02:42:ac:11:00:02",
+                "IPv4Address": "172.17.0.2/16",
+                "IPv6Address": ""
+            },
+            "f2870c98fd504370fb86e59f32cd0753b1ac9b69b7d80566ffc7192a82b3ed27": {
+                "Name": "container1",
+                "EndpointID": "a00676d9c91a96bbe5bcfb34f705387a33d7cc365bac1a29e4e9728df92d10ad",
+                "MacAddress": "02:42:ac:11:00:01",
+                "IPv4Address": "172.17.0.1/16",
+                "IPv6Address": ""
+            }
+        },
+        "Options": {
+            "com.docker.network.bridge.default_bridge": "true",
+            "com.docker.network.bridge.enable_icc": "true",
+            "com.docker.network.bridge.enable_ip_masquerade": "true",
+            "com.docker.network.bridge.host_binding_ipv4": "0.0.0.0",
+            "com.docker.network.bridge.name": "docker0",
+            "com.docker.network.driver.mtu": "1500"
+        },
+        "Labels": {}
+    }
+]
+```
+
+### Inspect a user-defined network
+
+Create and inspect a user-defined network:
+
+```bash
+$ docker network create simple-network
+
+69568e6336d8c96bbf57869030919f7c69524f71183b44d80948bd3927c87f6a
+```
+
+```none
+$ docker network inspect simple-network
+
+[
+    {
+        "Name": "simple-network",
+        "Id": "69568e6336d8c96bbf57869030919f7c69524f71183b44d80948bd3927c87f6a",
+        "Created": "2016-10-19T04:33:30.360899459Z",
+        "Scope": "local",
+        "Driver": "bridge",
+        "IPAM": {
+            "Driver": "default",
+            "Config": [
+                {
+                    "Subnet": "172.22.0.0/16",
+                    "Gateway": "172.22.0.1"
+                }
+            ]
+        },
+        "Containers": {},
+        "Options": {},
+        "Labels": {}
+    }
+]
+```
+
+### Inspect the `ingress` network
+
+For swarm mode overlay networks `network inspect` also shows the IP address and node name
+of the peers. Peers are the nodes in the swarm cluster which have at least one task attached
+to the network. Node name is of the format `<hostname>-<unique-id>`.
+ +```none +$ docker network inspect ingress + +[ + { + "Name": "ingress", + "Id": "j0izitrut30h975vk4m1u5kk3", + "Created": "2016-11-08T06:49:59.803387552Z", + "Scope": "swarm", + "Driver": "overlay", + "EnableIPv6": false, + "IPAM": { + "Driver": "default", + "Options": null, + "Config": [ + { + "Subnet": "10.255.0.0/16", + "Gateway": "10.255.0.1" + } + ] + }, + "Internal": false, + "Attachable": false, + "Containers": { + "ingress-sbox": { + "Name": "ingress-endpoint", + "EndpointID": "40e002d27b7e5d75f60bc72199d8cae3344e1896abec5eddae9743755fe09115", + "MacAddress": "02:42:0a:ff:00:03", + "IPv4Address": "10.255.0.3/16", + "IPv6Address": "" + } + }, + "Options": { + "com.docker.network.driver.overlay.vxlanid_list": "256" + }, + "Labels": {}, + "Peers": [ + { + "Name": "net-1-1d22adfe4d5c", + "IP": "192.168.33.11" + }, + { + "Name": "net-2-d55d838b34af", + "IP": "192.168.33.12" + }, + { + "Name": "net-3-8473f8140bd9", + "IP": "192.168.33.13" + } + ] + } +] +``` + +### Using the `verbose` option for `network inspect` + +`docker network inspect --verbose` for swarm mode overlay networks shows service-specific +details such as the service's VIP and port mappings. It also shows IPs of service tasks, +and the IPs of the nodes where the tasks are running. + +The following is example output for an overlay network `ov1` that has one service `s1` +attached to it. Service `s1` in this case has three replicas. + +```bash +$ docker network inspect --verbose ov1 +[ + { + "Name": "ov1", + "Id": "ybmyjvao9vtzy3oorxbssj13b", + "Created": "2017-03-13T17:04:39.776106792Z", + "Scope": "swarm", + "Driver": "overlay", + "EnableIPv6": false, + "IPAM": { + "Driver": "default", + "Options": null, + "Config": [ + { + "Subnet": "10.0.0.0/24", + "Gateway": "10.0.0.1" + } + ] + }, + "Internal": false, + "Attachable": false, + "Containers": { + "020403bd88a15f60747fd25d1ad5fa1272eb740e8a97fc547d8ad07b2f721c5e": { + "Name": "s1.1.pjn2ik0sfgkfzed3h0s00gs9o", + "EndpointID": "ad16946f416562d658f3bb30b9830d73ad91ccf6feae44411269cd0ff674714e", + "MacAddress": "02:42:0a:00:00:04", + "IPv4Address": "10.0.0.4/24", + "IPv6Address": "" + } + }, + "Options": { + "com.docker.network.driver.overlay.vxlanid_list": "4097" + }, + "Labels": {}, + "Peers": [ + { + "Name": "net-3-5d3cfd30a58c", + "IP": "192.168.33.13" + }, + { + "Name": "net-1-6ecbc0040a73", + "IP": "192.168.33.11" + }, + { + "Name": "net-2-fb80208efd75", + "IP": "192.168.33.12" + } + ], + "Services": { + "s1": { + "VIP": "10.0.0.2", + "Ports": [], + "LocalLBIndex": 257, + "Tasks": [ + { + "Name": "s1.2.q4hcq2aiiml25ubtrtg4q1txt", + "EndpointID": "040879b027e55fb658e8b60ae3b87c6cdac7d291e86a190a3b5ac6567b26511a", + "EndpointIP": "10.0.0.5", + "Info": { + "Host IP": "192.168.33.11" + } + }, + { + "Name": "s1.3.yawl4cgkp7imkfx469kn9j6lm", + "EndpointID": "106edff9f120efe44068b834e1cddb5b39dd4a3af70211378b2f7a9e562bbad8", + "EndpointIP": "10.0.0.3", + "Info": { + "Host IP": "192.168.33.12" + } + }, + { + "Name": "s1.1.pjn2ik0sfgkfzed3h0s00gs9o", + "EndpointID": "ad16946f416562d658f3bb30b9830d73ad91ccf6feae44411269cd0ff674714e", + "EndpointIP": "10.0.0.4", + "Info": { + "Host IP": "192.168.33.13" + } + } + ] + } + } + } +] +``` + +## Related commands + +* [network disconnect](network_disconnect.md) +* [network connect](network_connect.md) +* [network create](network_create.md) +* [network ls](network_ls.md) +* [network rm](network_rm.md) +* [network prune](network_prune.md) +* [Understand Docker container networks](https://docs.docker.com/engine/userguide/networking/) diff --git 
a/components/engine/docs/reference/commandline/network_ls.md b/components/engine/docs/reference/commandline/network_ls.md new file mode 100644 index 0000000000..8bb8a2c48a --- /dev/null +++ b/components/engine/docs/reference/commandline/network_ls.md @@ -0,0 +1,250 @@ +--- +title: "network ls" +description: "The network ls command description and usage" +keywords: "network, list, user-defined" +--- + + + +# network ls + +```markdown +Usage: docker network ls [OPTIONS] + +List networks + +Aliases: + ls, list + +Options: + -f, --filter filter Provide filter values (e.g. 'driver=bridge') + --format string Pretty-print networks using a Go template + --help Print usage + --no-trunc Do not truncate the output + -q, --quiet Only display network IDs +``` + +## Description + +Lists all the networks the Engine `daemon` knows about. This includes the +networks that span across multiple hosts in a cluster. + +## Examples + +### List all networks + +```bash +$ sudo docker network ls +NETWORK ID NAME DRIVER SCOPE +7fca4eb8c647 bridge bridge local +9f904ee27bf5 none null local +cf03ee007fb4 host host local +78b03ee04fc4 multi-host overlay swarm +``` + +Use the `--no-trunc` option to display the full network id: + +```bash +$ docker network ls --no-trunc +NETWORK ID NAME DRIVER SCOPE +18a2866682b85619a026c81b98a5e375bd33e1b0936a26cc497c283d27bae9b3 none null local +c288470c46f6c8949c5f7e5099b5b7947b07eabe8d9a27d79a9cbf111adcbf47 host host local +7b369448dccbf865d397c8d2be0cda7cf7edc6b0945f77d2529912ae917a0185 bridge bridge local +95e74588f40db048e86320c6526440c504650a1ff3e9f7d60a497c4d2163e5bd foo bridge local +63d1ff1f77b07ca51070a8c227e962238358bd310bde1529cf62e6c307ade161 dev bridge local +``` + +### Filtering + +The filtering flag (`-f` or `--filter`) format is a `key=value` pair. If there +is more than one filter, then pass multiple flags (e.g. `--filter "foo=bar" --filter "bif=baz"`). +Multiple filter flags are combined as an `OR` filter. For example, +`-f type=custom -f type=builtin` returns both `custom` and `builtin` networks. + +The currently supported filters are: + +* driver +* id (network's id) +* label (`label=<key>` or `label=<key>=<value>`) +* name (network's name) +* scope (`swarm|global|local`) +* type (`custom|builtin`) + +#### Driver + +The `driver` filter matches networks based on their driver. + +The following example matches networks with the `bridge` driver: + +```bash +$ docker network ls --filter driver=bridge +NETWORK ID NAME DRIVER SCOPE +db9db329f835 test1 bridge local +f6e212da9dfd test2 bridge local +``` + +#### ID + +The `id` filter matches on all or part of a network's ID. + +The following filter matches all networks with an ID containing the +`63d1ff1f77b0...` string. + +```bash +$ docker network ls --filter id=63d1ff1f77b07ca51070a8c227e962238358bd310bde1529cf62e6c307ade161 +NETWORK ID NAME DRIVER SCOPE +63d1ff1f77b0 dev bridge local +``` + +You can also filter for a substring in an ID as this shows: + +```bash +$ docker network ls --filter id=95e74588f40d +NETWORK ID NAME DRIVER SCOPE +95e74588f40d foo bridge local + +$ docker network ls --filter id=95e +NETWORK ID NAME DRIVER SCOPE +95e74588f40d foo bridge local +``` + +#### Label + +The `label` filter matches networks based on the presence of a `label` alone or a `label` and a +value. + +The following filter matches networks with the `usage` label regardless of its value. 
+ +```bash +$ docker network ls -f "label=usage" +NETWORK ID NAME DRIVER SCOPE +db9db329f835 test1 bridge local +f6e212da9dfd test2 bridge local +``` + +The following filter matches networks with the `usage` label with the `prod` value. + +```bash +$ docker network ls -f "label=usage=prod" +NETWORK ID NAME DRIVER SCOPE +f6e212da9dfd test2 bridge local +``` + +#### Name + +The `name` filter matches on all or part of a network's name. + +The following filter matches all networks with a name containing the `foobar` string. + +```bash +$ docker network ls --filter name=foobar +NETWORK ID NAME DRIVER SCOPE +06e7eef0a170 foobar bridge local +``` + +You can also filter for a substring in a name as this shows: + +```bash +$ docker network ls --filter name=foo +NETWORK ID NAME DRIVER SCOPE +95e74588f40d foo bridge local +06e7eef0a170 foobar bridge local +``` + +#### Scope + +The `scope` filter matches networks based on their scope. + +The following example matches networks with the `swarm` scope: + +```bash +$ docker network ls --filter scope=swarm +NETWORK ID NAME DRIVER SCOPE +xbtm0v4f1lfh ingress overlay swarm +ic6r88twuu92 swarmnet overlay swarm +``` + +The following example matches networks with the `local` scope: + +```bash +$ docker network ls --filter scope=local +NETWORK ID NAME DRIVER SCOPE +e85227439ac7 bridge bridge local +0ca0e19443ed host host local +ca13cc149a36 localnet bridge local +f9e115d2de35 none null local +``` + +#### Type + +The `type` filter supports two values; `builtin` displays predefined networks +(`bridge`, `none`, `host`), whereas `custom` displays user defined networks. + +The following filter matches all user defined networks: + +```bash +$ docker network ls --filter type=custom +NETWORK ID NAME DRIVER SCOPE +95e74588f40d foo bridge local +63d1ff1f77b0 dev bridge local +``` + +This flag is useful for batch cleanup. For example, use this filter +to delete all user defined networks: + +```bash +$ docker network rm `docker network ls --filter type=custom -q` +``` + +A warning will be issued when trying to remove a network that has containers +attached. + +### Formatting + +The formatting option (`--format`) pretty-prints networks +using a Go template. + +Valid placeholders for the Go template are listed below: + +Placeholder | Description +-------------|------------------------------------------------------------------------------------------ +`.ID` | Network ID +`.Name` | Network name +`.Driver` | Network driver +`.Scope` | Network scope (local, global) +`.IPv6` | Whether IPv6 is enabled on the network or not. +`.Internal` | Whether the network is internal or not. +`.Labels` | All labels assigned to the network. +`.Label` | Value of a specific label for this network. For example `{{.Label "project.version"}}` +`.CreatedAt` | Time when the network was created + +When using the `--format` option, the `network ls` command will either +output the data exactly as the template declares or, when using the +`table` directive, includes column headers as well. 
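+ +The `table` directive adds column headers for the chosen fields. The following is an illustrative sketch (the output shown is hypothetical, reusing the built-in networks from the earlier listings): + +```bash +$ docker network ls --format "table {{.ID}}\t{{.Name}}\t{{.Driver}}" +NETWORK ID NAME DRIVER +7fca4eb8c647 bridge bridge +cf03ee007fb4 host host +9f904ee27bf5 none null +```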
+ +The following example uses a template without headers and outputs the +`ID` and `Driver` entries separated by a colon for all networks: + +```bash +$ docker network ls --format "{{.ID}}: {{.Driver}}" +afaaab448eb2: bridge +d1584f8dc718: host +391df270dc66: null +``` + +## Related commands + +* [network inspect](network_inspect.md) +* [network connect](network_connect.md) +* [network disconnect](network_disconnect.md) +* [network create](network_create.md) +* [network rm](network_rm.md) +* [network prune](network_prune.md) +* [Understand Docker container networks](https://docs.docker.com/engine/userguide/networking/) diff --git a/components/engine/docs/reference/commandline/network_prune.md b/components/engine/docs/reference/commandline/network_prune.md new file mode 100644 index 0000000000..9ef541061d --- /dev/null +++ b/components/engine/docs/reference/commandline/network_prune.md @@ -0,0 +1,104 @@ +--- +title: "network prune" +description: "Remove unused networks" +keywords: "network, prune, delete" +--- + +# network prune + +```markdown +Usage: docker network prune [OPTIONS] + +Remove all unused networks + +Options: + --filter filter Provide filter values (e.g. 'until=<timestamp>') + -f, --force Do not prompt for confirmation + --help Print usage +``` + +## Description + +Remove all unused networks. Unused networks are those which are not referenced +by any containers. + +## Examples + +```bash +$ docker network prune + +WARNING! This will remove all networks not used by at least one container. +Are you sure you want to continue? [y/N] y +Deleted Networks: +n1 +n2 +``` + +### Filtering + +The filtering flag (`-f` or `--filter`) format is of "key=value". If there is more +than one filter, then pass multiple flags (e.g., `--filter "foo=bar" --filter "bif=baz"`) + +The currently supported filters are: + +* until (`<timestamp>`) - only remove networks created before given timestamp +* label (`label=<key>`, `label=<key>=<value>`, `label!=<key>`, or `label!=<key>=<value>`) - only remove networks with (or without, in case `label!=...` is used) the specified labels. + +The `until` filter can be Unix timestamps, date formatted +timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed +relative to the daemon machine’s time. Supported formats for date +formatted time stamps include RFC3339Nano, RFC3339, `2006-01-02T15:04:05`, +`2006-01-02T15:04:05.999999999`, `2006-01-02Z07:00`, and `2006-01-02`. The local +timezone on the daemon will be used if you do not provide either a `Z` or a +`+-00:00` timezone offset at the end of the timestamp. When providing Unix +timestamps enter seconds[.nanoseconds], where seconds is the number of seconds +that have elapsed since January 1, 1970 (midnight UTC/GMT), not counting leap +seconds (aka Unix epoch or Unix time), and the optional .nanoseconds field is a +fraction of a second no more than nine digits long. + +The `label` filter accepts two formats. One is the `label=...` (`label=<key>` or `label=<key>=<value>`), +which removes networks with the specified labels. The other +format is the `label!=...` (`label!=<key>` or `label!=<key>=<value>`), which removes +networks without the specified labels. + +The following removes networks created more than 5 minutes ago. 
Note that +system networks such as `bridge`, `host`, and `none` will never be pruned: + +```none +$ docker network ls + +NETWORK ID NAME DRIVER SCOPE +7430df902d7a bridge bridge local +ea92373fd499 foo-1-day-ago bridge local +ab53663ed3c7 foo-1-min-ago bridge local +97b91972bc3b host host local +f949d337b1f5 none null local + +$ docker network prune --force --filter until=5m + +Deleted Networks: +foo-1-day-ago + +$ docker network ls + +NETWORK ID NAME DRIVER SCOPE +7430df902d7a bridge bridge local +ab53663ed3c7 foo-1-min-ago bridge local +97b91972bc3b host host local +f949d337b1f5 none null local +``` + +## Related commands + +* [network disconnect](network_disconnect.md) +* [network connect](network_connect.md) +* [network create](network_create.md) +* [network ls](network_ls.md) +* [network inspect](network_inspect.md) +* [network rm](network_rm.md) +* [Understand Docker container networks](https://docs.docker.com/engine/userguide/networking/) +* [system df](system_df.md) +* [container prune](container_prune.md) +* [image prune](image_prune.md) +* [volume prune](volume_prune.md) +* [system prune](system_prune.md) diff --git a/components/engine/docs/reference/commandline/network_rm.md b/components/engine/docs/reference/commandline/network_rm.md new file mode 100644 index 0000000000..aab487a044 --- /dev/null +++ b/components/engine/docs/reference/commandline/network_rm.md @@ -0,0 +1,68 @@ +--- +title: "network rm" +description: "the network rm command description and usage" +keywords: "network, rm, user-defined" +--- + + + +# network rm + +```markdown +Usage: docker network rm NETWORK [NETWORK...] + +Remove one or more networks + +Aliases: + rm, remove + +Options: + --help Print usage +``` + +## Description + +Removes one or more networks by name or identifier. To remove a network, +you must first disconnect any containers connected to it. + +## Examples + +### Remove a network + +To remove the network named 'my-network': + +```bash +$ docker network rm my-network +``` + +### Remove multiple networks + +To delete multiple networks in a single `docker network rm` command, provide +multiple network names or ids. The following example deletes a network with id +`3695c422697f` and a network named `my-network`: + +```bash +$ docker network rm 3695c422697f my-network +``` + +When you specify multiple networks, the command attempts to delete each in turn. +If the deletion of one network fails, the command continues to the next on the +list and tries to delete that. The command reports success or failure for each +deletion. 
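+ +For illustration, suppose the network `my-network` still has a container attached while `3695c422697f` does not; a run might then look like the following sketch (the exact error text is hypothetical and varies by daemon version): + +```bash +$ docker network rm 3695c422697f my-network + +3695c422697f +Error response from daemon: network my-network has active endpoints +```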
+ +## Related commands + +* [network disconnect](network_disconnect.md) +* [network connect](network_connect.md) +* [network create](network_create.md) +* [network ls](network_ls.md) +* [network inspect](network_inspect.md) +* [network prune](network_prune.md) +* [Understand Docker container networks](https://docs.docker.com/engine/userguide/networking/) diff --git a/components/engine/docs/reference/commandline/node.md b/components/engine/docs/reference/commandline/node.md new file mode 100644 index 0000000000..3a7d4b3a76 --- /dev/null +++ b/components/engine/docs/reference/commandline/node.md @@ -0,0 +1,42 @@ + +--- +title: "node" +description: "The node command description and usage" +keywords: "node" +--- + + + +# node + +```markdown +Usage: docker node COMMAND + +Manage Swarm nodes + +Options: + --help Print usage + +Commands: + demote Demote one or more nodes from manager in the swarm + inspect Display detailed information on one or more nodes + ls List nodes in the swarm + promote Promote one or more nodes to manager in the swarm + ps List tasks running on one or more nodes, defaults to current node + rm Remove one or more nodes from the swarm + update Update a node + +Run 'docker node COMMAND --help' for more information on a command. +``` + +## Description + +Manage nodes. + diff --git a/components/engine/docs/reference/commandline/node_demote.md b/components/engine/docs/reference/commandline/node_demote.md new file mode 100644 index 0000000000..e6e59d8945 --- /dev/null +++ b/components/engine/docs/reference/commandline/node_demote.md @@ -0,0 +1,47 @@ +--- +title: "node demote" +description: "The node demote command description and usage" +keywords: "node, demote" +--- + + + +# node demote + +```markdown +Usage: docker node demote NODE [NODE...] + +Demote one or more nodes from manager in the swarm + +Options: + --help Print usage + +``` + +## Description + +Demotes an existing manager so that it is no longer a manager. This command +targets a docker engine that is a manager in the swarm. + + +## Examples + +```bash +$ docker node demote <node name> +``` + +## Related commands + +* [node inspect](node_inspect.md) +* [node ls](node_ls.md) +* [node promote](node_promote.md) +* [node ps](node_ps.md) +* [node rm](node_rm.md) +* [node update](node_update.md) diff --git a/components/engine/docs/reference/commandline/node_inspect.md b/components/engine/docs/reference/commandline/node_inspect.md new file mode 100644 index 0000000000..6ee9e17e6f --- /dev/null +++ b/components/engine/docs/reference/commandline/node_inspect.md @@ -0,0 +1,167 @@ +--- +title: "node inspect" +description: "The node inspect command description and usage" +keywords: "node, inspect" +--- + + + +# node inspect + +```markdown +Usage: docker node inspect [OPTIONS] self|NODE [NODE...] + +Display detailed information on one or more nodes + +Options: + -f, --format string Format the output using the given Go template + --help Print usage + --pretty Print the information in a human friendly format +``` + +## Description + +Returns information about a node. By default, this command renders all results +in a JSON array. You can specify an alternate format to execute a +given template for each result. Go's +[text/template](http://golang.org/pkg/text/template/) package describes all the +details of the format. 
+ +## Examples + +### Inspect a node + +```none +$ docker node inspect swarm-manager + +[ +{ + "ID": "e216jshn25ckzbvmwlnh5jr3g", + "Version": { + "Index": 10 + }, + "CreatedAt": "2017-05-16T22:52:44.9910662Z", + "UpdatedAt": "2017-05-16T22:52:45.230878043Z", + "Spec": { + "Role": "manager", + "Availability": "active" + }, + "Description": { + "Hostname": "swarm-manager", + "Platform": { + "Architecture": "x86_64", + "OS": "linux" + }, + "Resources": { + "NanoCPUs": 1000000000, + "MemoryBytes": 1039843328 + }, + "Engine": { + "EngineVersion": "17.06.0-ce", + "Plugins": [ + { + "Type": "Volume", + "Name": "local" + }, + { + "Type": "Network", + "Name": "overlay" + }, + { + "Type": "Network", + "Name": "null" + }, + { + "Type": "Network", + "Name": "host" + }, + { + "Type": "Network", + "Name": "bridge" + }, + { + "Type": "Network", + "Name": "overlay" + } + ] + }, + "TLSInfo": { + "TrustRoot": "-----BEGIN CERTIFICATE-----\nMIIBazCCARCgAwIBAgIUOzgqU4tA2q5Yv1HnkzhSIwGyIBswCgYIKoZIzj0EAwIw\nEzERMA8GA1UEAxMIc3dhcm0tY2EwHhcNMTcwNTAyMDAyNDAwWhcNMzcwNDI3MDAy\nNDAwWjATMREwDwYDVQQDEwhzd2FybS1jYTBZMBMGByqGSM49AgEGCCqGSM49AwEH\nA0IABMbiAmET+HZyve35ujrnL2kOLBEQhFDZ5MhxAuYs96n796sFlfxTxC1lM/2g\nAh8DI34pm3JmHgZxeBPKUURJHKWjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMB\nAf8EBTADAQH/MB0GA1UdDgQWBBS3sjTJOcXdkls6WSY2rTx1KIJueTAKBggqhkjO\nPQQDAgNJADBGAiEAoeVWkaXgSUAucQmZ3Yhmx22N/cq1EPBgYHOBZmHt0NkCIQC3\nzONcJ/+WA21OXtb+vcijpUOXtNjyHfcox0N8wsLDqQ==\n-----END CERTIFICATE-----\n", + "CertIssuerSubject": "MBMxETAPBgNVBAMTCHN3YXJtLWNh", + "CertIssuerPublicKey": "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAExuICYRP4dnK97fm6OucvaQ4sERCEUNnkyHEC5iz3qfv3qwWV/FPELWUz/aACHwMjfimbcmYeBnF4E8pRREkcpQ==" + } + }, + "Status": { + "State": "ready", + "Addr": "168.0.32.137" + }, + "ManagerStatus": { + "Leader": true, + "Reachability": "reachable", + "Addr": "168.0.32.137:2377" + } +} +] +``` + +### Specify an output format + +```none +$ docker node inspect --format '{{ .ManagerStatus.Leader }}' self + +false + +$ docker node inspect --pretty self +ID: e216jshn25ckzbvmwlnh5jr3g +Hostname: swarm-manager +Joined at: 2017-05-16 22:52:44.9910662 +0000 utc +Status: + State: Ready + Availability: Active + Address: 172.17.0.2 +Manager Status: + Address: 172.17.0.2:2377 + Raft Status: Reachable + Leader: Yes +Platform: + Operating System: linux + Architecture: x86_64 +Resources: + CPUs: 4 + Memory: 7.704 GiB +Plugins: + Network: overlay, bridge, null, host, overlay + Volume: local +Engine Version: 17.06.0-ce +TLS Info: + TrustRoot: +-----BEGIN CERTIFICATE----- +MIIBazCCARCgAwIBAgIUOzgqU4tA2q5Yv1HnkzhSIwGyIBswCgYIKoZIzj0EAwIw +EzERMA8GA1UEAxMIc3dhcm0tY2EwHhcNMTcwNTAyMDAyNDAwWhcNMzcwNDI3MDAy +NDAwWjATMREwDwYDVQQDEwhzd2FybS1jYTBZMBMGByqGSM49AgEGCCqGSM49AwEH +A0IABMbiAmET+HZyve35ujrnL2kOLBEQhFDZ5MhxAuYs96n796sFlfxTxC1lM/2g +Ah8DI34pm3JmHgZxeBPKUURJHKWjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMB +Af8EBTADAQH/MB0GA1UdDgQWBBS3sjTJOcXdkls6WSY2rTx1KIJueTAKBggqhkjO +PQQDAgNJADBGAiEAoeVWkaXgSUAucQmZ3Yhmx22N/cq1EPBgYHOBZmHt0NkCIQC3 +zONcJ/+WA21OXtb+vcijpUOXtNjyHfcox0N8wsLDqQ== +-----END CERTIFICATE----- + + Issuer Public Key: MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAExuICYRP4dnK97fm6OucvaQ4sERCEUNnkyHEC5iz3qfv3qwWV/FPELWUz/aACHwMjfimbcmYeBnF4E8pRREkcpQ== + Issuer Subject: MBMxETAPBgNVBAMTCHN3YXJtLWNh +``` + +## Related commands + +* [node demote](node_demote.md) +* [node ls](node_ls.md) +* [node promote](node_promote.md) +* [node ps](node_ps.md) +* [node rm](node_rm.md) +* [node update](node_update.md) diff --git a/components/engine/docs/reference/commandline/node_ls.md 
b/components/engine/docs/reference/commandline/node_ls.md new file mode 100644 index 0000000000..224642c2e5 --- /dev/null +++ b/components/engine/docs/reference/commandline/node_ls.md @@ -0,0 +1,171 @@ +--- +title: "node ls" +description: "The node ls command description and usage" +keywords: "node, list" +--- + + + +# node ls + +```markdown +Usage: docker node ls [OPTIONS] + +List nodes in the swarm + +Aliases: + ls, list + +Options: + -f, --filter filter Filter output based on conditions provided + --format string Pretty-print nodes using a Go template + --help Print usage + -q, --quiet Only display IDs +``` + +## Description + +Lists all the nodes that the Docker Swarm manager knows about. You can filter +using the `-f` or `--filter` flag. Refer to the [filtering](#filtering) section +for more information about available filter options. + +## Examples + +```bash +$ docker node ls + +ID HOSTNAME STATUS AVAILABILITY MANAGER STATUS +1bcef6utixb0l0ca7gxuivsj0 swarm-worker2 Ready Active +38ciaotwjuritcdtn9npbnkuz swarm-worker1 Ready Active +e216jshn25ckzbvmwlnh5jr3g * swarm-manager1 Ready Active Leader +``` +> **Note**: +> In the above example output, there is a hidden column of `.Self` that indicates whether the +> node is the same node as the current docker daemon. A `*` (e.g., `e216jshn25ckzbvmwlnh5jr3g *`) +> means this node is the current docker daemon. + + +### Filtering + +The filtering flag (`-f` or `--filter`) format is of "key=value". If there is more +than one filter, then pass multiple flags (e.g., `--filter "foo=bar" --filter "bif=baz"`) + +The currently supported filters are: + +* [id](node_ls.md#id) +* [label](node_ls.md#label) +* [membership](node_ls.md#membership) +* [name](node_ls.md#name) +* [role](node_ls.md#role) + +#### id + +The `id` filter matches all or part of a node's id. + +```bash +$ docker node ls -f id=1 + +ID HOSTNAME STATUS AVAILABILITY MANAGER STATUS +1bcef6utixb0l0ca7gxuivsj0 swarm-worker2 Ready Active +``` + +#### label + +The `label` filter matches nodes based on engine labels and on the presence of a `label` alone or a `label` and a value. Node labels are currently not used for filtering. + +The following filter matches nodes with the `foo` label regardless of its value. + +```bash +$ docker node ls -f "label=foo" + +ID HOSTNAME STATUS AVAILABILITY MANAGER STATUS +1bcef6utixb0l0ca7gxuivsj0 swarm-worker2 Ready Active +``` + +#### membership + +The `membership` filter matches nodes based on the presence of a `membership` and a value +`accepted` or `pending`. + +The following filter matches nodes with the `membership` of `accepted`. + +```bash +$ docker node ls -f "membership=accepted" + +ID HOSTNAME STATUS AVAILABILITY MANAGER STATUS +1bcef6utixb0l0ca7gxuivsj0 swarm-worker2 Ready Active +38ciaotwjuritcdtn9npbnkuz swarm-worker1 Ready Active +``` + +#### name + +The `name` filter matches on all or part of a node hostname. + +The following filter matches nodes with a name equal to the `swarm-manager1` string. + +```bash +$ docker node ls -f name=swarm-manager1 + +ID HOSTNAME STATUS AVAILABILITY MANAGER STATUS +e216jshn25ckzbvmwlnh5jr3g * swarm-manager1 Ready Active Leader +``` + +#### role + +The `role` filter matches nodes based on the presence of a `role` and a value `worker` or `manager`. + +The following filter matches nodes with the `manager` role. 
+ +```bash +$ docker node ls -f "role=manager" + +ID HOSTNAME STATUS AVAILABILITY MANAGER STATUS +e216jshn25ckzbvmwlnh5jr3g * swarm-manager1 Ready Active Leader +``` + +### Formatting + +The formatting option (`--format`) pretty-prints nodes +using a Go template. + +Valid placeholders for the Go template are listed below: + +Placeholder | Description +-----------------|------------------------------------------------------------------------------------------ +`.ID` | Node ID +`.Self` | Node of the daemon (`true/false`, `true` indicates that the node is the same as the current docker daemon) +`.Hostname` | Node hostname +`.Status` | Node status +`.Availability` | Node availability ("active", "pause", or "drain") +`.ManagerStatus` | Manager status of the node +`.TLSStatus` | TLS status of the node ("Ready", or "Needs Rotation" if the node has a TLS certificate signed by an old CA) + +When using the `--format` option, the `node ls` command will either +output the data exactly as the template declares or, when using the +`table` directive, includes column headers as well. + +The following example uses a template without headers and outputs the +`ID`, `Hostname`, and `TLS Status` entries separated by a colon for all nodes: + +```bash +$ docker node ls --format "{{.ID}}: {{.Hostname}} {{.TLSStatus}}" +e216jshn25ckzbvmwlnh5jr3g: swarm-manager1 Ready +35o6tiywb700jesrt3dmllaza: swarm-worker1 Needs Rotation +``` + + +## Related commands + +* [node demote](node_demote.md) +* [node inspect](node_inspect.md) +* [node promote](node_promote.md) +* [node ps](node_ps.md) +* [node rm](node_rm.md) +* [node update](node_update.md) diff --git a/components/engine/docs/reference/commandline/node_promote.md b/components/engine/docs/reference/commandline/node_promote.md new file mode 100644 index 0000000000..1ebbe9550c --- /dev/null +++ b/components/engine/docs/reference/commandline/node_promote.md @@ -0,0 +1,45 @@ +--- +title: "node promote" +description: "The node promote command description and usage" +keywords: "node, promote" +--- + + + +# node promote + +```markdown +Usage: docker node promote NODE [NODE...] + +Promote one or more nodes to manager in the swarm + +Options: + --help Print usage +``` + +## Description + +Promotes a node to manager. This command targets a docker engine that is a +manager in the swarm. + +## Examples + +```bash +$ docker node promote <node name> +``` + +## Related commands + +* [node demote](node_demote.md) +* [node inspect](node_inspect.md) +* [node ls](node_ls.md) +* [node ps](node_ps.md) +* [node rm](node_rm.md) +* [node update](node_update.md) diff --git a/components/engine/docs/reference/commandline/node_ps.md b/components/engine/docs/reference/commandline/node_ps.md new file mode 100644 index 0000000000..0bf76e0d8e --- /dev/null +++ b/components/engine/docs/reference/commandline/node_ps.md @@ -0,0 +1,148 @@ +--- +title: "node ps" +description: "The node ps command description and usage" +keywords: node, tasks, ps +aliases: ["/engine/reference/commandline/node_tasks/"] +--- + + + +# node ps + +```markdown +Usage: docker node ps [OPTIONS] [NODE...] + +List tasks running on one or more nodes, defaults to current node. + +Options: + -f, --filter filter Filter output based on conditions provided + --format string Pretty-print tasks using a Go template + --help Print usage + --no-resolve Do not map IDs to Names + --no-trunc Do not truncate output + -q, --quiet Only display task IDs +``` + +## Description + +Lists all the tasks on a node that Docker knows about. 
You can filter using the `-f` or `--filter` flag. Refer to the [filtering](#filtering) section for more information about available filter options. + +## Examples + +```bash +$ docker node ps swarm-manager1 +NAME IMAGE NODE DESIRED STATE CURRENT STATE +redis.1.7q92v0nr1hcgts2amcjyqg3pq redis:3.0.6 swarm-manager1 Running Running 5 hours +redis.6.b465edgho06e318egmgjbqo4o redis:3.0.6 swarm-manager1 Running Running 29 seconds +redis.7.bg8c07zzg87di2mufeq51a2qp redis:3.0.6 swarm-manager1 Running Running 5 seconds +redis.9.dkkual96p4bb3s6b10r7coxxt redis:3.0.6 swarm-manager1 Running Running 5 seconds +redis.10.0tgctg8h8cech4w0k0gwrmr23 redis:3.0.6 swarm-manager1 Running Running 5 seconds +``` + +### Filtering + +The filtering flag (`-f` or `--filter`) format is of "key=value". If there is more +than one filter, then pass multiple flags (e.g., `--filter "foo=bar" --filter "bif=baz"`) + +The currently supported filters are: + +* [name](#name) +* [id](#id) +* [label](#label) +* [desired-state](#desired-state) + +#### name + +The `name` filter matches on all or part of a task's name. + +The following filter matches all tasks with a name containing the `redis` string. + +```bash +$ docker node ps -f name=redis swarm-manager1 + +NAME IMAGE NODE DESIRED STATE CURRENT STATE +redis.1.7q92v0nr1hcgts2amcjyqg3pq redis:3.0.6 swarm-manager1 Running Running 5 hours +redis.6.b465edgho06e318egmgjbqo4o redis:3.0.6 swarm-manager1 Running Running 29 seconds +redis.7.bg8c07zzg87di2mufeq51a2qp redis:3.0.6 swarm-manager1 Running Running 5 seconds +redis.9.dkkual96p4bb3s6b10r7coxxt redis:3.0.6 swarm-manager1 Running Running 5 seconds +redis.10.0tgctg8h8cech4w0k0gwrmr23 redis:3.0.6 swarm-manager1 Running Running 5 seconds +``` + +#### id + +The `id` filter matches a task's id. + +```bash +$ docker node ps -f id=bg8c07zzg87di2mufeq51a2qp swarm-manager1 + +NAME IMAGE NODE DESIRED STATE CURRENT STATE +redis.7.bg8c07zzg87di2mufeq51a2qp redis:3.0.6 swarm-manager1 Running Running 5 seconds +``` + +#### label + +The `label` filter matches tasks based on the presence of a `label` alone or a `label` and a +value. + +The following filter matches tasks with the `usage` label regardless of its value. + +```bash +$ docker node ps -f "label=usage" + +NAME IMAGE NODE DESIRED STATE CURRENT STATE +redis.6.b465edgho06e318egmgjbqo4o redis:3.0.6 swarm-manager1 Running Running 10 minutes +redis.7.bg8c07zzg87di2mufeq51a2qp redis:3.0.6 swarm-manager1 Running Running 9 minutes +``` + + +#### desired-state + +The `desired-state` filter can take the values `running`, `shutdown`, or `accepted`. + + +### Formatting + +The formatting option (`--format`) pretty-prints tasks +using a Go template. + +Valid placeholders for the Go template are listed below: + +Placeholder | Description +----------------|------------------------------------------------------------------------------------------ +`.Name` | Task name +`.Image` | Task image +`.Node` | Node ID +`.DesiredState` | Desired state of the task (`running`, `shutdown`, or `accepted`) +`.CurrentState` | Current state of the task +`.Error` | Error +`.Ports` | Task published ports + +When using the `--format` option, the `node ps` command will either +output the data exactly as the template declares or, when using the +`table` directive, includes column headers as well. 
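+ +The `table` directive adds column headers as well. The following is a hypothetical sketch, reusing one of the `redis` tasks from the examples above: + +```bash +$ docker node ps --format "table {{.Name}}\t{{.Image}}\t{{.DesiredState}}" swarm-manager1 +NAME IMAGE DESIRED STATE +redis.1.7q92v0nr1hcgts2amcjyqg3pq redis:3.0.6 Running +```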
+ +The following example uses a template without headers and outputs the +`Name` and `Image` entries separated by a colon for all tasks: + +```bash +$ docker node ps --format "{{.Name}}: {{.Image}}" +top.1: busybox +top.2: busybox +top.3: busybox +``` + +## Related commands + +* [node demote](node_demote.md) +* [node inspect](node_inspect.md) +* [node ls](node_ls.md) +* [node promote](node_promote.md) +* [node rm](node_rm.md) +* [node update](node_update.md) diff --git a/components/engine/docs/reference/commandline/node_rm.md b/components/engine/docs/reference/commandline/node_rm.md new file mode 100644 index 0000000000..c2fdd4d156 --- /dev/null +++ b/components/engine/docs/reference/commandline/node_rm.md @@ -0,0 +1,80 @@ +--- +title: "node rm" +description: "The node rm command description and usage" +keywords: "node, remove" +--- + + + +# node rm + +```markdown +Usage: docker node rm [OPTIONS] NODE [NODE...] + +Remove one or more nodes from the swarm + +Aliases: + rm, remove + +Options: + -f, --force Force remove a node from the swarm + --help Print usage +``` + +## Description + +When run from a manager node, removes the specified nodes from a swarm. + + +## Examples + +### Remove a stopped node from the swarm + +```bash +$ docker node rm swarm-node-02 + +Node swarm-node-02 removed from swarm +``` + +### Attempt to remove a running node from a swarm + +Removes the specified nodes from the swarm, but only if the nodes are in the +down state. If you attempt to remove an active node, you will receive an error: + +```none +$ docker node rm swarm-node-03 + +Error response from daemon: rpc error: code = 9 desc = node swarm-node-03 is not +down and can't be removed +``` + +### Forcibly remove an inaccessible node from a swarm + +If you lose access to a worker node or need to shut it down because it has been +compromised or is not behaving as expected, you can use the `--force` option. +This may cause transient errors or interruptions, depending on the type of task +being run on the node. + +```bash +$ docker node rm --force swarm-node-03 + +Node swarm-node-03 removed from swarm +``` + +A manager node must be demoted to a worker node (using `docker node demote`) +before you can remove it from the swarm. + +## Related commands + +* [node demote](node_demote.md) +* [node inspect](node_inspect.md) +* [node ls](node_ls.md) +* [node promote](node_promote.md) +* [node ps](node_ps.md) +* [node update](node_update.md) diff --git a/components/engine/docs/reference/commandline/node_update.md b/components/engine/docs/reference/commandline/node_update.md new file mode 100644 index 0000000000..11117c7a9b --- /dev/null +++ b/components/engine/docs/reference/commandline/node_update.md @@ -0,0 +1,77 @@ +--- +title: "node update" +description: "The node update command description and usage" +keywords: "resources, update, dynamically" +--- + + + +# node update + +```markdown +Usage: docker node update [OPTIONS] NODE + +Update a node + +Options: + --availability string Availability of the node ("active"|"pause"|"drain") + --help Print usage + --label-add value Add or update a node label (key=value) (default []) + --label-rm value Remove a node label if exists (default []) + --role string Role of the node ("worker"|"manager") +``` + +## Description + +Update metadata about a node, such as its availability, labels, or roles. + +## Examples + +### Add label metadata to a node + +Add metadata to a swarm node using node labels. 
You can specify a node label as +a key with an empty value: + +``` bash +$ docker node update --label-add foo worker1 +``` + +To add multiple labels to a node, pass the `--label-add` flag for each label: + +```bash +$ docker node update --label-add foo --label-add bar worker1 +``` + +When you [create a service](service_create.md), +you can use node labels as a constraint. A constraint limits the nodes where the +scheduler deploys tasks for a service. + +For example, to add a `type` label to identify nodes where the scheduler should +deploy message queue service tasks: + +``` bash +$ docker node update --label-add type=queue worker1 +``` + +The labels you set for nodes using `docker node update` apply only to the node +entity within the swarm. Do not confuse them with the docker daemon labels for +[dockerd](https://docs.docker.com/engine/userguide/labels-custom-metadata/#daemon-labels). + +For more information about labels, refer to [apply custom +metadata](https://docs.docker.com/engine/userguide/labels-custom-metadata/). + +## Related commands + +* [node demote](node_demote.md) +* [node inspect](node_inspect.md) +* [node ls](node_ls.md) +* [node promote](node_promote.md) +* [node ps](node_ps.md) +* [node rm](node_rm.md) diff --git a/components/engine/docs/reference/commandline/pause.md b/components/engine/docs/reference/commandline/pause.md new file mode 100644 index 0000000000..5bb652b923 --- /dev/null +++ b/components/engine/docs/reference/commandline/pause.md @@ -0,0 +1,48 @@ +--- +title: "pause" +description: "The pause command description and usage" +keywords: "cgroups, container, suspend, SIGSTOP" +--- + + + +# pause + +```markdown +Usage: docker pause CONTAINER [CONTAINER...] + +Pause all processes within one or more containers + +Options: + --help Print usage +``` + +## Description + +The `docker pause` command suspends all processes in the specified containers. +On Linux, this uses the cgroups freezer. Traditionally, when suspending a process +the `SIGSTOP` signal is used, which is observable by the process being suspended. +With the cgroups freezer the process is unaware, and unable to capture, +that it is being suspended, and subsequently resumed. On Windows, only Hyper-V +containers can be paused. + +See the +[cgroups freezer documentation](https://www.kernel.org/doc/Documentation/cgroup-v1/freezer-subsystem.txt) +for further details. + +## Examples + +```bash +$ docker pause my_container +``` + +## Related commands + +* [unpause](unpause.md) diff --git a/components/engine/docs/reference/commandline/plugin.md b/components/engine/docs/reference/commandline/plugin.md new file mode 100644 index 0000000000..75082477d1 --- /dev/null +++ b/components/engine/docs/reference/commandline/plugin.md @@ -0,0 +1,44 @@ +--- +title: "plugin" +description: "The plugin command description and usage" +keywords: "plugin" +--- + + + +# plugin + +```markdown +Usage: docker plugin COMMAND + +Manage plugins + +Options: + --help Print usage + +Commands: + create Create a plugin from a rootfs and configuration. Plugin data directory must contain config.json and rootfs directory. + disable Disable a plugin + enable Enable a plugin + inspect Display detailed information on one or more plugins + install Install a plugin + ls List plugins + push Push a plugin to a registry + rm Remove one or more plugins + set Change settings for a plugin + upgrade Upgrade an existing plugin + +Run 'docker plugin COMMAND --help' for more information on a command. + +``` + +## Description + +Manage plugins. 
diff --git a/components/engine/docs/reference/commandline/plugin_create.md b/components/engine/docs/reference/commandline/plugin_create.md new file mode 100644 index 0000000000..5f16ac3e43 --- /dev/null +++ b/components/engine/docs/reference/commandline/plugin_create.md @@ -0,0 +1,66 @@ +--- +title: "plugin create" +description: "the plugin create command description and usage" +keywords: "plugin, create" +--- + + + +# plugin create + +```markdown +Usage: docker plugin create [OPTIONS] PLUGIN PLUGIN-DATA-DIR + +Create a plugin from a rootfs and configuration. Plugin data directory must contain config.json and rootfs directory. + +Options: + --compress Compress the context using gzip + --help Print usage +``` + +## Description + +Creates a plugin. Before creating the plugin, prepare the plugin's root filesystem as well as +[the config.json](../../extend/config.md) + +## Examples + +The following example shows how to create a sample `plugin`. + +```bash +$ ls -ls /home/pluginDir + +total 4 +4 -rw-r--r-- 1 root root 431 Nov 7 01:40 config.json +0 drwxr-xr-x 19 root root 420 Nov 7 01:40 rootfs + +$ docker plugin create plugin /home/pluginDir + +plugin + +$ docker plugin ls + +ID NAME TAG DESCRIPTION ENABLED +672d8144ec02 plugin latest A sample plugin for Docker false +``` + +The plugin can subsequently be enabled for local use or pushed to the public registry. + +## Related commands + +* [plugin disable](plugin_disable.md) +* [plugin enable](plugin_enable.md) +* [plugin inspect](plugin_inspect.md) +* [plugin install](plugin_install.md) +* [plugin ls](plugin_ls.md) +* [plugin push](plugin_push.md) +* [plugin rm](plugin_rm.md) +* [plugin set](plugin_set.md) +* [plugin upgrade](plugin_upgrade.md) diff --git a/components/engine/docs/reference/commandline/plugin_disable.md b/components/engine/docs/reference/commandline/plugin_disable.md new file mode 100644 index 0000000000..fa1327b0c7 --- /dev/null +++ b/components/engine/docs/reference/commandline/plugin_disable.md @@ -0,0 +1,69 @@ +--- +title: "plugin disable" +description: "the plugin disable command description and usage" +keywords: "plugin, disable" +--- + + + +# plugin disable + +```markdown +Usage: docker plugin disable [OPTIONS] PLUGIN + +Disable a plugin + +Options: + -f, --force Force the disable of an active plugin + --help Print usage +``` + +## Description + +Disables a plugin. The plugin must be installed before it can be disabled, +see [`docker plugin install`](plugin_install.md). Without the `-f` option, +a plugin that has references (e.g., volumes, networks) cannot be disabled. 
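+ +A minimal sketch of forcing the disable in that situation, assuming the plugin from the examples below is still referenced by a volume: + +```bash +$ docker plugin disable -f tiborvass/sample-volume-plugin + +tiborvass/sample-volume-plugin +```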
+ +## Examples + +The following example shows that the `sample-volume-plugin` plugin is installed +and enabled: + +```bash +$ docker plugin ls + +ID NAME TAG DESCRIPTION ENABLED +69553ca1d123 tiborvass/sample-volume-plugin latest A test plugin for Docker true +``` + +To disable the plugin, use the following command: + +```bash +$ docker plugin disable tiborvass/sample-volume-plugin + +tiborvass/sample-volume-plugin + +$ docker plugin ls + +ID NAME TAG DESCRIPTION ENABLED +69553ca1d123 tiborvass/sample-volume-plugin latest A test plugin for Docker false +``` + +## Related commands + +* [plugin create](plugin_create.md) +* [plugin enable](plugin_enable.md) +* [plugin inspect](plugin_inspect.md) +* [plugin install](plugin_install.md) +* [plugin ls](plugin_ls.md) +* [plugin push](plugin_push.md) +* [plugin rm](plugin_rm.md) +* [plugin set](plugin_set.md) +* [plugin upgrade](plugin_upgrade.md) diff --git a/components/engine/docs/reference/commandline/plugin_enable.md b/components/engine/docs/reference/commandline/plugin_enable.md new file mode 100644 index 0000000000..2098a115ad --- /dev/null +++ b/components/engine/docs/reference/commandline/plugin_enable.md @@ -0,0 +1,68 @@ +--- +title: "plugin enable" +description: "the plugin enable command description and usage" +keywords: "plugin, enable" +--- + + + +# plugin enable + +```markdown +Usage: docker plugin enable [OPTIONS] PLUGIN + +Enable a plugin + +Options: + --help Print usage + --timeout int HTTP client timeout (in seconds) +``` + +## Description + +Enables a plugin. The plugin must be installed before it can be enabled, +see [`docker plugin install`](plugin_install.md). + +## Examples + +The following example shows that the `sample-volume-plugin` plugin is installed, +but disabled: + +```bash +$ docker plugin ls + +ID NAME TAG DESCRIPTION ENABLED +69553ca1d123 tiborvass/sample-volume-plugin latest A test plugin for Docker false +``` + +To enable the plugin, use the following command: + +```bash +$ docker plugin enable tiborvass/sample-volume-plugin + +tiborvass/sample-volume-plugin + +$ docker plugin ls + +ID NAME TAG DESCRIPTION ENABLED +69553ca1d123 tiborvass/sample-volume-plugin latest A test plugin for Docker true +``` + +## Related commands + +* [plugin create](plugin_create.md) +* [plugin disable](plugin_disable.md) +* [plugin inspect](plugin_inspect.md) +* [plugin install](plugin_install.md) +* [plugin ls](plugin_ls.md) +* [plugin push](plugin_push.md) +* [plugin rm](plugin_rm.md) +* [plugin set](plugin_set.md) +* [plugin upgrade](plugin_upgrade.md) diff --git a/components/engine/docs/reference/commandline/plugin_inspect.md b/components/engine/docs/reference/commandline/plugin_inspect.md new file mode 100644 index 0000000000..e1a6403343 --- /dev/null +++ b/components/engine/docs/reference/commandline/plugin_inspect.md @@ -0,0 +1,166 @@ +--- +title: "plugin inspect" +description: "The plugin inspect command description and usage" +keywords: "plugin, inspect" +--- + + + +# plugin inspect + +```markdown +Usage: docker plugin inspect [OPTIONS] PLUGIN [PLUGIN...] + +Display detailed information on one or more plugins + +Options: + -f, --format string Format the output using the given Go template + --help Print usage +``` + +## Description + +Returns information about a plugin. By default, this command renders all results +in a JSON array. 
+ +## Examples + + +```none +$ docker plugin inspect tiborvass/sample-volume-plugin:latest + +{ + "Id": "8c74c978c434745c3ade82f1bc0acf38d04990eaf494fa507c16d9f1daa99c21", + "Name": "tiborvass/sample-volume-plugin:latest", + "PluginReference": "tiborvass/sample-volume-plugin:latest", + "Enabled": true, + "Config": { + "Mounts": [ + { + "Name": "", + "Description": "", + "Settable": null, + "Source": "/data", + "Destination": "/data", + "Type": "bind", + "Options": [ + "shared", + "rbind" + ] + }, + { + "Name": "", + "Description": "", + "Settable": null, + "Source": null, + "Destination": "/foobar", + "Type": "tmpfs", + "Options": null + } + ], + "Env": [ + "DEBUG=1" + ], + "Args": null, + "Devices": null + }, + "Manifest": { + "ManifestVersion": "v0", + "Description": "A test plugin for Docker", + "Documentation": "https://docs.docker.com/engine/extend/plugins/", + "Interface": { + "Types": [ + "docker.volumedriver/1.0" + ], + "Socket": "plugins.sock" + }, + "Entrypoint": [ + "plugin-sample-volume-plugin", + "/data" + ], + "Workdir": "", + "User": { + }, + "Network": { + "Type": "host" + }, + "Capabilities": null, + "Mounts": [ + { + "Name": "", + "Description": "", + "Settable": null, + "Source": "/data", + "Destination": "/data", + "Type": "bind", + "Options": [ + "shared", + "rbind" + ] + }, + { + "Name": "", + "Description": "", + "Settable": null, + "Source": null, + "Destination": "/foobar", + "Type": "tmpfs", + "Options": null + } + ], + "Devices": [ + { + "Name": "device", + "Description": "a host device to mount", + "Settable": null, + "Path": "/dev/cpu_dma_latency" + } + ], + "Env": [ + { + "Name": "DEBUG", + "Description": "If set, prints debug messages", + "Settable": null, + "Value": "1" + } + ], + "Args": { + "Name": "args", + "Description": "command line arguments", + "Settable": null, + "Value": [ + + ] + } + } +} +``` + +(output formatted for readability) + +### Formatting the output + +```bash +$ docker plugin inspect -f '{{.Id}}' tiborvass/sample-volume-plugin:latest + +8c74c978c434745c3ade82f1bc0acf38d04990eaf494fa507c16d9f1daa99c21 +``` + +## Related commands + +* [plugin create](plugin_create.md) +* [plugin enable](plugin_enable.md) +* [plugin disable](plugin_disable.md) +* [plugin install](plugin_install.md) +* [plugin ls](plugin_ls.md) +* [plugin push](plugin_push.md) +* [plugin rm](plugin_rm.md) +* [plugin set](plugin_set.md) +* [plugin upgrade](plugin_upgrade.md) diff --git a/components/engine/docs/reference/commandline/plugin_install.md b/components/engine/docs/reference/commandline/plugin_install.md new file mode 100644 index 0000000000..78d9a61b75 --- /dev/null +++ b/components/engine/docs/reference/commandline/plugin_install.md @@ -0,0 +1,75 @@ +--- +title: "plugin install" +description: "the plugin install command description and usage" +keywords: "plugin, install" +--- + + + +# plugin install + +```markdown +Usage: docker plugin install [OPTIONS] PLUGIN [KEY=VALUE...] + +Install a plugin + +Options: + --alias string Local name for plugin + --disable Do not enable the plugin on install + --disable-content-trust Skip image verification (default true) + --grant-all-permissions Grant all permissions necessary to run the plugin + --help Print usage +``` + +## Description + +Installs and enables a plugin. Docker looks first for the plugin on your Docker +host. If the plugin does not exist locally, then the plugin is pulled from +the registry. 
Note that the minimum required registry version to distribute +plugins is 2.3.0. + +## Examples + +The following example installs the `vieux/sshfs` plugin and [sets](plugin_set.md) its +`DEBUG` environment variable to `1`. Installing pulls the plugin from Docker +Hub, prompts the user to accept the list of privileges that the plugin needs, +sets the plugin's parameters, and enables the plugin. + +```bash +$ docker plugin install vieux/sshfs DEBUG=1 + +Plugin "vieux/sshfs" is requesting the following privileges: + - network: [host] + - device: [/dev/fuse] + - capabilities: [CAP_SYS_ADMIN] +Do you grant the above permissions? [y/N] y +vieux/sshfs +``` + +After the plugin is installed, it appears in the list of plugins: + +```bash +$ docker plugin ls + +ID NAME TAG DESCRIPTION ENABLED +69553ca1d123 vieux/sshfs latest sshFS plugin for Docker true +``` + +## Related commands + +* [plugin create](plugin_create.md) +* [plugin disable](plugin_disable.md) +* [plugin enable](plugin_enable.md) +* [plugin inspect](plugin_inspect.md) +* [plugin ls](plugin_ls.md) +* [plugin push](plugin_push.md) +* [plugin rm](plugin_rm.md) +* [plugin set](plugin_set.md) +* [plugin upgrade](plugin_upgrade.md) diff --git a/components/engine/docs/reference/commandline/plugin_ls.md b/components/engine/docs/reference/commandline/plugin_ls.md new file mode 100644 index 0000000000..3ba29fee03 --- /dev/null +++ b/components/engine/docs/reference/commandline/plugin_ls.md @@ -0,0 +1,118 @@ +--- +title: "plugin ls" +description: "The plugin ls command description and usage" +keywords: "plugin, list" +--- + + + +# plugin ls + +```markdown +Usage: docker plugin ls [OPTIONS] + +List plugins + +Aliases: + ls, list + +Options: + -f, --filter filter Provide filter values (e.g. 'enabled=true') + --format string Pretty-print plugins using a Go template + --help Print usage + --no-trunc Don't truncate output + -q, --quiet Only display plugin IDs +``` + +## Description + +Lists all the plugins that are currently installed. You can install plugins +using the [`docker plugin install`](plugin_install.md) command. +You can also filter using the `-f` or `--filter` flag. +Refer to the [filtering](#filtering) section for more information about available filter options. + +## Examples + +```bash +$ docker plugin ls + +ID NAME TAG DESCRIPTION ENABLED +69553ca1d123 tiborvass/sample-volume-plugin latest A test plugin for Docker true +``` + +### Filtering + +The filtering flag (`-f` or `--filter`) format is of "key=value". If there is more +than one filter, then pass multiple flags (e.g., `--filter "foo=bar" --filter "bif=baz"`) + +The currently supported filters are: + +* enabled (boolean - true or false, 0 or 1) +* capability (string - currently `volumedriver`, `networkdriver`, `ipamdriver`, `logdriver`, `metricscollector`, or `authz`) + +#### enabled + +The `enabled` filter matches plugins that are enabled or disabled. + +#### capability + +The `capability` filter matches on plugin capabilities. One plugin +might have multiple capabilities. Currently `volumedriver`, `networkdriver`, +`ipamdriver`, `logdriver`, `metricscollector`, and `authz` are supported capabilities. + +```bash +$ docker plugin install --disable tiborvass/no-remove + +tiborvass/no-remove + +$ docker plugin ls --filter enabled=true + +NAME TAG DESCRIPTION ENABLED +``` + +### Formatting + +The formatting option (`--format`) pretty-prints plugins +using a Go template. 
+ +Valid placeholders for the Go template are listed below: + +Placeholder | Description +---------------|------------------------------------------------------------------------------------------ +`.ID` | Plugin ID +`.Name` | Plugin name +`.Description` | Plugin description +`.Enabled` | Whether plugin is enabled or not +`.PluginReference` | The reference used to push/pull from a registry + +When using the `--format` option, the `plugin ls` command will either +output the data exactly as the template declares or, when using the +`table` directive, includes column headers as well. + +The following example uses a template without headers and outputs the +`ID` and `Name` entries separated by a colon for all plugins: + +```bash +$ docker plugin ls --format "{{.ID}}: {{.Name}}" + +4be01827a72e: tiborvass/no-remove +``` + +## Related commands + +* [plugin create](plugin_create.md) +* [plugin disable](plugin_disable.md) +* [plugin enable](plugin_enable.md) +* [plugin inspect](plugin_inspect.md) +* [plugin install](plugin_install.md) +* [plugin push](plugin_push.md) +* [plugin rm](plugin_rm.md) +* [plugin set](plugin_set.md) +* [plugin upgrade](plugin_upgrade.md) diff --git a/components/engine/docs/reference/commandline/plugin_push.md b/components/engine/docs/reference/commandline/plugin_push.md new file mode 100644 index 0000000000..f444ed4d4d --- /dev/null +++ b/components/engine/docs/reference/commandline/plugin_push.md @@ -0,0 +1,57 @@ +--- +title: "plugin push" +description: "the plugin push command description and usage" +keywords: "plugin, push" +--- + + + +# plugin push + +```markdown +Usage: docker plugin push [OPTIONS] PLUGIN[:TAG] + +Push a plugin to a registry + +Options: + --disable-content-trust Skip image signing (default true) + --help Print usage +``` + +## Description + +After you have created a plugin using `docker plugin create` and the plugin is +ready for distribution, use `docker plugin push` to share your plugins to Docker +Hub or a self-hosted registry. + +Registry credentials are managed by [docker login](login.md). + +## Examples + +The following example shows how to push a sample `user/plugin`. + +```bash +$ docker plugin ls + +ID NAME TAG DESCRIPTION ENABLED +69553ca1d456 user/plugin latest A sample plugin for Docker false + +$ docker plugin push user/plugin +``` + +## Related commands + +* [plugin create](plugin_create.md) +* [plugin disable](plugin_disable.md) +* [plugin enable](plugin_enable.md) +* [plugin inspect](plugin_inspect.md) +* [plugin install](plugin_install.md) +* [plugin ls](plugin_ls.md) +* [plugin rm](plugin_rm.md) +* [plugin set](plugin_set.md) +* [plugin upgrade](plugin_upgrade.md) diff --git a/components/engine/docs/reference/commandline/plugin_rm.md b/components/engine/docs/reference/commandline/plugin_rm.md new file mode 100644 index 0000000000..d599ed8fc1 --- /dev/null +++ b/components/engine/docs/reference/commandline/plugin_rm.md @@ -0,0 +1,63 @@ +--- +title: "plugin rm" +description: "the plugin rm command description and usage" +keywords: "plugin, rm" +--- + + + +# plugin rm + +```markdown +Usage: docker plugin rm [OPTIONS] PLUGIN [PLUGIN...] + +Remove one or more plugins + +Aliases: + rm, remove + +Options: + -f, --force Force the removal of an active plugin + --help Print usage +``` + +## Description + +Removes a plugin. 
You cannot remove a plugin if it is enabled; you must first disable +it using [`docker plugin disable`](plugin_disable.md) before removing +it. Alternatively, you can use `--force`, but this is not recommended, since it can affect +the functioning of running containers that use the plugin. + +## Examples + +The following example disables and removes the `sample-volume-plugin:latest` +plugin: + +```bash +$ docker plugin disable tiborvass/sample-volume-plugin + +tiborvass/sample-volume-plugin + +$ docker plugin rm tiborvass/sample-volume-plugin:latest + +tiborvass/sample-volume-plugin +``` + +## Related commands + +* [plugin create](plugin_create.md) +* [plugin disable](plugin_disable.md) +* [plugin enable](plugin_enable.md) +* [plugin inspect](plugin_inspect.md) +* [plugin install](plugin_install.md) +* [plugin ls](plugin_ls.md) +* [plugin push](plugin_push.md) +* [plugin set](plugin_set.md) +* [plugin upgrade](plugin_upgrade.md) diff --git a/components/engine/docs/reference/commandline/plugin_set.md b/components/engine/docs/reference/commandline/plugin_set.md new file mode 100644 index 0000000000..ea7d92e735 --- /dev/null +++ b/components/engine/docs/reference/commandline/plugin_set.md @@ -0,0 +1,172 @@ +--- +title: "plugin set" +description: "the plugin set command description and usage" +keywords: "plugin, set" +--- + + + +# plugin set + +```markdown +Usage: docker plugin set PLUGIN KEY=VALUE [KEY=VALUE...] + +Change settings for a plugin + +Options: + --help Print usage +``` + +## Description + +Change settings for a plugin. The plugin must be disabled. + +The settings currently supported are: + * environment variables + * source of mounts + * path of devices + * args + +## What is settable? + +It is easy to see which fields are settable by looking at the `settable` +entries in the plugin manifest. + +Here is an extract of a plugin manifest: + +``` +{ + "config": { + ... + "args": { + "name": "myargs", + "settable": ["value"], + "value": ["foo", "bar"] + }, + "env": [ + { + "name": "DEBUG", + "settable": ["value"], + "value": "0" + }, + { + "name": "LOGGING", + "value": "1" + } + ], + "devices": [ + { + "name": "mydevice", + "path": "/dev/foo", + "settable": ["path"] + } + ], + "mounts": [ + { + "destination": "/baz", + "name": "mymount", + "options": ["rbind"], + "settable": ["source"], + "source": "/foo", + "type": "bind" + } + ], + ... + } +} +``` + +In this example, the `value` of the `DEBUG` environment variable is settable, +as is the `source` of the `mymount` mount, the `path` of `mydevice`, and the `value` of `myargs`. + +By contrast, the `LOGGING` environment variable doesn't have a `settable` field, which means the user cannot change it. + +## Examples + +### Change an environment variable + +The following example changes the environment variable `DEBUG` on the +`sample-volume-plugin` plugin. + +```bash +$ docker plugin inspect -f {{.Settings.Env}} tiborvass/sample-volume-plugin +[DEBUG=0] + +$ docker plugin set tiborvass/sample-volume-plugin DEBUG=1 + +$ docker plugin inspect -f {{.Settings.Env}} tiborvass/sample-volume-plugin +[DEBUG=1] +``` + +### Change the source of a mount + +The following example changes the source of the `mymount` mount on +the `myplugin` plugin. 
+
+### Change the source of a mount
+
+The following example changes the source of the `mymount` mount on
+the `myplugin` plugin.
+
+```bash
+$ docker plugin inspect -f '{{with $mount := index .Settings.Mounts 0}}{{$mount.Source}}{{end}}' myplugin
+/foo
+
+$ docker plugin set myplugin mymount.source=/bar
+
+$ docker plugin inspect -f '{{with $mount := index .Settings.Mounts 0}}{{$mount.Source}}{{end}}' myplugin
+/bar
+```
+
+> **Note**: Since only `source` is settable in `mymount`,
+> `docker plugin set myplugin mymount=/bar` would work too.
+
+### Change a device path
+
+The following example changes the path of the `mydevice` device on
+the `myplugin` plugin.
+
+```bash
+$ docker plugin inspect -f '{{with $device := index .Settings.Devices 0}}{{$device.Path}}{{end}}' myplugin
+
+/dev/foo
+
+$ docker plugin set myplugin mydevice.path=/dev/bar
+
+$ docker plugin inspect -f '{{with $device := index .Settings.Devices 0}}{{$device.Path}}{{end}}' myplugin
+
+/dev/bar
+```
+
+> **Note**: Since only `path` is settable in `mydevice`,
+> `docker plugin set myplugin mydevice=/dev/bar` would work too.
+
+### Change the value of the arguments
+
+The following example changes the value of the args on the `myplugin` plugin.
+
+```bash
+$ docker plugin inspect -f '{{.Settings.Args}}' myplugin
+
+["foo", "bar"]
+
+$ docker plugin set myplugin myargs="foo bar baz"
+
+$ docker plugin inspect -f '{{.Settings.Args}}' myplugin
+
+["foo", "bar", "baz"]
+```
+
+## Related commands
+
+* [plugin create](plugin_create.md)
+* [plugin disable](plugin_disable.md)
+* [plugin enable](plugin_enable.md)
+* [plugin inspect](plugin_inspect.md)
+* [plugin install](plugin_install.md)
+* [plugin ls](plugin_ls.md)
+* [plugin push](plugin_push.md)
+* [plugin rm](plugin_rm.md)
+* [plugin upgrade](plugin_upgrade.md)
diff --git a/components/engine/docs/reference/commandline/plugin_upgrade.md b/components/engine/docs/reference/commandline/plugin_upgrade.md
new file mode 100644
index 0000000000..8c79ebdabf
--- /dev/null
+++ b/components/engine/docs/reference/commandline/plugin_upgrade.md
@@ -0,0 +1,100 @@
+---
+title: "plugin upgrade"
+description: "the plugin upgrade command description and usage"
+keywords: "plugin, upgrade"
+---
+
+
+
+# plugin upgrade
+
+```markdown
+Usage:  docker plugin upgrade [OPTIONS] PLUGIN [REMOTE]
+
+Upgrade a plugin
+
+Options:
+      --disable-content-trust   Skip image verification (default true)
+      --grant-all-permissions   Grant all permissions necessary to run the plugin
+      --help                    Print usage
+      --skip-remote-check       Do not check if specified remote plugin matches existing plugin image
+```
+
+## Description
+
+Upgrades an existing plugin to the specified remote plugin image. If no remote
+is specified, Docker will re-pull the current image and use the updated version.
+All existing references to the plugin will continue to work.
+The plugin must be disabled before running the upgrade.
+
+## Examples
+
+The following example installs the `vieux/sshfs` plugin, uses it to create and
+use a volume, then upgrades the plugin.
+
+```bash
+$ docker plugin install vieux/sshfs:next DEBUG=1
+
+Plugin "vieux/sshfs:next" is requesting the following privileges:
+ - network: [host]
+ - device: [/dev/fuse]
+ - capabilities: [CAP_SYS_ADMIN]
+Do you grant the above permissions? [y/N] y
+vieux/sshfs:next
+
+$ docker volume create -d vieux/sshfs:next -o sshcmd=root@1.2.3.4:/tmp/shared -o password=XXX sshvolume
+
+sshvolume
+
+$ docker run -it -v sshvolume:/data alpine sh -c "touch /data/hello"
+
+$ docker plugin disable -f vieux/sshfs:next
+
+vieux/sshfs:next
+
+# Here docker volume ls doesn't show 'sshvolume', since the plugin is disabled
+$ docker volume ls
+
+DRIVER              VOLUME NAME
+
+$ docker plugin upgrade vieux/sshfs:next vieux/sshfs:next
+
+Plugin "vieux/sshfs:next" is requesting the following privileges:
+ - network: [host]
+ - device: [/dev/fuse]
+ - capabilities: [CAP_SYS_ADMIN]
+Do you grant the above permissions? [y/N] y
+Upgrade plugin vieux/sshfs:next to vieux/sshfs:next
+
+$ docker plugin enable vieux/sshfs:next
+
+vieux/sshfs:next
+
+$ docker volume ls
+
+DRIVER              VOLUME NAME
+vieux/sshfs:next    sshvolume
+
+$ docker run -it -v sshvolume:/data alpine sh -c "ls /data"
+
+hello
+```
+
+## Related commands
+
+* [plugin create](plugin_create.md)
+* [plugin disable](plugin_disable.md)
+* [plugin enable](plugin_enable.md)
+* [plugin inspect](plugin_inspect.md)
+* [plugin install](plugin_install.md)
+* [plugin ls](plugin_ls.md)
+* [plugin push](plugin_push.md)
+* [plugin rm](plugin_rm.md)
+* [plugin set](plugin_set.md)
diff --git a/components/engine/docs/reference/commandline/port.md b/components/engine/docs/reference/commandline/port.md
new file mode 100644
index 0000000000..c38763ea34
--- /dev/null
+++ b/components/engine/docs/reference/commandline/port.md
@@ -0,0 +1,47 @@
+---
+title: "port"
+description: "The port command description and usage"
+keywords: "port, mapping, container"
+---
+
+
+
+# port
+
+```markdown
+Usage:  docker port CONTAINER [PRIVATE_PORT[/PROTO]]
+
+List port mappings or a specific mapping for the container
+
+Options:
+      --help   Print usage
+```
+
+## Examples
+
+### Show all mapped ports
+
+You can find out all the ports mapped by not specifying a `PRIVATE_PORT`, or
+just a specific mapping:
+
+```bash
+$ docker ps
+CONTAINER ID  IMAGE           COMMAND  CREATED         STATUS         PORTS                                            NAMES
+b650456536c7  busybox:latest  top      54 minutes ago  Up 54 minutes  0.0.0.0:1234->9876/tcp, 0.0.0.0:4321->7890/tcp   test
+$ docker port test
+7890/tcp -> 0.0.0.0:4321
+9876/tcp -> 0.0.0.0:1234
+$ docker port test 7890/tcp
+0.0.0.0:4321
+$ docker port test 7890/udp
+2014/06/24 11:53:36 Error: No public port '7890/udp' published for test
+$ docker port test 7890
+0.0.0.0:4321
+```
diff --git a/components/engine/docs/reference/commandline/ps.md b/components/engine/docs/reference/commandline/ps.md
new file mode 100644
index 0000000000..51bab4834d
--- /dev/null
+++ b/components/engine/docs/reference/commandline/ps.md
@@ -0,0 +1,432 @@
+---
+title: "ps"
+description: "The ps command description and usage"
+keywords: "container, running, list"
+---
+
+
+
+# ps
+
+```markdown
+Usage: docker ps [OPTIONS]
+
+List containers
+
+Options:
+  -a, --all             Show all containers (default shows just running)
+  -f, --filter value    Filter output based on conditions provided (default [])
+                        - ancestor=(<image-name>[:tag]|<image-id>|<image@digest>)
+                          containers created from an image or a descendant.
+                        - before=(<container-name>|<container-id>)
+                        - expose=(<port>[/<proto>]|<startport-endport>/[<proto>])
+                        - exited=<int> an exit code of <int>
+                        - health=(starting|healthy|unhealthy|none)
+                        - id=<ID> a container's ID
+                        - isolation=(`default`|`process`|`hyperv`) (Windows daemon only)
+                        - is-task=(true|false)
+                        - label=<key> or label=<key>=<value>
+                        - name=<string> a container's name
+                        - network=(<network-id>|<network-name>)
+                        - publish=(<port>[/<proto>]|<startport-endport>/[<proto>])
+                        - since=(<container-name>|<container-id>)
+                        - status=(created|restarting|removing|running|paused|exited)
+                        - volume=(<volume name>|<mount point destination>)
+      --format string   Pretty-print containers using a Go template
+      --help            Print usage
+  -n, --last int        Show n last created containers (includes all states) (default -1)
+  -l, --latest          Show the latest created container (includes all states)
+      --no-trunc        Don't truncate output
+  -q, --quiet           Only display numeric IDs
+  -s, --size            Display total file sizes
+```
+
+## Examples
+
+### Prevent truncating output
+
+Running `docker ps --no-trunc` shows 2 linked containers without truncating
+the output:
+
+```bash
+$ docker ps --no-trunc
+
+CONTAINER ID   IMAGE                        COMMAND               CREATED          STATUS          PORTS           NAMES
+4c01db0b339c   ubuntu:12.04                 bash                  17 seconds ago   Up 16 seconds   3300-3310/tcp   webapp
+d7886598dbe2   crosbymichael/redis:latest   /redis-server --dir   33 minutes ago   Up 33 minutes   6379/tcp        redis,webapp/db
+```
+
+### Show both running and stopped containers
+
+The `docker ps` command only shows running containers by default. To see all
+containers, use the `-a` (or `--all`) flag:
+
+```bash
+$ docker ps -a
+```
+
+`docker ps` groups exposed ports into a single range if possible. E.g., a
+container that exposes TCP ports `100, 101, 102` displays `100-102/tcp` in
+the `PORTS` column.
+
+### Filtering
+
+The filtering flag (`-f` or `--filter`) format is a `key=value` pair. If there is more
+than one filter, then pass multiple flags (e.g. `--filter "foo=bar" --filter "bif=baz"`).
+
+The currently supported filters are:
+
+* id (container's id)
+* label (`label=<key>` or `label=<key>=<value>`)
+* name (container's name)
+* exited (int - the code of exited containers. Only useful with `--all`)
+* status (`created|restarting|running|removing|paused|exited|dead`)
+* ancestor (`<image-name>[:<tag>]`, `<image id>` or `<image@digest>`) - filters containers that were created from the given image or a descendant.
+* before (container's id or name) - filters containers created before given id or name
+* since (container's id or name) - filters containers created since given id or name
+* isolation (`default|process|hyperv`) (Windows daemon only)
+* volume (volume name or mount point) - filters containers that mount volumes.
+* network (network id or name) - filters containers connected to the provided network
+* health (starting|healthy|unhealthy|none) - filters containers based on healthcheck status
+* publish=(container's published port) - filters published ports by containers
+* expose=(container's exposed port) - filters exposed ports by containers
+
+#### label
+
+The `label` filter matches containers based on the presence of a `label` alone or a `label` and a
+value.
+
+The following filter matches containers with the `color` label regardless of its value.
+
+```bash
+$ docker ps --filter "label=color"
+
+CONTAINER ID   IMAGE     COMMAND   CREATED          STATUS          PORTS   NAMES
+673394ef1d4c   busybox   "top"     47 seconds ago   Up 45 seconds           nostalgic_shockley
+d85756f57265   busybox   "top"     52 seconds ago   Up 51 seconds           high_albattani
+```
+
+The following filter matches containers with the `color` label with the `blue` value.
+
+```bash
+$ docker ps --filter "label=color=blue"
+
+CONTAINER ID   IMAGE     COMMAND   CREATED              STATUS              PORTS   NAMES
+d85756f57265   busybox   "top"     About a minute ago   Up About a minute           high_albattani
+```
+
+#### name
+
+The `name` filter matches on all or part of a container's name.
+
+The following filter matches all containers with a name containing the `nostalgic_stallman` string.
+
+```bash
+$ docker ps --filter "name=nostalgic_stallman"
+
+CONTAINER ID   IMAGE     COMMAND   CREATED         STATUS         PORTS   NAMES
+9b6247364a03   busybox   "top"     2 minutes ago   Up 2 minutes           nostalgic_stallman
+```
+
+You can also filter for a substring in a name as this shows:
+
+```bash
+$ docker ps --filter "name=nostalgic"
+
+CONTAINER ID   IMAGE     COMMAND   CREATED          STATUS          PORTS   NAMES
+715ebfcee040   busybox   "top"     3 seconds ago    Up 1 second             i_am_nostalgic
+9b6247364a03   busybox   "top"     7 minutes ago    Up 7 minutes            nostalgic_stallman
+673394ef1d4c   busybox   "top"     38 minutes ago   Up 38 minutes           nostalgic_shockley
+```
+
+#### exited
+
+The `exited` filter matches containers by exit status code. For example, to
+filter for containers that have exited successfully:
+
+```bash
+$ docker ps -a --filter 'exited=0'
+
+CONTAINER ID   IMAGE             COMMAND                CREATED       STATUS                   PORTS                      NAMES
+ea09c3c82f6e   registry:latest   /srv/run.sh            2 weeks ago   Exited (0) 2 weeks ago   127.0.0.1:5000->5000/tcp   desperate_leakey
+106ea823fe4e   fedora:latest     /bin/sh -c 'bash -l'   2 weeks ago   Exited (0) 2 weeks ago                              determined_albattani
+48ee228c9464   fedora:20         bash                   2 weeks ago   Exited (0) 2 weeks ago                              tender_torvalds
+```
+
+#### Filter by exit signal
+
+You can use a filter to locate containers that exited with a status of `137`,
+meaning a `SIGKILL(9)` killed them.
+
+```none
+$ docker ps -a --filter 'exited=137'
+
+CONTAINER ID   IMAGE           COMMAND                CREATED          STATUS                       PORTS   NAMES
+b3e1c0ed5bfe   ubuntu:latest   "sleep 1000"           12 seconds ago   Exited (137) 5 seconds ago           grave_kowalevski
+a2eb5558d669   redis:latest    "/entrypoint.sh redi   2 hours ago      Exited (137) 2 hours ago             sharp_lalande
+```
+
+Any of these events result in a `137` status:
+
+* the `init` process of the container is killed manually
+* `docker kill` kills the container
+* Docker daemon restarts which kills all running containers
+
+#### status
+
+The `status` filter matches containers by status. You can filter using
+`created`, `restarting`, `running`, `removing`, `paused`, `exited` and `dead`. For example,
+to filter for `running` containers:
+
+```bash
+$ docker ps --filter status=running
+
+CONTAINER ID   IMAGE     COMMAND   CREATED          STATUS          PORTS   NAMES
+715ebfcee040   busybox   "top"     16 minutes ago   Up 16 minutes           i_am_nostalgic
+d5c976d3c462   busybox   "top"     23 minutes ago   Up 23 minutes           top
+9b6247364a03   busybox   "top"     24 minutes ago   Up 24 minutes           nostalgic_stallman
+```
+
+To filter for `paused` containers:
+
+```bash
+$ docker ps --filter status=paused
+
+CONTAINER ID   IMAGE     COMMAND   CREATED             STATUS                      PORTS   NAMES
+673394ef1d4c   busybox   "top"     About an hour ago   Up About an hour (Paused)           nostalgic_shockley
+```
+
+#### ancestor
+
+The `ancestor` filter matches containers based on their image or a descendant of
+it. The filter supports the following image representations:
+
+- image
+- image:tag
+- image:tag@digest
+- short-id
+- full-id
+
+If you don't specify a `tag`, the `latest` tag is used.
+For example, to filter for containers that use the latest `ubuntu` image:
+
+```bash
+$ docker ps --filter ancestor=ubuntu
+
+CONTAINER ID   IMAGE       COMMAND   CREATED              STATUS              PORTS   NAMES
+919e1179bdb8   ubuntu-c1   "top"     About a minute ago   Up About a minute           admiring_lovelace
+5d1e4a540723   ubuntu-c2   "top"     About a minute ago   Up About a minute           admiring_sammet
+82a598284012   ubuntu      "top"     3 minutes ago        Up 3 minutes                sleepy_bose
+bab2a34ba363   ubuntu      "top"     3 minutes ago        Up 3 minutes                focused_yonath
+```
+
+Match containers based on the `ubuntu-c1` image which, in this case, is a child
+of `ubuntu`:
+
+```bash
+$ docker ps --filter ancestor=ubuntu-c1
+
+CONTAINER ID   IMAGE       COMMAND   CREATED              STATUS              PORTS   NAMES
+919e1179bdb8   ubuntu-c1   "top"     About a minute ago   Up About a minute           admiring_lovelace
+```
+
+Match containers based on the `ubuntu` version `12.04.5` image:
+
+```bash
+$ docker ps --filter ancestor=ubuntu:12.04.5
+
+CONTAINER ID   IMAGE            COMMAND   CREATED         STATUS         PORTS   NAMES
+82a598284012   ubuntu:12.04.5   "top"     3 minutes ago   Up 3 minutes           sleepy_bose
+```
+
+The following matches containers based on the layer `d0e008c6cf02` or an image
+that has this layer in its layer stack.
+
+```bash
+$ docker ps --filter ancestor=d0e008c6cf02
+
+CONTAINER ID   IMAGE            COMMAND   CREATED         STATUS         PORTS   NAMES
+82a598284012   ubuntu:12.04.5   "top"     3 minutes ago   Up 3 minutes           sleepy_bose
+```
+
+#### Create time
+
+##### before
+
+The `before` filter shows only containers created before the container with
+a given id or name. For example, given these containers:
+
+```bash
+$ docker ps
+
+CONTAINER ID   IMAGE     COMMAND   CREATED              STATUS              PORTS   NAMES
+9c3527ed70ce   busybox   "top"     14 seconds ago       Up 15 seconds               desperate_dubinsky
+4aace5031105   busybox   "top"     48 seconds ago       Up 49 seconds               focused_hamilton
+6e63f6ff38b0   busybox   "top"     About a minute ago   Up About a minute           distracted_fermat
+```
+
+Filtering with `before` would give:
+
+```bash
+$ docker ps -f before=9c3527ed70ce
+
+CONTAINER ID   IMAGE     COMMAND   CREATED              STATUS              PORTS   NAMES
+4aace5031105   busybox   "top"     About a minute ago   Up About a minute           focused_hamilton
+6e63f6ff38b0   busybox   "top"     About a minute ago   Up About a minute           distracted_fermat
+```
+
+##### since
+
+The `since` filter shows only containers created since the container with a
+given id or name. For example, with the same containers as in the `before` filter:
+
+```bash
+$ docker ps -f since=6e63f6ff38b0
+
+CONTAINER ID   IMAGE     COMMAND   CREATED          STATUS          PORTS   NAMES
+9c3527ed70ce   busybox   "top"     10 minutes ago   Up 10 minutes           desperate_dubinsky
+4aace5031105   busybox   "top"     10 minutes ago   Up 10 minutes           focused_hamilton
+```
+
+#### volume
+
+The `volume` filter shows only containers that mount a specific volume or have
+a volume mounted in a specific path:
+
+```bash
+$ docker ps --filter volume=remote-volume --format "table {{.ID}}\t{{.Mounts}}"
+CONTAINER ID        MOUNTS
+9c3527ed70ce        remote-volume
+
+$ docker ps --filter volume=/data --format "table {{.ID}}\t{{.Mounts}}"
+CONTAINER ID        MOUNTS
+9c3527ed70ce        remote-volume
+```
+
+#### network
+
+The `network` filter shows only containers that are connected to a network with
+a given name or id.
+
+The following filter matches all containers that are connected to a network
+with a name containing `net1`.
+
+```bash
+$ docker run -d --net=net1 --name=test1 ubuntu top
+$ docker run -d --net=net2 --name=test2 ubuntu top
+
+$ docker ps --filter network=net1
+
+CONTAINER ID   IMAGE    COMMAND   CREATED          STATUS          PORTS   NAMES
+9d4893ed80fe   ubuntu   "top"     10 minutes ago   Up 10 minutes           test1
+```
+
+The network filter matches on both the network's name and id. The following
+example shows all containers that are attached to the `net1` network, using
+the network id as a filter:
+
+```bash
+$ docker network inspect --format "{{.ID}}" net1
+
+8c0b4110ae930dbe26b258de9bc34a03f98056ed6f27f991d32919bfe401d7c5
+
+$ docker ps --filter network=8c0b4110ae930dbe26b258de9bc34a03f98056ed6f27f991d32919bfe401d7c5
+
+CONTAINER ID   IMAGE    COMMAND   CREATED          STATUS          PORTS   NAMES
+9d4893ed80fe   ubuntu   "top"     10 minutes ago   Up 10 minutes           test1
+```
+
+#### publish and expose
+
+The `publish` and `expose` filters show only containers that have a published or exposed port with a given port
+number, port range, and/or protocol. The default protocol is `tcp` when not specified.
+
+The following filter matches all containers that have a published port of 80:
+
+```bash
+$ docker run -d --publish=80 busybox top
+$ docker run -d --expose=8080 busybox top
+
+$ docker ps -a
+
+CONTAINER ID   IMAGE     COMMAND   CREATED              STATUS              PORTS                   NAMES
+9833437217a5   busybox   "top"     5 seconds ago        Up 4 seconds        8080/tcp                dreamy_mccarthy
+fc7e477723b7   busybox   "top"     50 seconds ago       Up 50 seconds       0.0.0.0:32768->80/tcp   admiring_roentgen
+
+$ docker ps --filter publish=80
+
+CONTAINER ID   IMAGE     COMMAND   CREATED              STATUS              PORTS                   NAMES
+fc7e477723b7   busybox   "top"     About a minute ago   Up About a minute   0.0.0.0:32768->80/tcp   admiring_roentgen
+```
+
+The following filter matches all containers that have an exposed TCP port in the range of `8000-8080`:
+```bash
+$ docker ps --filter expose=8000-8080/tcp
+
+CONTAINER ID   IMAGE     COMMAND   CREATED          STATUS          PORTS      NAMES
+9833437217a5   busybox   "top"     21 seconds ago   Up 19 seconds   8080/tcp   dreamy_mccarthy
+```
+
+The following filter matches all containers that have a published UDP port `80`:
+```bash
+$ docker ps --filter publish=80/udp
+
+CONTAINER ID   IMAGE     COMMAND   CREATED          STATUS          PORTS      NAMES
+```
+
+### Formatting
+
+The formatting option (`--format`) pretty-prints container output using a Go
+template.
+
+Valid placeholders for the Go template are listed below:
+
+Placeholder   | Description
+--------------|----------------------------------------------------------------------------------------------------
+`.ID`         | Container ID
+`.Image`      | Image ID
+`.Command`    | Quoted command
+`.CreatedAt`  | Time when the container was created.
+`.RunningFor` | Elapsed time since the container was started.
+`.Ports`      | Exposed ports.
+`.Status`     | Container status.
+`.Size`       | Container disk size.
+`.Names`      | Container names.
+`.Labels`     | All labels assigned to the container.
+`.Label`      | Value of a specific label for this container. For example `'{{.Label "com.docker.swarm.cpu"}}'`
+`.Mounts`     | Names of the volumes mounted in this container.
+`.Networks`   | Names of the networks attached to this container.
+
+When using the `--format` option, the `ps` command will either output the data
+exactly as the template declares or, when using the `table` directive, include
+column headers as well.
+
+The following example uses a template without headers and outputs the `ID` and
+`Command` entries separated by a colon for all running containers:
+
+```bash
+$ docker ps --format "{{.ID}}: {{.Command}}"
+
+a87ecb4f327c: /bin/sh -c #(nop) MA
+01946d9d34d8: /bin/sh -c #(nop) MA
+c1d3b0166030: /bin/sh -c yum -y up
+41d50ecd2f57: /bin/sh -c #(nop) MA
+```
+
+To list all running containers with their labels in a table format you can use:
+
+```bash
+$ docker ps --format "table {{.ID}}\t{{.Labels}}"
+
+CONTAINER ID        LABELS
+a87ecb4f327c        com.docker.swarm.node=ubuntu,com.docker.swarm.storage=ssd
+01946d9d34d8
+c1d3b0166030        com.docker.swarm.node=debian,com.docker.swarm.cpu=6
+41d50ecd2f57        com.docker.swarm.node=fedora,com.docker.swarm.cpu=3,com.docker.swarm.storage=ssd
+```
diff --git a/components/engine/docs/reference/commandline/pull.md b/components/engine/docs/reference/commandline/pull.md
new file mode 100644
index 0000000000..7bf3df8363
--- /dev/null
+++ b/components/engine/docs/reference/commandline/pull.md
@@ -0,0 +1,254 @@
+---
+title: "pull"
+description: "The pull command description and usage"
+keywords: "pull, image, hub, docker"
+---
+
+
+
+# pull
+
+```markdown
+Usage: docker pull [OPTIONS] NAME[:TAG|@DIGEST]
+
+Pull an image or a repository from a registry
+
+Options:
+  -a, --all-tags                Download all tagged images in the repository
+      --disable-content-trust   Skip image verification (default true)
+      --help                    Print usage
+```
+
+## Description
+
+Most of your images will be created on top of a base image from the
+[Docker Hub](https://hub.docker.com) registry.
+
+[Docker Hub](https://hub.docker.com) contains many pre-built images that you
+can `pull` and try without needing to define and configure your own.
+
+To download a particular image, or set of images (i.e., a repository),
+use `docker pull`.
+
+### Proxy configuration
+
+If you are behind an HTTP proxy server, for example in corporate settings, you
+may need to configure the Docker daemon's proxy settings before it can open a
+connection to a registry, using the `HTTP_PROXY`, `HTTPS_PROXY`, and `NO_PROXY`
+environment variables. To set these environment variables on a host using
+`systemd`, refer to
+[control and configure Docker with systemd](https://docs.docker.com/engine/admin/systemd/#http-proxy).
+
+### Concurrent downloads
+
+By default, the Docker daemon pulls three layers of an image at a time.
+If you are on a low-bandwidth connection, this may cause timeout issues, and
+you may want to lower this via the `--max-concurrent-downloads` daemon option.
+See the [daemon documentation](dockerd.md) for more details.
+
+## Examples
+
+### Pull an image from Docker Hub
+
+To download a particular image, or set of images (i.e., a repository), use
+`docker pull`. If no tag is provided, Docker Engine uses the `:latest` tag as a
+default. This command pulls the `debian:latest` image:
+
+```bash
+$ docker pull debian
+
+Using default tag: latest
+latest: Pulling from library/debian
+fdd5d7827f33: Pull complete
+a3ed95caeb02: Pull complete
+Digest: sha256:e7d38b3517548a1c71e41bffe9c8ae6d6d29546ce46bf62159837aad072c90aa
+Status: Downloaded newer image for debian:latest
+```
+
+Docker images can consist of multiple layers. In the example above, the image
+consists of two layers; `fdd5d7827f33` and `a3ed95caeb02`.
+
+Layers can be reused by images. For example, the `debian:jessie` image shares
+both layers with `debian:latest`.
+Pulling the `debian:jessie` image therefore only pulls its metadata, but not
+its layers, because all layers are already present locally:
+
+```bash
+$ docker pull debian:jessie
+
+jessie: Pulling from library/debian
+fdd5d7827f33: Already exists
+a3ed95caeb02: Already exists
+Digest: sha256:a9c958be96d7d40df920e7041608f2f017af81800ca5ad23e327bc402626b58e
+Status: Downloaded newer image for debian:jessie
+```
+
+To see which images are present locally, use the [`docker images`](images.md)
+command:
+
+```bash
+$ docker images
+
+REPOSITORY   TAG      IMAGE ID       CREATED      SIZE
+debian       jessie   f50f9524513f   5 days ago   125.1 MB
+debian       latest   f50f9524513f   5 days ago   125.1 MB
+```
+
+Docker uses a content-addressable image store, and the image ID is a SHA256
+digest covering the image's configuration and layers. In the example above,
+`debian:jessie` and `debian:latest` have the same image ID because they are
+actually the *same* image tagged with different names. Because they are the
+same image, their layers are stored only once and do not consume extra disk
+space.
+
+For more information about images, layers, and the content-addressable store,
+refer to [understand images, containers, and storage drivers](https://docs.docker.com/engine/userguide/storagedriver/imagesandcontainers/).
+
+
+### Pull an image by digest (immutable identifier)
+
+So far, you've pulled images by their name (and "tag"). Using names and tags is
+a convenient way to work with images. When using tags, you can `docker pull` an
+image again to make sure you have the most up-to-date version of that image.
+For example, `docker pull ubuntu:14.04` pulls the latest version of the Ubuntu
+14.04 image.
+
+In some cases you don't want images to be updated to newer versions, but prefer
+to use a fixed version of an image. Docker enables you to pull an image by its
+*digest*. When pulling an image by digest, you specify *exactly* which version
+of an image to pull. Doing so allows you to "pin" an image to that version,
+and guarantee that the image you're using is always the same.
+
+To know the digest of an image, pull the image first. Let's pull the latest
+`ubuntu:14.04` image from Docker Hub:
+
+```bash
+$ docker pull ubuntu:14.04
+
+14.04: Pulling from library/ubuntu
+5a132a7e7af1: Pull complete
+fd2731e4c50c: Pull complete
+28a2f68d1120: Pull complete
+a3ed95caeb02: Pull complete
+Digest: sha256:45b23dee08af5e43a7fea6c4cf9c25ccf269ee113168c19722f87876677c5cb2
+Status: Downloaded newer image for ubuntu:14.04
+```
+
+Docker prints the digest of the image after the pull has finished. In the example
+above, the digest of the image is:
+
+    sha256:45b23dee08af5e43a7fea6c4cf9c25ccf269ee113168c19722f87876677c5cb2
+
+Docker also prints the digest of an image when *pushing* to a registry. This
+may be useful if you want to pin to a version of the image you just pushed.
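+
+For example (a sketch only; the registry path, layer ID, and reported size
+below are illustrative, reusing the digest from the pull above):
+
+```bash
+$ docker push myregistry.local:5000/testing/test-image
+
+The push refers to a repository [myregistry.local:5000/testing/test-image]
+5f70bf18a086: Pushed
+latest: digest: sha256:45b23dee08af5e43a7fea6c4cf9c25ccf269ee113168c19722f87876677c5cb2 size: 1337
+```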
+ +A digest takes the place of the tag when pulling an image, for example, to +pull the above image by digest, run the following command: + +```bash +$ docker pull ubuntu@sha256:45b23dee08af5e43a7fea6c4cf9c25ccf269ee113168c19722f87876677c5cb2 + +sha256:45b23dee08af5e43a7fea6c4cf9c25ccf269ee113168c19722f87876677c5cb2: Pulling from library/ubuntu +5a132a7e7af1: Already exists +fd2731e4c50c: Already exists +28a2f68d1120: Already exists +a3ed95caeb02: Already exists +Digest: sha256:45b23dee08af5e43a7fea6c4cf9c25ccf269ee113168c19722f87876677c5cb2 +Status: Downloaded newer image for ubuntu@sha256:45b23dee08af5e43a7fea6c4cf9c25ccf269ee113168c19722f87876677c5cb2 +``` + +Digest can also be used in the `FROM` of a Dockerfile, for example: + +```Dockerfile +FROM ubuntu@sha256:45b23dee08af5e43a7fea6c4cf9c25ccf269ee113168c19722f87876677c5cb2 +MAINTAINER some maintainer +``` + +> **Note**: Using this feature "pins" an image to a specific version in time. +> Docker will therefore not pull updated versions of an image, which may include +> security updates. If you want to pull an updated image, you need to change the +> digest accordingly. + + +### Pull from a different registry + +By default, `docker pull` pulls images from [Docker Hub](https://hub.docker.com). It is also possible to +manually specify the path of a registry to pull from. For example, if you have +set up a local registry, you can specify its path to pull from it. A registry +path is similar to a URL, but does not contain a protocol specifier (`https://`). + +The following command pulls the `testing/test-image` image from a local registry +listening on port 5000 (`myregistry.local:5000`): + +```bash +$ docker pull myregistry.local:5000/testing/test-image +``` + +Registry credentials are managed by [docker login](login.md). + +Docker uses the `https://` protocol to communicate with a registry, unless the +registry is allowed to be accessed over an insecure connection. Refer to the +[insecure registries](dockerd.md#insecure-registries) section for more information. + + +### Pull a repository with multiple images + +By default, `docker pull` pulls a *single* image from the registry. A repository +can contain multiple images. To pull all images from a repository, provide the +`-a` (or `--all-tags`) option when using `docker pull`. + +This command pulls all images from the `fedora` repository: + +```bash +$ docker pull --all-tags fedora + +Pulling repository fedora +ad57ef8d78d7: Download complete +105182bb5e8b: Download complete +511136ea3c5a: Download complete +73bd853d2ea5: Download complete +.... + +Status: Downloaded newer image for fedora +``` + +After the pull has completed use the `docker images` command to see the +images that were pulled. The example below shows all the `fedora` images +that are present locally: + +```bash +$ docker images fedora + +REPOSITORY TAG IMAGE ID CREATED SIZE +fedora rawhide ad57ef8d78d7 5 days ago 359.3 MB +fedora 20 105182bb5e8b 5 days ago 372.7 MB +fedora heisenbug 105182bb5e8b 5 days ago 372.7 MB +fedora latest 105182bb5e8b 5 days ago 372.7 MB +``` + +### Cancel a pull + +Killing the `docker pull` process, for example by pressing `CTRL-c` while it is +running in a terminal, will terminate the pull operation. 
+
+```bash
+$ docker pull fedora
+
+Using default tag: latest
+latest: Pulling from library/fedora
+a3ed95caeb02: Pulling fs layer
+236608c7b546: Pulling fs layer
+^C
+```
+
+> **Note**: Technically, the Engine terminates a pull operation when the
+> connection between the Docker Engine daemon and the Docker Engine client
+> initiating the pull is lost. If the connection with the Engine daemon is
+> lost for reasons other than a manual interruption, the pull is also aborted.
diff --git a/components/engine/docs/reference/commandline/push.md b/components/engine/docs/reference/commandline/push.md
new file mode 100644
index 0000000000..61c37139fd
--- /dev/null
+++ b/components/engine/docs/reference/commandline/push.md
@@ -0,0 +1,82 @@
+---
+title: "push"
+description: "The push command description and usage"
+keywords: "share, push, image"
+---
+
+
+
+# push
+
+```markdown
+Usage: docker push [OPTIONS] NAME[:TAG]
+
+Push an image or a repository to a registry
+
+Options:
+      --disable-content-trust   Skip image signing (default true)
+      --help                    Print usage
+```
+
+## Description
+
+Use `docker push` to share your images to the [Docker Hub](https://hub.docker.com)
+registry or to a self-hosted one.
+
+Refer to the [`docker tag`](tag.md) reference for more information about valid
+image and tag names.
+
+Killing the `docker push` process, for example by pressing `CTRL-c` while it is
+running in a terminal, terminates the push operation.
+
+Progress bars are shown during a push and report the uncompressed size of the
+image. The data is compressed before it is sent, so the uploaded size is not
+reflected by the progress bar.
+
+Registry credentials are managed by [docker login](login.md).
+
+### Concurrent uploads
+
+By default, the Docker daemon pushes five layers of an image at a time.
+If you are on a low-bandwidth connection, this may cause timeout issues, and
+you may want to lower this via the `--max-concurrent-uploads` daemon option.
+See the [daemon documentation](dockerd.md) for more details.
+
+## Examples
+
+### Push a new image to a registry
+
+First save the new image by finding the container ID (using [`docker ps`](ps.md))
+and then committing it to a new image name. Note that only `a-z0-9-_.` are
+allowed when naming images:
+
+```bash
+$ docker commit c16378f943fe rhel-httpd
+```
+
+Now, push the image to the registry using the image ID. In this example the
+registry is on host named `registry-host` and listening on port `5000`. To do
+this, tag the image with the host name or IP address, and the port of the
+registry:
+
+```bash
+$ docker tag rhel-httpd registry-host:5000/myadmin/rhel-httpd
+
+$ docker push registry-host:5000/myadmin/rhel-httpd
+```
+
+Check that this worked by running:
+
+```bash
+$ docker images
+```
+
+You should see both `rhel-httpd` and `registry-host:5000/myadmin/rhel-httpd`
+listed.
diff --git a/components/engine/docs/reference/commandline/rename.md b/components/engine/docs/reference/commandline/rename.md
new file mode 100644
index 0000000000..90268a2a2c
--- /dev/null
+++ b/components/engine/docs/reference/commandline/rename.md
@@ -0,0 +1,35 @@
+---
+title: "rename"
+description: "The rename command description and usage"
+keywords: "rename, docker, container"
+---
+
+
+
+# rename
+
+```markdown
+Usage: docker rename CONTAINER NEW_NAME
+
+Rename a container
+
+Options:
+      --help   Print usage
+```
+
+## Description
+
+The `docker rename` command renames a container.
+ +## Examples + +```bash +$ docker rename my_container my_new_container +``` diff --git a/components/engine/docs/reference/commandline/restart.md b/components/engine/docs/reference/commandline/restart.md new file mode 100644 index 0000000000..a2796afe33 --- /dev/null +++ b/components/engine/docs/reference/commandline/restart.md @@ -0,0 +1,32 @@ +--- +title: "restart" +description: "The restart command description and usage" +keywords: "restart, container, Docker" +--- + + + +# restart + +```markdown +Usage: docker restart [OPTIONS] CONTAINER [CONTAINER...] + +Restart one or more containers + +Options: + --help Print usage + -t, --time int Seconds to wait for stop before killing the container (default 10) +``` + +## Examples + +```bash +$ docker restart my_container +``` diff --git a/components/engine/docs/reference/commandline/rm.md b/components/engine/docs/reference/commandline/rm.md new file mode 100644 index 0000000000..8ee5b2874d --- /dev/null +++ b/components/engine/docs/reference/commandline/rm.md @@ -0,0 +1,100 @@ +--- +title: "rm" +description: "The rm command description and usage" +keywords: "remove, Docker, container" +--- + + + +# rm + +```markdown +Usage: docker rm [OPTIONS] CONTAINER [CONTAINER...] + +Remove one or more containers + +Options: + -f, --force Force the removal of a running container (uses SIGKILL) + --help Print usage + -l, --link Remove the specified link + -v, --volumes Remove the volumes associated with the container +``` + +## Examples + +### Remove a container + +This will remove the container referenced under the link +`/redis`. + +```bash +$ docker rm /redis + +/redis +``` + +### Remove a link specified with `--link` on the default bridge network + +This will remove the underlying link between `/webapp` and the `/redis` +containers on the default bridge network, removing all network communication +between the two containers. This does not apply when `--link` is used with +user-specified networks. + +```bash +$ docker rm --link /webapp/redis + +/webapp/redis +``` + +### Force-remove a running container + +This command will force-remove a running container. + +```bash +$ docker rm --force redis + +redis +``` + +The main process inside the container referenced under the link `redis` will receive +`SIGKILL`, then the container will be removed. + +### Remove all stopped containers + +```bash +$ docker rm $(docker ps -a -q) +``` + +This command will delete all stopped containers. The command +`docker ps -a -q` will return all existing container IDs and pass them to +the `rm` command which will delete them. Any running containers will not be +deleted. + +### Remove a container and its volumes + +```bash +$ docker rm -v redis +redis +``` + +This command will remove the container and any volumes associated with it. +Note that if a volume was specified with a name, it will not be removed. + +### Remove a container and selectively remove volumes + +```bash +$ docker create -v awesome:/foo -v /bar --name hello redis +hello +$ docker rm -v hello +``` + +In this example, the volume for `/foo` will remain intact, but the volume for +`/bar` will be removed. The same behavior holds for volumes inherited with +`--volumes-from`. 
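+
+To confirm, list the remaining volumes afterwards (a sketch; the output assumes
+the named `awesome` volume from the example above and the default `local`
+driver):
+
+```bash
+$ docker volume ls
+
+DRIVER              VOLUME NAME
+local               awesome
+```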
diff --git a/components/engine/docs/reference/commandline/rmi.md b/components/engine/docs/reference/commandline/rmi.md
new file mode 100644
index 0000000000..28e21d4398
--- /dev/null
+++ b/components/engine/docs/reference/commandline/rmi.md
@@ -0,0 +1,105 @@
+---
+title: "rmi"
+description: "The rmi command description and usage"
+keywords: "remove, image, Docker"
+---
+
+
+
+# rmi
+
+```markdown
+Usage: docker rmi [OPTIONS] IMAGE [IMAGE...]
+
+Remove one or more images
+
+Options:
+  -f, --force      Force removal of the image
+      --help       Print usage
+      --no-prune   Do not delete untagged parents
+```
+
+## Examples
+
+You can remove an image using its short or long ID, its tag, or its digest. If
+an image has one or more tags referencing it, you must remove all of them before
+the image is removed. Digest references are removed automatically when an image
+is removed by tag.
+
+```bash
+$ docker images
+
+REPOSITORY   TAG      IMAGE ID       CREATED          SIZE
+test1        latest   fd484f19954f   23 seconds ago   7 B (virtual 4.964 MB)
+test         latest   fd484f19954f   23 seconds ago   7 B (virtual 4.964 MB)
+test2        latest   fd484f19954f   23 seconds ago   7 B (virtual 4.964 MB)
+
+$ docker rmi fd484f19954f
+
+Error: Conflict, cannot delete image fd484f19954f because it is tagged in multiple repositories, use -f to force
+2013/12/11 05:47:16 Error: failed to remove one or more images
+
+$ docker rmi test1
+
+Untagged: test1:latest
+
+$ docker rmi test2
+
+Untagged: test2:latest
+
+
+$ docker images
+
+REPOSITORY   TAG      IMAGE ID       CREATED          SIZE
+test         latest   fd484f19954f   23 seconds ago   7 B (virtual 4.964 MB)
+
+$ docker rmi test
+
+Untagged: test:latest
+Deleted: fd484f19954f4920da7ff372b5067f5b7ddb2fd3830cecd17b96ea9e286ba5b8
+```
+
+If you use the `-f` flag and specify the image's short or long ID, then this
+command untags and removes all images that match the specified ID.
+
+```bash
+$ docker images
+
+REPOSITORY   TAG      IMAGE ID       CREATED          SIZE
+test1        latest   fd484f19954f   23 seconds ago   7 B (virtual 4.964 MB)
+test         latest   fd484f19954f   23 seconds ago   7 B (virtual 4.964 MB)
+test2        latest   fd484f19954f   23 seconds ago   7 B (virtual 4.964 MB)
+
+$ docker rmi -f fd484f19954f
+
+Untagged: test1:latest
+Untagged: test:latest
+Untagged: test2:latest
+Deleted: fd484f19954f4920da7ff372b5067f5b7ddb2fd3830cecd17b96ea9e286ba5b8
+```
+
+An image pulled by digest has no tag associated with it:
+
+```bash
+$ docker images --digests
+
+REPOSITORY                    TAG      DIGEST                                                                    IMAGE ID       CREATED       SIZE
+localhost:5000/test/busybox   <none>   sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf   4986bf8c1536   9 weeks ago   2.43 MB
+```
+
+To remove an image using its digest:
+
+```bash
+$ docker rmi localhost:5000/test/busybox@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf
+Untagged: localhost:5000/test/busybox@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf
+Deleted: 4986bf8c15363d1c5d15512d5266f8777bfba4974ac56e3270e7760f6f0a8125
+Deleted: ea13149945cb6b1e746bf28032f02e9b5a793523481a0a18645fc77ad53c4ea2
+Deleted: df7546f9f060a2268024c8a230d8639878585defcc1bc6f79d2728a13957871b
+```
diff --git a/components/engine/docs/reference/commandline/run.md b/components/engine/docs/reference/commandline/run.md
new file mode 100644
index 0000000000..ecb676dc20
--- /dev/null
+++ b/components/engine/docs/reference/commandline/run.md
@@ -0,0 +1,777 @@
+---
+title: "run"
+description: "The run command description and usage"
+keywords: "run, command, container"
+---
+
+
+
+# run
+
+```markdown
+Usage: docker run [OPTIONS] IMAGE [COMMAND] [ARG...]
+
+Run a command in a new container
+
+Options:
+      --add-host value                Add a custom host-to-IP mapping (host:ip) (default [])
+  -a, --attach value                  Attach to STDIN, STDOUT or STDERR (default [])
+      --blkio-weight value            Block IO (relative weight), between 10 and 1000
+      --blkio-weight-device value     Block IO weight (relative device weight) (default [])
+      --cap-add value                 Add Linux capabilities (default [])
+      --cap-drop value                Drop Linux capabilities (default [])
+      --cgroup-parent string          Optional parent cgroup for the container
+      --cidfile string                Write the container ID to the file
+      --cpu-count int                 The number of CPUs available for execution by the container.
+                                      Windows daemon only. On Windows Server containers, this is
+                                      approximated as a percentage of total CPU usage.
+      --cpu-percent int               Limit percentage of CPU available for execution
+                                      by the container. Windows daemon only.
+                                      The processor resource controls are mutually
+                                      exclusive, the order of precedence is CPUCount
+                                      first, then CPUShares, and CPUPercent last.
+      --cpu-period int                Limit CPU CFS (Completely Fair Scheduler) period
+      --cpu-quota int                 Limit CPU CFS (Completely Fair Scheduler) quota
+  -c, --cpu-shares int                CPU shares (relative weight)
+      --cpus NanoCPUs                 Number of CPUs (default 0.000)
+      --cpu-rt-period int             Limit the CPU real-time period in microseconds
+      --cpu-rt-runtime int            Limit the CPU real-time runtime in microseconds
+      --cpuset-cpus string            CPUs in which to allow execution (0-3, 0,1)
+      --cpuset-mems string            MEMs in which to allow execution (0-3, 0,1)
+  -d, --detach                        Run container in background and print container ID
+      --detach-keys string            Override the key sequence for detaching a container
+      --device value                  Add a host device to the container (default [])
+      --device-cgroup-rule value      Add a rule to the cgroup allowed devices list
+      --device-read-bps value         Limit read rate (bytes per second) from a device (default [])
+      --device-read-iops value        Limit read rate (IO per second) from a device (default [])
+      --device-write-bps value        Limit write rate (bytes per second) to a device (default [])
+      --device-write-iops value       Limit write rate (IO per second) to a device (default [])
+      --disable-content-trust         Skip image verification (default true)
+      --dns value                     Set custom DNS servers (default [])
+      --dns-option value              Set DNS options (default [])
+      --dns-search value              Set custom DNS search domains (default [])
+      --entrypoint string             Overwrite the default ENTRYPOINT of the image
+  -e, --env value                     Set environment variables (default [])
+      --env-file value                Read in a file of environment variables (default [])
+      --expose value                  Expose a port or a range of ports (default [])
+      --group-add value               Add additional groups to join (default [])
+      --health-cmd string             Command to run to check health
+      --health-interval duration      Time between running the check (ns|us|ms|s|m|h) (default 0s)
+      --health-retries int            Consecutive failures needed to report unhealthy
+      --health-timeout duration       Maximum time to allow one check to run (ns|us|ms|s|m|h) (default 0s)
+      --health-start-period duration  Start period for the container to initialize before counting retries towards unstable (ns|us|ms|s|m|h) (default 0s)
+      --help                          Print usage
+  -h, --hostname string               Container host name
+      --init                          Run an init inside the container that forwards signals and reaps processes
+  -i, --interactive                   Keep STDIN open even if not attached
+      --io-maxbandwidth string        Maximum IO bandwidth limit for the system drive
+                                      (Windows only). The format is `<number><unit>`.
+                                      Unit is optional and can be `b` (bytes per second),
+                                      `k` (kilobytes per second), `m` (megabytes per second),
+                                      or `g` (gigabytes per second). If you omit the unit,
+                                      the system uses bytes per second.
+                                      --io-maxbandwidth and --io-maxiops are mutually exclusive options.
+      --io-maxiops uint               Maximum IOps limit for the system drive (Windows only)
+      --ip string                     IPv4 address (e.g., 172.30.100.104)
+      --ip6 string                    IPv6 address (e.g., 2001:db8::33)
+      --ipc string                    IPC namespace to use
+      --isolation string              Container isolation technology
+      --kernel-memory string          Kernel memory limit
+  -l, --label value                   Set meta data on a container (default [])
+      --label-file value              Read in a line delimited file of labels (default [])
+      --link value                    Add link to another container (default [])
+      --link-local-ip value           Container IPv4/IPv6 link-local addresses (default [])
+      --log-driver string             Logging driver for the container
+      --log-opt value                 Log driver options (default [])
+      --mac-address string            Container MAC address (e.g., 92:d0:c6:0a:29:33)
+  -m, --memory string                 Memory limit
+      --memory-reservation string     Memory soft limit
+      --memory-swap string            Swap limit equal to memory plus swap: '-1' to enable unlimited swap
+      --memory-swappiness int         Tune container memory swappiness (0 to 100) (default -1)
+      --mount value                   Attach a filesystem mount to the container (default [])
+      --name string                   Assign a name to the container
+      --network-alias value           Add network-scoped alias for the container (default [])
+      --network string                Connect a container to a network
+                                      'bridge': create a network stack on the default Docker bridge
+                                      'none': no networking
+                                      'container:<name|id>': reuse another container's network stack
+                                      'host': use the Docker host network stack
+                                      '<network-name>|<network-id>': connect to a user-defined network
+      --no-healthcheck                Disable any container-specified HEALTHCHECK
+      --oom-kill-disable              Disable OOM Killer
+      --oom-score-adj int             Tune host's OOM preferences (-1000 to 1000)
+      --pid string                    PID namespace to use
+      --pids-limit int                Tune container pids limit (set -1 for unlimited)
+      --privileged                    Give extended privileges to this container
+  -p, --publish value                 Publish a container's port(s) to the host (default [])
+  -P, --publish-all                   Publish all exposed ports to random ports
+      --read-only                     Mount the container's root filesystem as read only
+      --restart string                Restart policy to apply when a container exits (default "no")
+                                      Possible values are: no, on-failure[:max-retry], always, unless-stopped
+      --rm                            Automatically remove the container when it exits
+      --runtime string                Runtime to use for this container
+      --security-opt value            Security Options (default [])
+      --shm-size bytes                Size of /dev/shm
+                                      The format is `<number><unit>`. `number` must be greater than `0`.
+                                      Unit is optional and can be `b` (bytes), `k` (kilobytes), `m` (megabytes),
+                                      or `g` (gigabytes). If you omit the unit, the system uses bytes.
+      --sig-proxy                     Proxy received signals to the process (default true)
+      --stop-signal string            Signal to stop a container (default "SIGTERM")
+      --stop-timeout=10               Timeout (in seconds) to stop a container
+      --storage-opt value             Storage driver options for the container (default [])
+      --sysctl value                  Sysctl options (default map[])
+      --tmpfs value                   Mount a tmpfs directory (default [])
+  -t, --tty                           Allocate a pseudo-TTY
+      --ulimit value                  Ulimit options (default [])
+  -u, --user string                   Username or UID (format: <name|uid>[:<group|gid>])
+      --userns string                 User namespace to use
+                                      'host': Use the Docker host user namespace
+                                      '': Use the Docker daemon user namespace specified by `--userns-remap` option.
+      --uts string                    UTS namespace to use
+  -v, --volume value                  Bind mount a volume (default []). The format
+                                      is `[host-src:]container-dest[:<options>]`.
+                                      The comma-delimited `options` are [rw|ro],
+                                      [z|Z], [[r]shared|[r]slave|[r]private],
+                                      [delegated|cached|consistent], and
+                                      [nocopy]. The 'host-src' is an absolute path
+                                      or a name value.
+      --volume-driver string          Optional volume driver for the container
+      --volumes-from value            Mount volumes from the specified container(s) (default [])
+  -w, --workdir string                Working directory inside the container
+```
+
+## Description
+
+The `docker run` command first `creates` a writeable container layer over the
+specified image, and then `starts` it using the specified command. That is,
+`docker run` is equivalent to the API `/containers/create` then
+`/containers/(id)/start`. A stopped container can be restarted with all its
+previous changes intact using `docker start`. See `docker ps -a` to view a list
+of all containers.
+
+The `docker run` command can be used in combination with `docker commit` to
+[*change the command that a container runs*](commit.md). There is additional detailed information about `docker run` in the [Docker run reference](../run.md).
+
+For information on connecting a container to a network, see the ["*Docker network overview*"](https://docs.docker.com/engine/userguide/networking/).
+
+## Examples
+
+### Assign name and allocate pseudo-TTY (--name, -it)
+
+```bash
+$ docker run --name test -it debian
+
+root@d6c0fe130dba:/# exit 13
+$ echo $?
+13
+$ docker ps -a | grep test
+d6c0fe130dba        debian:7            "/bin/bash"         26 seconds ago      Exited (13) 17 seconds ago                         test
+```
+
+This example runs a container named `test` using the `debian:latest`
+image. The `-it` instructs Docker to allocate a pseudo-TTY connected to
+the container's stdin; creating an interactive `bash` shell in the container.
+In the example, the `bash` shell is quit by entering
+`exit 13`. This exit code is passed on to the caller of
+`docker run`, and is recorded in the `test` container's metadata.
+
+### Capture container ID (--cidfile)
+
+```bash
+$ docker run --cidfile /tmp/docker_test.cid ubuntu echo "test"
+```
+
+This will create a container and print `test` to the console. The `cidfile`
+flag makes Docker attempt to create a new file and write the container ID to it.
+If the file exists already, Docker will return an error. Docker will close this
+file when `docker run` exits.
+
+### Full container capabilities (--privileged)
+
+```bash
+$ docker run -t -i --rm ubuntu bash
+root@bc338942ef20:/# mount -t tmpfs none /mnt
+mount: permission denied
+```
+
+This will *not* work, because by default, most potentially dangerous kernel
+capabilities are dropped; including `cap_sys_admin` (which is required to mount
+filesystems). However, the `--privileged` flag will allow it to run:
+
+```bash
+$ docker run -t -i --privileged ubuntu bash
+root@50e3f57e16e6:/# mount -t tmpfs none /mnt
+root@50e3f57e16e6:/# df -h
+Filesystem      Size  Used Avail Use% Mounted on
+none            1.9G     0  1.9G   0% /mnt
+```
+
+The `--privileged` flag gives *all* capabilities to the container, and it also
+lifts all the limitations enforced by the `device` cgroup controller. In other
+words, the container can then do almost everything that the host can do. This
+flag exists to allow special use-cases, like running Docker within Docker.
+
+### Set working directory (-w)
+
+```bash
+$ docker run -w /path/to/dir/ -i -t ubuntu pwd
+```
+
+The `-w` option runs the command inside the directory specified, in this
+example `/path/to/dir/`. If the path does not exist, it is created inside the
+container.
+
+### Set storage driver options per container
+
+```bash
+$ docker run -it --storage-opt size=120G fedora /bin/bash
+```
+
+The `size` option sets the container rootfs size to 120G at creation time.
+This option is only available for the `devicemapper`, `btrfs`, `overlay2`,
+`windowsfilter` and `zfs` graph drivers.
+For the `devicemapper`, `btrfs`, `windowsfilter` and `zfs` graph drivers,
+the user cannot pass a size less than the Default BaseFS Size.
+For the `overlay2` storage driver, the size option is only available if the
+backing fs is `xfs` and mounted with the `pquota` mount option.
+Under these conditions, the user can pass any size less than the backing fs size.
+
+### Mount tmpfs (--tmpfs)
+
+```bash
+$ docker run -d --tmpfs /run:rw,noexec,nosuid,size=65536k my_image
+```
+
+The `--tmpfs` flag mounts an empty tmpfs into the container with the `rw`,
+`noexec`, `nosuid`, `size=65536k` options.
+
+### Mount volume (-v, --read-only)
+
+```bash
+$ docker run -v `pwd`:`pwd` -w `pwd` -i -t ubuntu pwd
+```
+
+The `-v` flag mounts the current working directory into the container. The `-w`
+option runs the command inside the current working directory, by changing into
+the directory given by `pwd`. So this combination executes the command using
+the container, but inside the current working directory.
+
+```bash
+$ docker run -v /doesnt/exist:/foo -w /foo -i -t ubuntu bash
+```
+
+When the host directory of a bind-mounted volume doesn't exist, Docker
+will automatically create this directory on the host for you. In the
+example above, Docker will create the `/doesnt/exist`
+folder before starting your container.
+
+```bash
+$ docker run --read-only -v /icanwrite busybox touch /icanwrite/here
+```
+
+Volumes can be used in combination with `--read-only` to control where
+a container writes files. The `--read-only` flag mounts the container's root
+filesystem as read only prohibiting writes to locations other than the
+specified volumes for the container.
+
+```bash
+$ docker run -t -i -v /var/run/docker.sock:/var/run/docker.sock -v /path/to/static-docker-binary:/usr/bin/docker busybox sh
+```
+
+By bind-mounting the docker unix socket and statically linked docker
+binary (refer to [get the linux binary](
+https://docs.docker.com/engine/installation/binaries/#/get-the-linux-binary)),
+you give the container full access to create and manipulate the host's
+Docker daemon.
+
+On Windows, the paths must be specified using Windows-style semantics.
+
+```powershell
+PS C:\> docker run -v c:\foo:c:\dest microsoft/nanoserver cmd /s /c type c:\dest\somefile.txt
+Contents of file
+
+PS C:\> docker run -v c:\foo:d: microsoft/nanoserver cmd /s /c type d:\somefile.txt
+Contents of file
+```
+
+The following examples will fail when using Windows-based containers, as the
+destination of a volume or bind-mount inside the container must be one of:
+a non-existing or empty directory; or a drive other than C:. Further, the source
+of a bind mount must be a local directory, not a file.
+
+```powershell
+net use z: \\remotemachine\share
+docker run -v z:\foo:c:\dest ...
+docker run -v \\uncpath\to\directory:c:\dest ...
+docker run -v c:\foo\somefile.txt:c:\dest ...
+docker run -v c:\foo:c: ...
+docker run -v c:\foo:c:\existing-directory-with-contents ...
+```
+
+For in-depth information about volumes, refer to [manage data in containers](https://docs.docker.com/engine/tutorials/dockervolumes/)
+
+
+### Add bind-mounts or volumes using the --mount flag
+
+The `--mount` flag allows you to mount volumes, host-directories and `tmpfs`
+mounts in a container.
+
+The `--mount` flag supports most options that are supported by the `-v` or the
+`--volume` flag, but uses a different syntax. For in-depth information on the
+`--mount` flag, and a comparison between `--volume` and `--mount`, refer to
+the [service create command reference](service_create.md#add-bind-mounts-or-volumes).
+
+Even though there is no plan to deprecate `--volume`, usage of `--mount` is recommended.
+
+Examples:
+
+```bash
+$ docker run --read-only --mount type=volume,target=/icanwrite busybox touch /icanwrite/here
+```
+
+```bash
+$ docker run -t -i --mount type=bind,src=/data,dst=/data busybox sh
+```
+
+### Publish or expose port (-p, --expose)
+
+```bash
+$ docker run -p 127.0.0.1:80:8080 ubuntu bash
+```
+
+This binds port `8080` of the container to port `80` on `127.0.0.1` of the host
+machine. The [Docker User
+Guide](https://docs.docker.com/engine/userguide/networking/default_network/dockerlinks/)
+explains in detail how to manipulate ports in Docker.
+
+```bash
+$ docker run --expose 80 ubuntu bash
+```
+
+This exposes port `80` of the container without publishing the port to the host
+system's interfaces.
+
+### Set environment variables (-e, --env, --env-file)
+
+```bash
+$ docker run -e MYVAR1 --env MYVAR2=foo --env-file ./env.list ubuntu bash
+```
+
+Use the `-e`, `--env`, and `--env-file` flags to set simple (non-array)
+environment variables in the container you're running, or overwrite variables
+that are defined in the Dockerfile of the image you're running.
+
+You can define the variable and its value when running the container:
+
+```bash
+$ docker run --env VAR1=value1 --env VAR2=value2 ubuntu env | grep VAR
+VAR1=value1
+VAR2=value2
+```
+
+You can also use variables that you've exported to your local environment:
+
+```bash
+export VAR1=value1
+export VAR2=value2
+
+$ docker run --env VAR1 --env VAR2 ubuntu env | grep VAR
+VAR1=value1
+VAR2=value2
+```
+
+When running the command, the Docker CLI client checks the value the variable
+has in your local environment and passes it to the container.
+If no `=` is provided and that variable is not exported in your local
+environment, the variable won't be set in the container.
+
+You can also load the environment variables from a file. This file should use
+the syntax `<variable>=value` (which sets the variable to the given value) or
+`<variable>` (which takes the value from the local environment), and `#` for comments.
+
+```bash
+$ cat env.list
+# This is a comment
+VAR1=value1
+VAR2=value2
+USER
+
+$ docker run --env-file env.list ubuntu env | grep -E 'VAR|USER'
+VAR1=value1
+VAR2=value2
+USER=denis
+```
+
+### Set metadata on container (-l, --label, --label-file)
+
+A label is a `key=value` pair that applies metadata to a container. To label a container with two labels:
+
+```bash
+$ docker run -l my-label --label com.example.foo=bar ubuntu bash
+```
+
+The `my-label` key doesn't specify a value so the label defaults to an empty
+string (`""`). To add multiple labels, repeat the label flag (`-l` or `--label`).
+
+The `key=value` must be unique to avoid overwriting the label value. If you
+specify labels with identical keys but different values, each subsequent value
+overwrites the previous. Docker uses the last `key=value` you supply.
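+
+A quick sketch of this last-one-wins behavior (the label key and container
+name here are illustrative):
+
+```bash
+$ docker run -l com.example.foo=first -l com.example.foo=second --name labeled busybox true
+
+$ docker inspect -f '{{.Config.Labels}}' labeled
+map[com.example.foo:second]
+```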
+
+Use the `--label-file` flag to load multiple labels from a file. Delimit each
+label in the file with an EOL mark. The example below loads labels from a
+labels file in the current directory:
+
+```bash
+$ docker run --label-file ./labels ubuntu bash
+```
+
+The label-file format is similar to the format for loading environment
+variables. (Unlike environment variables, labels are not visible to processes
+running inside a container.) The following example illustrates a label-file
+format:
+
+```none
+com.example.label1="a label"
+
+# this is a comment
+com.example.label2=another\ label
+com.example.label3
+```
+
+You can load multiple label-files by supplying multiple `--label-file` flags.
+
+For additional information on working with labels, see [*Labels - custom
+metadata in Docker*](https://docs.docker.com/engine/userguide/labels-custom-metadata/) in the Docker User
+Guide.
+
+### Connect a container to a network (--network)
+
+When you start a container use the `--network` flag to connect it to a network.
+This adds the `busybox` container to the `my-net` network.
+
+```bash
+$ docker run -itd --network=my-net busybox
+```
+
+You can also choose the IP addresses for the container with `--ip` and `--ip6`
+flags when you start the container on a user-defined network.
+
+```bash
+$ docker run -itd --network=my-net --ip=10.10.9.75 busybox
+```
+
+If you want to add a running container to a network use the `docker network connect` subcommand.
+
+You can connect multiple containers to the same network. Once connected, the
+containers can communicate using only another container's IP address
+or name. For `overlay` networks or custom plugins that support multi-host
+connectivity, containers connected to the same multi-host network but launched
+from different Engines can also communicate in this way.
+
+> **Note**: Service discovery is unavailable on the default bridge network.
+> Containers can communicate via their IP addresses by default. To communicate
+> by name, they must be linked.
+
+You can disconnect a container from a network using the `docker network
+disconnect` command.
+
+### Mount volumes from container (--volumes-from)
+
+```bash
+$ docker run --volumes-from 777f7dc92da7 --volumes-from ba8c0c54f0f2:ro -i -t ubuntu pwd
+```
+
+The `--volumes-from` flag mounts all the defined volumes from the referenced
+containers. Containers can be specified by repetitions of the `--volumes-from`
+argument. The container ID may be optionally suffixed with `:ro` or `:rw` to
+mount the volumes in read-only or read-write mode, respectively. By default,
+the volumes are mounted in the same mode (read write or read only) as
+the reference container.
+
+Labeling systems like SELinux require that proper labels are placed on volume
+content mounted into a container. Without a label, the security system might
+prevent the processes running inside the container from using the content. By
+default, Docker does not change the labels set by the OS.
+
+To change the label in the container context, you can add either of two suffixes
+`:z` or `:Z` to the volume mount. These suffixes tell Docker to relabel file
+objects on the shared volumes. The `z` option tells Docker that two containers
+share the volume content. As a result, Docker labels the content with a shared
+content label. Shared volume labels allow all containers to read/write content.
+The `Z` option tells Docker to label the content with a private unshared label.
+Only the current container can use a private volume.
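+
+For example (a sketch; `/var/db` is an illustrative host path):
+
+```bash
+$ docker run -v /var/db:/var/db:z -i -t fedora bash
+```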
+
+### Attach to STDIN/STDOUT/STDERR (-a)
+
+The `-a` flag tells `docker run` to bind to the container's `STDIN`, `STDOUT`
+or `STDERR`. This makes it possible to manipulate the output and input as
+needed.
+
+```bash
+$ echo "test" | docker run -i -a stdin ubuntu cat -
+```
+
+This pipes data into a container and prints the container's ID by attaching
+only to the container's `STDIN`.
+
+```bash
+$ docker run -a stderr ubuntu echo test
+```
+
+This isn't going to print anything unless there's an error because we've
+only attached to the `STDERR` of the container. The container's logs
+still store what's been written to `STDERR` and `STDOUT`.
+
+```bash
+$ cat somefile | docker run -i -a stdin mybuilder dobuild
+```
+
+This shows how you could pipe a file into a container for a build. The
+container's ID is printed after the build is done, and the build logs can be
+retrieved using `docker logs`. This is useful if you need to pipe a file or
+something else into a container and retrieve the container's ID once the
+container has finished running.
+
+### Add host device to container (--device)
+
+```bash
+$ docker run --device=/dev/sdc:/dev/xvdc \
+             --device=/dev/sdd --device=/dev/zero:/dev/nulo \
+             -i -t \
+             ubuntu ls -l /dev/{xvdc,sdd,nulo}
+
+brw-rw---- 1 root disk 8, 2 Feb  9 16:05 /dev/xvdc
+brw-rw---- 1 root disk 8, 3 Feb  9 16:05 /dev/sdd
+crw-rw-rw- 1 root root 1, 5 Feb  9 16:05 /dev/nulo
+```
+
+It is often necessary to directly expose devices to a container. The `--device`
+option enables that. For example, a specific block storage device, loop device,
+or audio device can be added to an otherwise unprivileged container
+(without the `--privileged` flag) and have the application directly access it.
+
+By default, the container will be able to `read`, `write` and `mknod` these devices.
+This can be overridden using a third `:rwm` set of options to each `--device`
+flag:
+
+```bash
+$ docker run --device=/dev/sda:/dev/xvdc --rm -it ubuntu fdisk /dev/xvdc
+
+Command (m for help): q
+
+$ docker run --device=/dev/sda:/dev/xvdc:r --rm -it ubuntu fdisk /dev/xvdc
+You will not be able to write the partition table.
+
+Command (m for help): q
+
+$ docker run --device=/dev/sda:/dev/xvdc:rw --rm -it ubuntu fdisk /dev/xvdc
+
+Command (m for help): q
+
+$ docker run --device=/dev/sda:/dev/xvdc:m --rm -it ubuntu fdisk /dev/xvdc
+fdisk: unable to open /dev/xvdc: Operation not permitted
+```
+
+> **Note**: `--device` cannot be safely used with ephemeral devices. Block devices
+> that may be removed should not be added to untrusted containers with
+> `--device`.
+
+### Restart policies (--restart)
+
+Use Docker's `--restart` to specify a container's *restart policy*. A restart
+policy controls whether the Docker daemon restarts a container after exit.
+Docker supports the following restart policies:
+
+| Policy       | Result                                                                 |
+|:-------------|:-----------------------------------------------------------------------|
+| `no`         | Do not automatically restart the container when it exits. This is the default. |
+| `on-failure` | Restart only if the container exits with a non-zero exit status. Optionally, limit the number of restart retries the Docker daemon attempts. |
+| `always`     | Always restart the container regardless of the exit status. When you specify `always`, the Docker daemon will try to restart the container indefinitely. The container will also always start on daemon startup, regardless of the current state of the container. |
+
+```bash
+$ docker run --restart=always redis
+```
+
+This will run the `redis` container with a restart policy of **always**
+so that if the container exits, Docker will restart it.
+
+More detailed information on restart policies can be found in the
+[Restart Policies (--restart)](../run.md#restart-policies-restart)
+section of the Docker run reference page.
+
+### Add entries to container hosts file (--add-host)
+
+You can add other hosts into a container's `/etc/hosts` file by using one or
+more `--add-host` flags. This example adds a static address for a host named
+`docker`:
+
+```bash
+$ docker run --add-host=docker:10.180.0.1 --rm -it debian
+
+root@f38c87f2a42d:/# ping docker
+PING docker (10.180.0.1): 48 data bytes
+56 bytes from 10.180.0.1: icmp_seq=0 ttl=254 time=7.600 ms
+56 bytes from 10.180.0.1: icmp_seq=1 ttl=254 time=30.705 ms
+^C--- docker ping statistics ---
+2 packets transmitted, 2 packets received, 0% packet loss
+round-trip min/avg/max/stddev = 7.600/19.152/30.705/11.553 ms
+```
+
+Sometimes you need to connect to the Docker host from within your
+container. To enable this, pass the Docker host's IP address to
+the container using the `--add-host` flag. To find the host's address,
+use the `ip addr show` command.
+
+The flags you pass to `ip addr show` depend on whether you are
+using IPv4 or IPv6 networking in your containers. Use the following
+flags for IPv4 address retrieval for a network device named `eth0`:
+
+```bash
+$ HOSTIP=`ip -4 addr show scope global dev eth0 | grep inet | awk '{print \$2}' | cut -d / -f 1`
+$ docker run --add-host=docker:${HOSTIP} --rm -it debian
+```
+
+For IPv6 use the `-6` flag instead of the `-4` flag. For other network
+devices, replace `eth0` with the correct device name (for example `docker0`
+for the bridge device).
+
+### Set ulimits in container (--ulimit)
+
+Since setting `ulimit` settings in a container requires extra privileges not
+available in the default container, you can set these using the `--ulimit` flag.
+`--ulimit` is specified with a soft and hard limit as such:
+`<type>=<soft limit>[:<hard limit>]`, for example:
+
+```bash
+$ docker run --ulimit nofile=1024:1024 --rm debian sh -c "ulimit -n"
+1024
+```
+
+> **Note**: If you do not provide a `hard limit`, the `soft limit` will be used
+> for both values. If no `ulimits` are set, they will be inherited from
+> the default `ulimits` set on the daemon. The `as` option is disabled now.
+> In other words, the following script is not supported:
+>
+> ```bash
+> $ docker run -it --ulimit as=1024 fedora /bin/bash
+> ```
+
+The values are sent to the appropriate `syscall` as they are set.
+Docker doesn't perform any byte conversion. Take this into account when setting the values.
+
+#### For `nproc` usage
+
+Be careful setting `nproc` with the `ulimit` flag as `nproc` is designed by Linux to set the
+maximum number of processes available to a user, not to a container. For example, start four
+containers with the `daemon` user:
+
+```bash
+$ docker run -d -u daemon --ulimit nproc=3 busybox top
+
+$ docker run -d -u daemon --ulimit nproc=3 busybox top
+
+$ docker run -d -u daemon --ulimit nproc=3 busybox top
+
+$ docker run -d -u daemon --ulimit nproc=3 busybox top
+```
+
+The 4th container fails and reports a "[8] System error: resource temporarily unavailable" error.
+This fails because the caller set `nproc=3`, and the first three containers
+used up the three-process quota set for the `daemon` user.
+
+### Stop container with signal (--stop-signal)
+
+The `--stop-signal` flag sets the system call signal that will be sent to the container to exit.
+This signal can be a valid unsigned number that matches a position in the kernel's syscall table,
+for instance `9`, or a signal name in the format `SIGNAME`, for instance `SIGKILL`.
+
+### Optional security options (--security-opt)
+
+On Windows, this flag can be used to specify the `credentialspec` option.
+The `credentialspec` must be in the format `file://spec.txt` or `registry://keyname`.
+
+### Stop container with timeout (--stop-timeout)
+
+The `--stop-timeout` flag sets the time (in seconds) to wait for the container to exit after
+sending the pre-defined (see `--stop-signal`) system call signal. After the timeout elapses,
+the container is killed with `SIGKILL`.
+
+### Specify isolation technology for container (--isolation)
+
+This option is useful in situations where you are running Docker containers on
+Windows. The `--isolation <value>` option sets a container's isolation technology.
+On Linux, the only supported value is `default`, which uses
+Linux namespaces. These two commands are equivalent on Linux:
+
+```bash
+$ docker run -d busybox top
+$ docker run -d --isolation default busybox top
+```
+
+On Windows, `--isolation` can take one of these values:
+
+| Value     | Description                                                                                 |
+|:----------|:---------------------------------------------------------------------------------------------|
+| `default` | Use the value specified by the Docker daemon's `--exec-opt` or system default (see below).  |
+| `process` | Shared-kernel namespace isolation (not supported on Windows client operating systems).      |
+| `hyperv`  | Hyper-V hypervisor partition-based isolation.                                               |
+
+The default isolation on Windows server operating systems is `process`. The default (and only supported)
+isolation on Windows client operating systems is `hyperv`. An attempt to start a container on a client
+operating system with `--isolation process` will fail.
+
+On Windows server, assuming the default configuration, these commands are equivalent
+and result in `process` isolation:
+
+```PowerShell
+PS C:\> docker run -d microsoft/nanoserver powershell echo process
+PS C:\> docker run -d --isolation default microsoft/nanoserver powershell echo process
+PS C:\> docker run -d --isolation process microsoft/nanoserver powershell echo process
+```
+
+If you have set the `--exec-opt isolation=hyperv` option on the Docker `daemon`, or
+are running against a Windows client-based daemon, these commands are equivalent and
+result in `hyperv` isolation:
+
+```PowerShell
+PS C:\> docker run -d microsoft/nanoserver powershell echo hyperv
+PS C:\> docker run -d --isolation default microsoft/nanoserver powershell echo hyperv
+PS C:\> docker run -d --isolation hyperv microsoft/nanoserver powershell echo hyperv
+```
+
+### Configure namespaced kernel parameters (sysctls) at runtime
+
+The `--sysctl` option sets namespaced kernel parameters (sysctls) in the
+container. For example, to turn on IP forwarding in the container's
+network namespace, run this command:
+
+```bash
+$ docker run --sysctl net.ipv4.ip_forward=1 someimage
+```
+
+> **Note**: Not all sysctls are namespaced. Docker does not support changing sysctls
+> inside of a container that also modify the host system. As the kernel
+> evolves we expect to see more sysctls become namespaced.
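+
+A quick way to confirm that the setting only affects the container's own
+network namespace is to read it back from `/proc` (a minimal sketch; the stock
+`busybox` image is an arbitrary choice):
+
+```bash
+$ docker run --rm --sysctl net.ipv4.ip_forward=1 busybox cat /proc/sys/net/ipv4/ip_forward
+1
+```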
+
+#### Currently supported sysctls
+
+- `IPC Namespace`:
+
+  ```none
+  kernel.msgmax, kernel.msgmnb, kernel.msgmni, kernel.sem, kernel.shmall, kernel.shmmax, kernel.shmmni, kernel.shm_rmid_forced
+  Sysctls beginning with fs.mqueue.*
+  ```
+
+  If you use the `--ipc=host` option, these sysctls are not allowed.
+
+- `Network Namespace`:
+
+  ```none
+  Sysctls beginning with net.*
+  ```
+
+  If you use the `--network=host` option, using these sysctls is not allowed.
diff --git a/components/engine/docs/reference/commandline/save.md b/components/engine/docs/reference/commandline/save.md
new file mode 100644
index 0000000000..cba7385e11
--- /dev/null
+++ b/components/engine/docs/reference/commandline/save.md
@@ -0,0 +1,62 @@
+---
+title: "save"
+description: "The save command description and usage"
+keywords: "tarred, repository, backup"
+---
+
+
+# save
+
+```markdown
+Usage:  docker save [OPTIONS] IMAGE [IMAGE...]
+
+Save one or more images to a tar archive (streamed to STDOUT by default)
+
+Options:
+      --help            Print usage
+  -o, --output string   Write to a file, instead of STDOUT
+```
+
+## Description
+
+Produces a tarred repository to the standard output stream. Contains all parent
+layers, and all tags and versions, or the specified `repo:tag`, for each
+argument provided.
+
+## Examples
+
+### Create a backup that can then be used with `docker load`
+
+```bash
+$ docker save busybox > busybox.tar
+
+$ ls -sh busybox.tar
+
+2.7M busybox.tar
+
+$ docker save --output busybox.tar busybox
+
+$ ls -sh busybox.tar
+
+2.7M busybox.tar
+
+$ docker save -o fedora-all.tar fedora
+
+$ docker save -o fedora-latest.tar fedora:latest
+```
+
+### Cherry-pick particular tags
+
+You can even cherry-pick particular tags of an image repository:
+
+```bash
+$ docker save -o ubuntu.tar ubuntu:lucid ubuntu:saucy
+```
diff --git a/components/engine/docs/reference/commandline/search.md b/components/engine/docs/reference/commandline/search.md
new file mode 100644
index 0000000000..f645c78603
--- /dev/null
+++ b/components/engine/docs/reference/commandline/search.md
@@ -0,0 +1,149 @@
+---
+title: "search"
+description: "The search command description and usage"
+keywords: "search, hub, images"
+---
+
+
+# search
+
+```markdown
+Usage:  docker search [OPTIONS] TERM
+
+Search the Docker Hub for images
+
+Options:
+  -f, --filter value   Filter output based on conditions provided (default [])
+                       - is-automated=(true|false)
+                       - is-official=(true|false)
+                       - stars=<number> - image has at least 'number' stars
+      --help           Print usage
+      --limit int      Max number of search results (default 25)
+      --no-trunc       Don't truncate output
+```
+
+## Description
+
+Search [Docker Hub](https://hub.docker.com) for images.
+
+See [*Find Public Images on Docker Hub*](https://docs.docker.com/engine/tutorials/dockerrepos/#searching-for-images) for
+more details on finding shared images from the command line.
+
+> **Note**: Search queries return a maximum of 25 results.
+
+## Examples
+
+### Search images by name
+
+This example displays images with a name containing 'busybox':
+
+```none
+$ docker search busybox
+
+NAME                             DESCRIPTION                                      STARS   OFFICIAL   AUTOMATED
+busybox                          Busybox base image.                              316     [OK]
+progrium/busybox                                                                  50                 [OK]
+radial/busyboxplus               Full-chain, Internet enabled, busybox made...    8                  [OK]
+odise/busybox-python                                                              2                  [OK]
+azukiapp/busybox                 This image is meant to be used as the base...    2                  [OK]
+ofayau/busybox-jvm               Prepare busybox to install a 32 bits JVM.        1                  [OK]
+shingonoide/archlinux-busybox    Arch Linux, a lightweight and flexible Lin...    1                  [OK]
+odise/busybox-curl                                                                1                  [OK]
+ofayau/busybox-libc32            Busybox with 32 bits (and 64 bits) libs          1                  [OK]
+peelsky/zulu-openjdk-busybox                                                      1                  [OK]
+skomma/busybox-data              Docker image suitable for data volume cont...    1                  [OK]
+elektritter/busybox-teamspeak    Lightweight teamspeak3 container based on...     1                  [OK]
+socketplane/busybox                                                               1                  [OK]
+oveits/docker-nginx-busybox      This is a tiny NginX docker image based on...    0                  [OK]
+ggtools/busybox-ubuntu           Busybox ubuntu version with extra goodies        0                  [OK]
+nikfoundas/busybox-confd         Minimal busybox based distribution of confd      0                  [OK]
+openshift/busybox-http-app                                                        0                  [OK]
+jllopis/busybox                                                                   0                  [OK]
+swyckoff/busybox                                                                  0                  [OK]
+powellquiring/busybox                                                             0                  [OK]
+williamyeh/busybox-sh            Docker image for BusyBox's sh                    0                  [OK]
+simplexsys/busybox-cli-powered   Docker busybox images, with a few often us...    0                  [OK]
+fhisamoto/busybox-java           Busybox java                                     0                  [OK]
+scottabernethy/busybox                                                            0                  [OK]
+marclop/busybox-solr
+```
+
+### Display non-truncated description (--no-trunc)
+
+This example displays images with a name containing 'busybox' and at least
+3 stars, without truncating the description in the output:
+
+```bash
+$ docker search --filter=stars=3 --no-trunc busybox
+NAME                 DESCRIPTION                                                                               STARS   OFFICIAL   AUTOMATED
+busybox              Busybox base image.                                                                       325     [OK]
+progrium/busybox                                                                                               50                 [OK]
+radial/busyboxplus   Full-chain, Internet enabled, busybox made from scratch. Comes in git and cURL flavors.   8                  [OK]
+```
+
+### Limit search results (--limit)
+
+The `--limit` flag sets the maximum number of results returned by a search.
+This value can be between 1 and 100. The default value of `--limit` is 25.
+
+
+### Filtering
+
+The filtering flag (`-f` or `--filter`) format is a `key=value` pair. If there is more
+than one filter, then pass multiple flags (e.g. `--filter "foo=bar" --filter "bif=baz"`).
+
+The currently supported filters are:
+
+* stars (int - number of stars the image has)
+* is-automated (true|false) - is the image automated or not
+* is-official (true|false) - is the image official or not
+
+
+#### stars
+
+This example displays images with a name containing 'busybox' and at
+least 3 stars:
+
+```bash
+$ docker search --filter stars=3 busybox
+
+NAME                 DESCRIPTION                                     STARS   OFFICIAL   AUTOMATED
+busybox              Busybox base image.                             325     [OK]
+progrium/busybox                                                     50                 [OK]
+radial/busyboxplus   Full-chain, Internet enabled, busybox made...   8                  [OK]
+```
+
+
+#### is-automated
+
+This example displays images with a name containing 'busybox'
+that are automated builds:
+
+```bash
+$ docker search --filter is-automated busybox
+
+NAME                 DESCRIPTION                                     STARS   OFFICIAL   AUTOMATED
+progrium/busybox                                                     50                 [OK]
+radial/busyboxplus   Full-chain, Internet enabled, busybox made...   8                  [OK]
+```
+
+#### is-official
+
+This example displays images with a name containing 'busybox', with at least
+3 stars, that are official builds:
+
+```bash
+$ docker search --filter "is-official=true" --filter "stars=3" busybox
+
+NAME      DESCRIPTION           STARS   OFFICIAL   AUTOMATED
+busybox   Busybox base image.   325     [OK]
+```
diff --git a/components/engine/docs/reference/commandline/secret.md b/components/engine/docs/reference/commandline/secret.md
new file mode 100644
index 0000000000..50734407ab
--- /dev/null
+++ b/components/engine/docs/reference/commandline/secret.md
@@ -0,0 +1,45 @@
+---
+title: "secret"
+description: "The secret command description and usage"
+keywords: "secret"
+---
+
+
+# secret
+
+```markdown
+Usage:  docker secret COMMAND
+
+Manage Docker secrets
+
+Options:
+      --help   Print usage
+
+Commands:
+  create      Create a secret from a file or STDIN as content
+  inspect     Display detailed information on one or more secrets
+  ls          List secrets
+  rm          Remove one or more secrets
+
+Run 'docker secret COMMAND --help' for more information on a command.
+
+```
+
+## Description
+
+Manage secrets.
+
+## Related commands
+
+* [secret create](secret_create.md)
+* [secret inspect](secret_inspect.md)
+* [secret ls](secret_ls.md)
+* [secret rm](secret_rm.md)
diff --git a/components/engine/docs/reference/commandline/secret_create.md b/components/engine/docs/reference/commandline/secret_create.md
new file mode 100644
index 0000000000..e534dde553
--- /dev/null
+++ b/components/engine/docs/reference/commandline/secret_create.md
@@ -0,0 +1,99 @@
+---
+title: "secret create"
+description: "The secret create command description and usage"
+keywords: ["secret, create"]
+---
+
+
+# secret create
+
+```Markdown
+Usage:  docker secret create [OPTIONS] SECRET file|-
+
+Create a secret from a file or STDIN as content
+
+Options:
+      --help         Print usage
+  -l, --label list   Secret labels (default [])
+```
+
+## Description
+
+Creates a secret, using standard input or a file for the secret content. You
+must run this command on a manager node.
+
+For detailed information about using secrets, refer to [manage sensitive data with Docker secrets](https://docs.docker.com/engine/swarm/secrets/).
+
+## Examples
+
+### Create a secret
+
+```bash
+$ echo <secret> | docker secret create my_secret -
+
+onakdyv307se2tl7nl20anokv
+
+$ docker secret ls
+
+ID                          NAME                CREATED             UPDATED
+onakdyv307se2tl7nl20anokv   my_secret           6 seconds ago       6 seconds ago
+```
+
+### Create a secret with a file
+
+```bash
+$ docker secret create my_secret ./secret.json
+
+dg426haahpi5ezmkkj5kyl3sn
+
+$ docker secret ls
+
+ID                          NAME                CREATED             UPDATED
+dg426haahpi5ezmkkj5kyl3sn   my_secret           7 seconds ago       7 seconds ago
+```
+
+### Create a secret with labels
+
+```bash
+$ docker secret create --label env=dev \
+                       --label rev=20170324 \
+                       my_secret ./secret.json
+
+eo7jnzguqgtpdah3cm5srfb97
+```
+
+```none
+$ docker secret inspect my_secret
+
+[
+    {
+        "ID": "eo7jnzguqgtpdah3cm5srfb97",
+        "Version": {
+            "Index": 17
+        },
+        "CreatedAt": "2017-03-24T08:15:09.735271783Z",
+        "UpdatedAt": "2017-03-24T08:15:09.735271783Z",
+        "Spec": {
+            "Name": "my_secret",
+            "Labels": {
+                "env": "dev",
+                "rev": "20170324"
+            }
+        }
+    }
+]
+```
+
+
+## Related commands
+
+* [secret inspect](secret_inspect.md)
+* [secret ls](secret_ls.md)
+* [secret rm](secret_rm.md)
diff --git a/components/engine/docs/reference/commandline/secret_inspect.md b/components/engine/docs/reference/commandline/secret_inspect.md
new file mode 100644
index 0000000000..cecf3c1ddc
--- /dev/null
+++ b/components/engine/docs/reference/commandline/secret_inspect.md
@@ -0,0 +1,95 @@
+---
+title: "secret inspect"
+description: "The secret inspect command description and usage"
+keywords: ["secret, inspect"]
+---
+
+
+# secret inspect
+
+```Markdown
+Usage:  docker secret inspect [OPTIONS] SECRET [SECRET...]
+
+Display detailed information on one or more secrets
+
+Options:
+  -f, --format string   Format the output using the given Go template
+      --help            Print usage
+```
+
+## Description
+
+Inspects the specified secret. This command has to be run targeting a manager
+node.
+
+By default, this renders all results in a JSON array. If a format is specified,
+the given template will be executed for each result.
+
+Go's [text/template](http://golang.org/pkg/text/template/) package
+describes all the details of the format.
+
+For detailed information about using secrets, refer to [manage sensitive data with Docker secrets](https://docs.docker.com/engine/swarm/secrets/).
+
+## Examples
+
+### Inspect a secret by name or ID
+
+You can inspect a secret either by its *name* or its *ID*.
+
+For example, given the following secret:
+
+```bash
+$ docker secret ls
+
+ID                          NAME                CREATED             UPDATED
+eo7jnzguqgtpdah3cm5srfb97   my_secret           3 minutes ago       3 minutes ago
+```
+
+```none
+$ docker secret inspect my_secret
+
+[
+    {
+        "ID": "eo7jnzguqgtpdah3cm5srfb97",
+        "Version": {
+            "Index": 17
+        },
+        "CreatedAt": "2017-03-24T08:15:09.735271783Z",
+        "UpdatedAt": "2017-03-24T08:15:09.735271783Z",
+        "Spec": {
+            "Name": "my_secret",
+            "Labels": {
+                "env": "dev",
+                "rev": "20170324"
+            }
+        }
+    }
+]
+```
+
+### Formatting
+
+You can use the `--format` option to obtain specific information about a
+secret. The following example command outputs the creation time of the
+secret.
+
+```bash
+$ docker secret inspect --format='{{.CreatedAt}}' eo7jnzguqgtpdah3cm5srfb97
+
+2017-03-24 08:15:09.735271783 +0000 UTC
+```
+
+
+## Related commands
+
+* [secret create](secret_create.md)
+* [secret ls](secret_ls.md)
+* [secret rm](secret_rm.md)
diff --git a/components/engine/docs/reference/commandline/secret_ls.md b/components/engine/docs/reference/commandline/secret_ls.md
new file mode 100644
index 0000000000..9b60227b8e
--- /dev/null
+++ b/components/engine/docs/reference/commandline/secret_ls.md
@@ -0,0 +1,157 @@
+---
+title: "secret ls"
+description: "The secret ls command description and usage"
+keywords: ["secret, ls"]
+---
+
+
+# secret ls
+
+```Markdown
+Usage:  docker secret ls [OPTIONS]
+
+List secrets
+
+Aliases:
+  ls, list
+
+Options:
+  -f, --filter filter   Filter output based on conditions provided
+      --format string   Pretty-print secrets using a Go template
+      --help            Print usage
+  -q, --quiet           Only display IDs
+```
+
+## Description
+
+Run this command on a manager node to list the secrets in the swarm.
+
+For detailed information about using secrets, refer to [manage sensitive data with Docker secrets](https://docs.docker.com/engine/swarm/secrets/).
+
+## Examples
+
+```bash
+$ docker secret ls
+
+ID                          NAME                        CREATED             UPDATED
+6697bflskwj1998km1gnnjr38   q5s5570vtvnimefos1fyeo2u2   6 weeks ago         6 weeks ago
+9u9hk4br2ej0wgngkga6rp4hq   my_secret                   5 weeks ago         5 weeks ago
+mem02h8n73mybpgqjf0kfi1n0   test_secret                 3 seconds ago       3 seconds ago
+```
+
+### Filtering
+
+The filtering flag (`-f` or `--filter`) format is a `key=value` pair. If there
+is more than one filter, then pass multiple flags (e.g., `--filter "foo=bar" --filter "bif=baz"`).
+
+The currently supported filters are:
+
+* [id](secret_ls.md#id) (secret's ID)
+* [label](secret_ls.md#label) (`label=<key>` or `label=<key>=<value>`)
+* [name](secret_ls.md#name) (secret's name)
+
+#### id
+
+The `id` filter matches all or the prefix of a secret's ID.
+
+```bash
+$ docker secret ls -f "id=6697bflskwj1998km1gnnjr38"
+
+ID                          NAME                        CREATED             UPDATED
+6697bflskwj1998km1gnnjr38   q5s5570vtvnimefos1fyeo2u2   6 weeks ago         6 weeks ago
+```
+
+#### label
+
+The `label` filter matches secrets based on the presence of a `label` alone or
+a `label` and a value.
+
+The following filter matches all secrets with a `project` label regardless of
+its value:
+
+```bash
+$ docker secret ls --filter label=project
+
+ID                          NAME                CREATED             UPDATED
+mem02h8n73mybpgqjf0kfi1n0   test_secret         About an hour ago   About an hour ago
+```
+
+The following filter matches only secrets with the `project` label set to the
+`test` value:
+
+```bash
+$ docker secret ls --filter label=project=test
+
+ID                          NAME                CREATED             UPDATED
+mem02h8n73mybpgqjf0kfi1n0   test_secret         About an hour ago   About an hour ago
+```
+
+#### name
+
+The `name` filter matches all or the prefix of a secret's name.
+
+The following filter matches secrets with a name starting with `test`:
+
+```bash
+$ docker secret ls --filter name=test_secret
+
+ID                          NAME                CREATED             UPDATED
+mem02h8n73mybpgqjf0kfi1n0   test_secret         About an hour ago   About an hour ago
+```
+
+### Format the output
+
+The formatting option (`--format`) pretty-prints secrets output
+using a Go template.
+
+Valid placeholders for the Go template are listed below:
+
+| Placeholder  | Description                                                                           |
+| ------------ | ------------------------------------------------------------------------------------- |
+| `.ID`        | Secret ID                                                                             |
+| `.Name`      | Secret name                                                                           |
+| `.CreatedAt` | Time when the secret was created                                                      |
+| `.UpdatedAt` | Time when the secret was updated                                                      |
+| `.Labels`    | All labels assigned to the secret                                                     |
+| `.Label`     | Value of a specific label for this secret. For example `{{.Label "secret.ssh.key"}}`  |
+
+When using the `--format` option, the `secret ls` command will either
+output the data exactly as the template declares or, when using the
+`table` directive, will include column headers as well.
+
+The following example uses a template without headers and outputs the
+`ID` and `Name` entries separated by a colon for all secrets:
+
+```bash
+$ docker secret ls --format "{{.ID}}: {{.Name}}"
+
+77af4d6b9913: secret-1
+b6fa739cedf5: secret-2
+78a85c484f71: secret-3
+```
+
+To list all secrets with their name and created date in a table format, you
+can use:
+
+```bash
+$ docker secret ls --format "table {{.ID}}\t{{.Name}}\t{{.CreatedAt}}"
+
+ID                  NAME                      CREATED
+77af4d6b9913        secret-1                  5 minutes ago
+b6fa739cedf5        secret-2                  3 hours ago
+78a85c484f71        secret-3                  10 days ago
+```
+
+## Related commands
+
+* [secret create](secret_create.md)
+* [secret inspect](secret_inspect.md)
+* [secret rm](secret_rm.md)
diff --git a/components/engine/docs/reference/commandline/secret_rm.md b/components/engine/docs/reference/commandline/secret_rm.md
new file mode 100644
index 0000000000..1e10350f96
--- /dev/null
+++ b/components/engine/docs/reference/commandline/secret_rm.md
@@ -0,0 +1,54 @@
+---
+title: "secret rm"
+description: "The secret rm command description and usage"
+keywords: ["secret, rm"]
+---
+
+
+# secret rm
+
+```Markdown
+Usage:  docker secret rm SECRET [SECRET...]
+
+Remove one or more secrets
+
+Aliases:
+  rm, remove
+
+Options:
+      --help   Print usage
+```
+
+## Description
+
+Removes the specified secrets from the swarm. This command has to be run
+targeting a manager node.
+
+For detailed information about using secrets, refer to [manage sensitive data with Docker secrets](https://docs.docker.com/engine/swarm/secrets/).
+ +## Examples + +This example removes a secret: + +```bash +$ docker secret rm secret.json +sapth4csdo5b6wz2p5uimh5xg +``` + +> **Warning**: Unlike `docker rm`, this command does not ask for confirmation +> before removing a secret. + + +## Related commands + +* [secret create](secret_create.md) +* [secret inspect](secret_inspect.md) +* [secret ls](secret_ls.md) diff --git a/components/engine/docs/reference/commandline/service.md b/components/engine/docs/reference/commandline/service.md new file mode 100644 index 0000000000..2c12c67648 --- /dev/null +++ b/components/engine/docs/reference/commandline/service.md @@ -0,0 +1,42 @@ +--- +title: "service" +description: "The service command description and usage" +keywords: "service" +--- + + + +# service + +```markdown +Usage: docker service COMMAND + +Manage services + +Options: + --help Print usage + +Commands: + create Create a new service + inspect Display detailed information on one or more services + logs Fetch the logs of a service or task + ls List services + ps List the tasks of one or more services + rm Remove one or more services + scale Scale one or multiple replicated services + update Update a service + +Run 'docker service COMMAND --help' for more information on a command. +``` + +## Description + +Manage services. + diff --git a/components/engine/docs/reference/commandline/service_create.md b/components/engine/docs/reference/commandline/service_create.md new file mode 100644 index 0000000000..78faa98bf7 --- /dev/null +++ b/components/engine/docs/reference/commandline/service_create.md @@ -0,0 +1,846 @@ +--- +title: "service create" +description: "The service create command description and usage" +keywords: "service, create" +--- + + + +# service create + +```Markdown +Usage: docker service create [OPTIONS] IMAGE [COMMAND] [ARG...] 
+ +Create a new service + +Options: + --constraint list Placement constraints + --container-label list Container labels + -d, --detach Exit immediately instead of waiting for the service to converge (default true) + --dns list Set custom DNS servers + --dns-option list Set DNS options + --dns-search list Set custom DNS search domains + --endpoint-mode string Endpoint mode (vip or dnsrr) (default "vip") + --entrypoint command Overwrite the default ENTRYPOINT of the image + -e, --env list Set environment variables + --env-file list Read in a file of environment variables + --group list Set one or more supplementary user groups for the container + --health-cmd string Command to run to check health + --health-interval duration Time between running the check (ms|s|m|h) + --health-retries int Consecutive failures needed to report unhealthy + --health-start-period duration Start period for the container to initialize before counting retries towards unstable (ms|s|m|h) + --health-timeout duration Maximum time to allow one check to run (ms|s|m|h) + --help Print usage + --host list Set one or more custom host-to-IP mappings (host:ip) + --hostname string Container hostname + -l, --label list Service labels + --limit-cpu decimal Limit CPUs + --limit-memory bytes Limit Memory + --log-driver string Logging driver for service + --log-opt list Logging driver options + --mode string Service mode (replicated or global) (default "replicated") + --mount mount Attach a filesystem mount to the service + --name string Service name + --network list Network attachments + --no-healthcheck Disable any container-specified HEALTHCHECK + --placement-pref pref Add a placement preference + -p, --publish port Publish a port as a node port + -q, --quiet Suppress progress output + --read-only Mount the container's root filesystem as read only + --replicas uint Number of tasks + --reserve-cpu decimal Reserve CPUs + --reserve-memory bytes Reserve Memory + --restart-condition string Restart when condition is met ("none"|"on-failure"|"any") (default "any") + --restart-delay duration Delay between restart attempts (ns|us|ms|s|m|h) (default 5s) + --restart-max-attempts uint Maximum number of restarts before giving up + --restart-window duration Window used to evaluate the restart policy (ns|us|ms|s|m|h) + --rollback-delay duration Delay between task rollbacks (ns|us|ms|s|m|h) (default 0s) + --rollback-failure-action string Action on rollback failure ("pause"|"continue") (default "pause") + --rollback-max-failure-ratio float Failure rate to tolerate during a rollback (default 0) + --rollback-monitor duration Duration after each task rollback to monitor for failure (ns|us|ms|s|m|h) (default 5s) + --rollback-order string Rollback order ("start-first"|"stop-first") (default "stop-first") + --rollback-parallelism uint Maximum number of tasks rolled back simultaneously (0 to roll back all at once) (default 1) + --secret secret Specify secrets to expose to the service + --stop-grace-period duration Time to wait before force killing a container (ns|us|ms|s|m|h) (default 10s) + --stop-signal string Signal to stop the container + -t, --tty Allocate a pseudo-TTY + --update-delay duration Delay between updates (ns|us|ms|s|m|h) (default 0s) + --update-failure-action string Action on update failure ("pause"|"continue"|"rollback") (default "pause") + --update-max-failure-ratio float Failure rate to tolerate during an update (default 0) + --update-monitor duration Duration after each task update to monitor for failure (ns|us|ms|s|m|h) (default 
5s)
+      --update-order string              Update order ("start-first"|"stop-first") (default "stop-first")
+      --update-parallelism uint          Maximum number of tasks updated simultaneously (0 to update all at once) (default 1)
+  -u, --user string                      Username or UID (format: <name|uid>[:<group|gid>])
+      --with-registry-auth               Send registry authentication details to swarm agents
+  -w, --workdir string                   Working directory inside the container
+```
+
+## Description
+
+Creates a service as described by the specified parameters. You must run this
+command on a manager node.
+
+## Examples
+
+### Create a service
+
+```bash
+$ docker service create --name redis redis:3.0.6
+
+dmu1ept4cxcfe8k8lhtux3ro3
+
+$ docker service create --mode global --name redis2 redis:3.0.6
+
+a8q9dasaafudfs8q8w32udass
+
+$ docker service ls
+
+ID            NAME    MODE        REPLICAS  IMAGE
+dmu1ept4cxcf  redis   replicated  1/1       redis:3.0.6
+a8q9dasaafud  redis2  global      1/1       redis:3.0.6
+```
+
+### Create a service with 5 replica tasks (--replicas)
+
+Use the `--replicas` flag to set the number of replica tasks for a replicated
+service. The following command creates a `redis` service with `5` replica tasks:
+
+```bash
+$ docker service create --name redis --replicas=5 redis:3.0.6
+
+4cdgfyky7ozwh3htjfw0d12qv
+```
+
+The above command sets the *desired* number of tasks for the service. Even
+though the command returns immediately, actual scaling of the service may take
+some time. The `REPLICAS` column shows both the *actual* and *desired* number
+of replica tasks for the service.
+
+In the following example the desired state is `5` replicas, but the current
+number of `RUNNING` tasks is `3`:
+
+```bash
+$ docker service ls
+
+ID            NAME   MODE        REPLICAS  IMAGE
+4cdgfyky7ozw  redis  replicated  3/5       redis:3.0.6
+```
+
+Once all the tasks are created and `RUNNING`, the actual number of tasks is
+equal to the desired number:
+
+```bash
+$ docker service ls
+
+ID            NAME   MODE        REPLICAS  IMAGE
+4cdgfyky7ozw  redis  replicated  5/5       redis:3.0.6
+```
+
+### Create a service with secrets
+
+Use the `--secret` flag to give a container access to a
+[secret](secret_create.md).
+
+Create a service specifying a secret:
+
+```bash
+$ docker service create --name redis --secret secret.json redis:3.0.6
+
+4cdgfyky7ozwh3htjfw0d12qv
+```
+
+Create a service specifying the secret, target, user/group ID, and mode:
+
+```bash
+$ docker service create --name redis \
+    --secret source=ssh-key,target=ssh \
+    --secret source=app-key,target=app,uid=1000,gid=1001,mode=0400 \
+    redis:3.0.6
+
+4cdgfyky7ozwh3htjfw0d12qv
+```
+
+Secrets are located in `/run/secrets` in the container. If no target is
+specified, the name of the secret will be used as the in-memory file in the
+container. If a target is specified, that will be the filename. In the
+example above, two files will be created: `/run/secrets/ssh` and
+`/run/secrets/app` for each of the secret targets specified.
+
+### Create a service with a rolling update policy
+
+```bash
+$ docker service create \
+  --replicas 10 \
+  --name redis \
+  --update-delay 10s \
+  --update-parallelism 2 \
+  redis:3.0.6
+```
+
+When you run a [service update](service_update.md), the scheduler updates a
+maximum of 2 tasks at a time, with `10s` between updates. For more information,
+refer to the [rolling updates
+tutorial](https://docs.docker.com/engine/swarm/swarm-tutorial/rolling-update/).
+
+### Set environment variables (-e, --env)
+
+This sets environment variables for all tasks in a service.
For example:
+
+```bash
+$ docker service create --name redis_2 --replicas 5 --env MYVAR=foo redis:3.0.6
+```
+
+### Create a service with specific hostname (--hostname)
+
+This option sets the hostname of the service's containers to a specific string.
+For example:
+
+```bash
+$ docker service create --name redis --hostname myredis redis:3.0.6
+```
+
+### Set metadata on a service (-l, --label)
+
+A label is a `key=value` pair that applies metadata to a service. To label a
+service with two labels:
+
+```bash
+$ docker service create \
+  --name redis_2 \
+  --label com.example.foo="bar" \
+  --label bar=baz \
+  redis:3.0.6
+```
+
+For more information about labels, refer to [apply custom
+metadata](https://docs.docker.com/engine/userguide/labels-custom-metadata/).
+
+### Add bind-mounts or volumes
+
+Docker supports two different kinds of mounts, which allow containers to read
+from or write to files or directories on other containers or the host operating
+system. These types are _data volumes_ (often referred to simply as volumes) and
+_bind-mounts_.
+
+Additionally, Docker supports `tmpfs` mounts.
+
+A **bind-mount** makes a file or directory on the host available to the
+container it is mounted within. A bind-mount may be either read-only or
+read-write. For example, a container might share its host's DNS information by
+means of a bind-mount of the host's `/etc/resolv.conf`, or a container might
+write logs to its host's `/var/log/myContainerLogs` directory. If you use
+bind-mounts and your host and containers have different notions of permissions,
+access controls, or other such details, you will run into portability issues.
+
+A **named volume** is a mechanism for decoupling persistent data needed by your
+container from the image used to create the container and from the host machine.
+Named volumes are created and managed by Docker, and a named volume persists
+even when no container is currently using it. Data in named volumes can be
+shared between a container and the host machine, as well as between multiple
+containers. Docker uses a _volume driver_ to create, manage, and mount volumes.
+You can back up or restore volumes using Docker commands.
+
+A **tmpfs** mount creates a tmpfs inside a container for volatile data.
+
+Consider a situation where your image starts a lightweight web server. You could
+use that image as a base image, copy in your website's HTML files, and package
+that into another image. Each time your website changed, you'd need to build a
+new image and redeploy all of the containers serving your website. A better
+solution is to store the website in a named volume which is attached to each of
+your web server containers when they start. To update the website, you just
+update the named volume.
+
+For more information about named volumes, see
+[Data Volumes](https://docs.docker.com/engine/tutorials/dockervolumes/).
+
+The following table describes options which apply to both bind-mounts and named
+volumes in a service:
+
+| Option | Required | Description |
+|:-------|:---------|:------------|
+| `type` |  | The type of mount, can be either `volume`, `bind`, or `tmpfs`. Defaults to `volume` if no type is specified. `volume` mounts a [managed volume](volume_create.md) into the container, `bind` bind-mounts a directory or file from the host into the container, and `tmpfs` mounts a tmpfs in the container. |
+| `src` or `source` | for `type=bind` only | For `type=volume`, `src` is an optional way to specify the name of the volume (for example, `src=my-volume`). If the named volume does not exist, it is automatically created. If no `src` is specified, the volume is assigned a random name which is guaranteed to be unique on the host, but may not be unique cluster-wide. A randomly-named volume has the same lifecycle as its container and is destroyed when the container is destroyed (which is upon service update, or when scaling or re-balancing the service). For `type=bind`, `src` is required, and specifies an absolute path to the file or directory to bind-mount (for example, `src=/path/on/host/`). An error is produced if the file or directory does not exist. For `type=tmpfs`, `src` is not supported. |
+| `dst` or `destination` or `target` | yes | Mount path inside the container, for example `/some/path/in/container/`. If the path does not exist in the container's filesystem, the Engine creates a directory at the specified location before mounting the volume or bind-mount. |
+| `readonly` or `ro` |  | The Engine mounts binds and volumes read-write unless the `readonly` option is given when mounting the bind or volume. `true`, `1`, or no value mounts the bind or volume read-only; `false` or `0` mounts it read-write. |
+| `consistency` |  | The consistency requirements for the mount; one of `default` (equivalent to `consistent`), `consistent` (full consistency: the container runtime and the host maintain an identical view of the mount at all times), `cached` (the host's view of the mount is authoritative; there may be delays before updates made on the host are visible within a container), or `delegated` (the container runtime's view of the mount is authoritative; there may be delays before updates made in a container are visible on the host). |
+
+#### Bind Propagation
+
+Bind propagation refers to whether or not mounts created within a given
+bind-mount or named volume can be propagated to replicas of that mount. Consider
+a mount point `/mnt`, which is also mounted on `/tmp`. The propagation settings
+control whether a mount on `/tmp/a` would also be available on `/mnt/a`. Each
+propagation setting has a recursive counterpart. In the case of recursion,
+consider that `/tmp/a` is also mounted as `/foo`. The propagation settings
+control whether `/mnt/a` and/or `/tmp/a` would exist.
+
+The `bind-propagation` option defaults to `rprivate` for both bind-mounts and
+volume mounts, and is only configurable for bind-mounts. In other words, named
+volumes do not support bind propagation.
+
+- **`shared`**: Sub-mounts of the original mount are exposed to replica mounts,
+                and sub-mounts of replica mounts are also propagated to the
+                original mount.
+- **`slave`**: Similar to a shared mount, but only in one direction. If the
+               original mount exposes a sub-mount, the replica mount can see it.
+               However, if the replica mount exposes a sub-mount, the original
+               mount cannot see it.
+- **`private`**: The mount is private. Sub-mounts within it are not exposed to
+                 replica mounts, and sub-mounts of replica mounts are not
+                 exposed to the original mount.
+- **`rshared`**: The same as `shared`, but the propagation also extends to and from
+                 mount points nested within any of the original or replica mount
+                 points.
+- **`rslave`**: The same as `slave`, but the propagation also extends to and from
+                mount points nested within any of the original or replica mount
+                points.
+- **`rprivate`**: The default. The same as `private`, meaning that no mount points
+                  anywhere within the original or replica mount points propagate
+                  in either direction.
+
+For more information about bind propagation, see the
+[Linux kernel documentation for shared subtree](https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt).
+
+#### Options for Named Volumes
+
+The following options can only be used for named volumes (`type=volume`):
+
+| Option | Description |
+|:-------|:------------|
+| `volume-driver` | Name of the volume-driver plugin to use for the volume. Defaults to `"local"`, to use the local volume driver to create the volume if the volume does not exist. |
+| `volume-label` | One or more custom metadata ("labels") to apply to the volume upon creation. For example, `volume-label=mylabel=hello-world,my-other-label=hello-mars`. For more information about labels, refer to [apply custom metadata](https://docs.docker.com/engine/userguide/labels-custom-metadata/). |
+| `volume-nocopy` | By default, if you attach an empty volume to a container, and files or directories already existed at the mount-path in the container (`dst`), the Engine copies those files and directories into the volume, allowing the host to access them. Set `volume-nocopy` to disable copying files from the container's filesystem to the volume and mount the empty volume. A value is optional: `true` or `1` (the default if you do not provide a value) disables copying; `false` or `0` enables copying. |
+| `volume-opt` | Options specific to a given volume driver, which will be passed to the driver when creating the volume. Options are provided as a comma-separated list of key/value pairs, for example, `volume-opt=some-option=some-value,volume-opt=some-other-option=some-other-value`. For available options for a given driver, refer to that driver's documentation. |
+
+#### Options for tmpfs
+
+The following options can only be used for tmpfs mounts (`type=tmpfs`):
+
+| Option       | Description                                                                                |
+|:-------------|:--------------------------------------------------------------------------------------------|
+| `tmpfs-size` | Size of the tmpfs mount in bytes. Unlimited by default in Linux.                           |
+| `tmpfs-mode` | File mode of the tmpfs in octal (e.g. `"700"` or `"0700"`). Defaults to `"1777"` in Linux. |
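+
+As a sketch (the service name `tmpfs-test` and the size and mode values are
+illustrative, not prescribed by the reference above), a tmpfs mount combining
+these options might look like:
+
+```bash
+$ docker service create \
+  --name tmpfs-test \
+  --mount type=tmpfs,dst=/scratch,tmpfs-size=100000000,tmpfs-mode=1770 \
+  busybox top
+```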
+ + +#### Differences between "--mount" and "--volume" + +The `--mount` flag supports most options that are supported by the `-v` +or `--volume` flag for `docker run`, with some important exceptions: + +- The `--mount` flag allows you to specify a volume driver and volume driver + options *per volume*, without creating the volumes in advance. In contrast, + `docker run` allows you to specify a single volume driver which is shared + by all volumes, using the `--volume-driver` flag. + +- The `--mount` flag allows you to specify custom metadata ("labels") for a volume, + before the volume is created. + +- When you use `--mount` with `type=bind`, the host-path must refer to an *existing* + path on the host. The path will not be created for you and the service will fail + with an error if the path does not exist. + +- The `--mount` flag does not allow you to relabel a volume with `Z` or `z` flags, + which are used for `selinux` labeling. + +#### Create a service using a named volume + +The following example creates a service that uses a named volume: + +```bash +$ docker service create \ + --name my-service \ + --replicas 3 \ + --mount type=volume,source=my-volume,destination=/path/in/container,volume-label="color=red",volume-label="shape=round" \ + nginx:alpine +``` + +For each replica of the service, the engine requests a volume named "my-volume" +from the default ("local") volume driver where the task is deployed. If the +volume does not exist, the engine creates a new volume and applies the "color" +and "shape" labels. + +When the task is started, the volume is mounted on `/path/in/container/` inside +the container. + +Be aware that the default ("local") volume is a locally scoped volume driver. +This means that depending on where a task is deployed, either that task gets a +*new* volume named "my-volume", or shares the same "my-volume" with other tasks +of the same service. Multiple containers writing to a single shared volume can +cause data corruption if the software running inside the container is not +designed to handle concurrent processes writing to the same location. Also take +into account that containers can be re-scheduled by the Swarm orchestrator and +be deployed on a different node. + +#### Create a service that uses an anonymous volume + +The following command creates a service with three replicas with an anonymous +volume on `/path/in/container`: + +```bash +$ docker service create \ + --name my-service \ + --replicas 3 \ + --mount type=volume,destination=/path/in/container \ + nginx:alpine +``` + +In this example, no name (`source`) is specified for the volume, so a new volume +is created for each task. This guarantees that each task gets its own volume, +and volumes are not shared between tasks. Anonymous volumes are removed after +the task using them is complete. + +#### Create a service that uses a bind-mounted host directory + +The following example bind-mounts a host directory at `/path/in/container` in +the containers backing the service: + +```bash +$ docker service create \ + --name my-service \ + --mount type=bind,source=/path/on/host,destination=/path/in/container \ + nginx:alpine +``` + +### Set service mode (--mode) + +The service mode determines whether this is a _replicated_ service or a _global_ +service. A replicated service runs as many tasks as specified, while a global +service runs on each active node in the swarm. 
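+
+For contrast, a replicated service can be requested explicitly (a sketch; the
+name `redis_replicated` is arbitrary, and `--mode replicated` is the default,
+so the flag may be omitted):
+
+```bash
+$ docker service create \
+  --name redis_replicated \
+  --mode replicated \
+  --replicas 3 \
+  redis:3.0.6
+```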
+
+The following command creates a global service:
+
+```bash
+$ docker service create \
+  --name redis_2 \
+  --mode global \
+  redis:3.0.6
+```
+
+### Specify service constraints (--constraint)
+
+You can limit the set of nodes where a task can be scheduled by defining
+constraint expressions. Multiple constraints find nodes that satisfy every
+expression (AND match). Constraints can match node or Docker Engine labels as
+follows:
+
+| node attribute  | matches                  | example                                         |
+|:----------------|:-------------------------|:------------------------------------------------|
+| `node.id`       | Node ID                  | `node.id == 2ivku8v2gvtg4`                      |
+| `node.hostname` | Node hostname            | `node.hostname != node-2`                       |
+| `node.role`     | Node role                | `node.role == manager`                          |
+| `node.labels`   | user defined node labels | `node.labels.security == high`                  |
+| `engine.labels` | Docker Engine's labels   | `engine.labels.operatingsystem == ubuntu 14.04` |
+ + +`engine.labels` apply to Docker Engine labels like operating system, +drivers, etc. Swarm administrators add `node.labels` for operational purposes by +using the [`docker node update`](node_update.md) command. + +For example, the following limits tasks for the redis service to nodes where the +node type label equals queue: + +```bash +$ docker service create \ + --name redis_2 \ + --constraint 'node.labels.type == queue' \ + redis:3.0.6 +``` + +### Specify service placement preferences (--placement-pref) + +You can set up the service to divide tasks evenly over different categories of +nodes. One example of where this can be useful is to balance tasks over a set +of datacenters or availability zones. The example below illustrates this: + +```bash +$ docker service create \ + --replicas 9 \ + --name redis_2 \ + --placement-pref 'spread=node.labels.datacenter' \ + redis:3.0.6 +``` + +This uses `--placement-pref` with a `spread` strategy (currently the only +supported strategy) to spread tasks evenly over the values of the `datacenter` +node label. In this example, we assume that every node has a `datacenter` node +label attached to it. If there are three different values of this label among +nodes in the swarm, one third of the tasks will be placed on the nodes +associated with each value. This is true even if there are more nodes with one +value than another. For example, consider the following set of nodes: + +- Three nodes with `node.labels.datacenter=east` +- Two nodes with `node.labels.datacenter=south` +- One node with `node.labels.datacenter=west` + +Since we are spreading over the values of the `datacenter` label and the +service has 9 replicas, 3 replicas will end up in each datacenter. There are +three nodes associated with the value `east`, so each one will get one of the +three replicas reserved for this value. There are two nodes with the value +`south`, and the three replicas for this value will be divided between them, +with one receiving two replicas and another receiving just one. Finally, `west` +has a single node that will get all three replicas reserved for `west`. + +If the nodes in one category (for example, those with +`node.labels.datacenter=south`) can't handle their fair share of tasks due to +constraints or resource limitations, the extra tasks will be assigned to other +nodes instead, if possible. + +Both engine labels and node labels are supported by placement preferences. The +example above uses a node label, because the label is referenced with +`node.labels.datacenter`. To spread over the values of an engine label, use +`--placement-pref spread=engine.labels.`. + +It is possible to add multiple placement preferences to a service. This +establishes a hierarchy of preferences, so that tasks are first divided over +one category, and then further divided over additional categories. One example +of where this may be useful is dividing tasks fairly between datacenters, and +then splitting the tasks within each datacenter over a choice of racks. To add +multiple placement preferences, specify the `--placement-pref` flag multiple +times. The order is significant, and the placement preferences will be applied +in the order given when making scheduling decisions. + +The following example sets up a service with multiple placement preferences. 
+
+Tasks are spread first over the various datacenters, and then over racks
+(as indicated by the respective labels):
+
+```bash
+$ docker service create \
+  --replicas 9 \
+  --name redis_2 \
+  --placement-pref 'spread=node.labels.datacenter' \
+  --placement-pref 'spread=node.labels.rack' \
+  redis:3.0.6
+```
+
+When updating a service with `docker service update`, `--placement-pref-add`
+appends a new placement preference after all existing placement preferences.
+`--placement-pref-rm` removes an existing placement preference that matches the
+argument.
+
+### Attach a service to an existing network (--network)
+
+You can use overlay networks to connect one or more services within the swarm.
+
+First, create an overlay network on a manager node using the `docker network create`
+command:
+
+```bash
+$ docker network create --driver overlay my-network
+
+etjpu59cykrptrgw0z0hk5snf
+```
+
+After you create an overlay network in swarm mode, all manager nodes have
+access to the network.
+
+When you create a service, pass the `--network` flag to attach the service to
+the overlay network:
+
+```bash
+$ docker service create \
+  --replicas 3 \
+  --network my-network \
+  --name my-web \
+  nginx
+
+716thylsndqma81j6kkkb5aus
+```
+
+The swarm extends `my-network` to each node running the service.
+
+Containers on the same network can access each other using
+[service discovery](https://docs.docker.com/engine/swarm/networking/#use-swarm-mode-service-discovery).
+
+### Publish service ports externally to the swarm (-p, --publish)
+
+You can publish service ports to make them available externally to the swarm
+using the `--publish` flag:
+
+```bash
+$ docker service create --publish <TARGET-PORT>:<SERVICE-PORT> nginx
+```
+
+For example:
+
+```bash
+$ docker service create --name my_web --replicas 3 --publish 8080:80 nginx
+```
+
+When you publish a service port, the swarm routing mesh makes the service
+accessible at the target port on every node, regardless of whether there is a
+task for the service running on the node. For more information refer to
+[Use swarm mode routing mesh](https://docs.docker.com/engine/swarm/ingress/).
+
+### Publish a port for TCP only or UDP only
+
+By default, when you publish a port, it is a TCP port. You can
+specifically publish a UDP port instead of or in addition to a TCP port. When
+you publish both TCP and UDP ports, Docker 1.12.2 and earlier require you to
+add the suffix `/tcp` for TCP ports. Otherwise it is optional.
+
+#### TCP only
+
+The following two commands are equivalent.
+
+```bash
+$ docker service create --name dns-cache -p 53:53 dns-cache
+
+$ docker service create --name dns-cache -p 53:53/tcp dns-cache
+```
+
+#### TCP and UDP
+
+```bash
+$ docker service create --name dns-cache -p 53:53/tcp -p 53:53/udp dns-cache
+```
+
+#### UDP only
+
+```bash
+$ docker service create --name dns-cache -p 53:53/udp dns-cache
+```
+
+### Create services using templates
+
+You can use templates for some flags of `service create`, using the syntax
+provided by Go's [text/template](http://golang.org/pkg/text/template/) package.
+
+The following flags are supported:
+
+- `--hostname`
+- `--mount`
+- `--env`
+
+Valid placeholders for the Go template are listed below:
+
+| Placeholder       | Description    |
+|:------------------|:---------------|
+| `.Service.ID`     | Service ID     |
+| `.Service.Name`   | Service name   |
+| `.Service.Labels` | Service labels |
+| `.Node.ID`        | Node ID        |
+| `.Task.ID`        | Task ID        |
+| `.Task.Name`      | Task name      |
+| `.Task.Slot`      | Task slot      |
+ + +#### Template example + +In this example, we are going to set the template of the created containers based on the +service's name and the node's ID where it sits. + +```bash +$ docker service create --name hosttempl \ + --hostname="{{.Node.ID}}-{{.Service.Name}}"\ + busybox top + +va8ew30grofhjoychbr6iot8c + +$ docker service ps va8ew30grofhjoychbr6iot8c + +ID NAME IMAGE NODE DESIRED STATE CURRENT STATE ERROR PORTS +wo41w8hg8qan hosttempl.1 busybox:latest@sha256:29f5d56d12684887bdfa50dcd29fc31eea4aaf4ad3bec43daf19026a7ce69912 2e7a8a9c4da2 Running Running about a minute ago + +$ docker inspect --format="{{.Config.Hostname}}" hosttempl.1.wo41w8hg8qanxwjwsg4kxpprj + +x3ti0erg11rjpg64m75kej2mz-hosttempl +``` + +## Related commands + +* [service inspect](service_inspect.md) +* [service logs](service_logs.md) +* [service ls](service_ls.md) +* [service rm](service_rm.md) +* [service scale](service_scale.md) +* [service ps](service_ps.md) +* [service update](service_update.md) + + diff --git a/components/engine/docs/reference/commandline/service_inspect.md b/components/engine/docs/reference/commandline/service_inspect.md new file mode 100644 index 0000000000..24c593cec9 --- /dev/null +++ b/components/engine/docs/reference/commandline/service_inspect.md @@ -0,0 +1,170 @@ +--- +title: "service inspect" +description: "The service inspect command description and usage" +keywords: "service, inspect" +--- + + + +# service inspect + +```Markdown +Usage: docker service inspect [OPTIONS] SERVICE [SERVICE...] + +Display detailed information on one or more services + +Options: + -f, --format string Format the output using the given Go template + --help Print usage + --pretty Print the information in a human friendly format +``` + +## Description + +Inspects the specified service. This command has to be run targeting a manager +node. + +By default, this renders all results in a JSON array. If a format is specified, +the given template will be executed for each result. + +Go's [text/template](http://golang.org/pkg/text/template/) package +describes all the details of the format. + +## Examples + +### Inspect a service by name or ID + +You can inspect a service, either by its *name*, or *ID* + +For example, given the following service; + +```bash +$ docker service ls +ID NAME MODE REPLICAS IMAGE +dmu1ept4cxcf redis replicated 3/3 redis:3.0.6 +``` + +Both `docker service inspect redis`, and `docker service inspect dmu1ept4cxcf` +produce the same result: + +```none +$ docker service inspect redis + +[ + { + "ID": "dmu1ept4cxcfe8k8lhtux3ro3", + "Version": { + "Index": 12 + }, + "CreatedAt": "2016-06-17T18:44:02.558012087Z", + "UpdatedAt": "2016-06-17T18:44:02.558012087Z", + "Spec": { + "Name": "redis", + "TaskTemplate": { + "ContainerSpec": { + "Image": "redis:3.0.6" + }, + "Resources": { + "Limits": {}, + "Reservations": {} + }, + "RestartPolicy": { + "Condition": "any", + "MaxAttempts": 0 + }, + "Placement": {} + }, + "Mode": { + "Replicated": { + "Replicas": 1 + } + }, + "UpdateConfig": {}, + "EndpointSpec": { + "Mode": "vip" + } + }, + "Endpoint": { + "Spec": {} + } + } +] +``` + +```bash +$ docker service inspect dmu1ept4cxcf + +[ + { + "ID": "dmu1ept4cxcfe8k8lhtux3ro3", + "Version": { + "Index": 12 + }, + ... 
+ } +] +``` + +### Formatting + +You can print the inspect output in a human-readable format instead of the default +JSON output, by using the `--pretty` option: + +```bash +$ docker service inspect --pretty frontend + +ID: c8wgl7q4ndfd52ni6qftkvnnp +Name: frontend +Labels: + - org.example.projectname=demo-app +Service Mode: REPLICATED + Replicas: 5 +Placement: +UpdateConfig: + Parallelism: 0 + On failure: pause + Max failure ratio: 0 +ContainerSpec: + Image: nginx:alpine +Resources: +Networks: net1 +Endpoint Mode: vip +Ports: + PublishedPort = 4443 + Protocol = tcp + TargetPort = 443 + PublishMode = ingress +``` + +You can also use `--format pretty` for the same effect. + + +#### Find the number of tasks running as part of a service + +The `--format` option can be used to obtain specific information about a +service. For example, the following command outputs the number of replicas +of the "redis" service. + +```bash +$ docker service inspect --format='{{.Spec.Mode.Replicated.Replicas}}' redis + +10 +``` + + +## Related commands + +* [service create](service_create.md) +* [service logs](service_logs.md) +* [service ls](service_ls.md) +* [service rm](service_rm.md) +* [service scale](service_scale.md) +* [service ps](service_ps.md) +* [service update](service_update.md) diff --git a/components/engine/docs/reference/commandline/service_logs.md b/components/engine/docs/reference/commandline/service_logs.md new file mode 100644 index 0000000000..fd328d0f6d --- /dev/null +++ b/components/engine/docs/reference/commandline/service_logs.md @@ -0,0 +1,85 @@ +--- +title: "service logs" +description: "The service logs command description and usage" +keywords: "service, task, logs" +--- + + + +# service logs + +```Markdown +Usage: docker service logs [OPTIONS] SERVICE|TASK + +Fetch the logs of a service or task + +Options: + -f, --follow Follow log output + --help Print usage + --no-resolve Do not map IDs to Names in output + --no-task-ids Do not include task IDs in output + --no-trunc Do not truncate output + --since string Show logs since timestamp + --tail string Number of lines to show from the end of the logs (default "all") + -t, --timestamps Show timestamps +``` + +## Description + +The `docker service logs` command batch-retrieves logs present at the time of execution. + +The `docker service logs` command can be used with either the name or ID of a +service, or with the ID of a task. If a service is passed, it will display logs +for all of the containers in that service. If a task is passed, it will only +display logs from that particular task. + +> **Note**: This command is only functional for services that are started with +> the `json-file` or `journald` logging driver. + +For more information about selecting and configuring logging drivers, refer to +[Configure logging drivers](https://docs.docker.com/engine/admin/logging/overview/). + +The `docker service logs --follow` command will continue streaming the new output from +the service's `STDOUT` and `STDERR`. + +Passing a negative number or a non-integer to `--tail` is invalid and the +value is set to `all` in that case. + +The `docker service logs --timestamps` command will add an [RFC3339Nano timestamp](https://golang.org/pkg/time/#pkg-constants) +, for example `2014-09-16T06:17:46.000000000Z`, to each +log entry. To ensure that the timestamps are aligned the +nano-second part of the timestamp will be padded with zero when necessary. 
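+
+As an illustration, timestamped output might look like the following (the
+service name, task prefix, node, and log message here are illustrative):
+
+```bash
+$ docker service logs -t my-web
+
+my-web.1.7za1ijgmsqe6@node1    | 2017-05-16T22:59:45.547213081Z starting nginx
+```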
+
+The `docker service logs --details` command will add extra attributes, such as
+environment variables and labels, provided to `--log-opt` when creating the
+service.
+
+The `--since` option shows only the service logs generated after
+a given date. You can specify the date as an RFC 3339 date, a UNIX
+timestamp, or a Go duration string (e.g. `1m30s`, `3h`). Besides the RFC 3339
+date format, you may also use RFC3339Nano, `2006-01-02T15:04:05`,
+`2006-01-02T15:04:05.999999999`, `2006-01-02Z07:00`, and `2006-01-02`. The local
+timezone on the client will be used if you do not provide either a `Z` or a
+`+-00:00` timezone offset at the end of the timestamp. When providing Unix
+timestamps, enter seconds[.nanoseconds], where seconds is the number of seconds
+that have elapsed since January 1, 1970 (midnight UTC/GMT), not counting leap
+seconds (aka Unix epoch or Unix time), and the optional .nanoseconds field is a
+fraction of a second no more than nine digits long. You can combine the
+`--since` option with either or both of the `--follow` or `--tail` options.
+
+## Related commands
+
+* [service create](service_create.md)
+* [service inspect](service_inspect.md)
+* [service ls](service_ls.md)
+* [service rm](service_rm.md)
+* [service scale](service_scale.md)
+* [service ps](service_ps.md)
+* [service update](service_update.md)
diff --git a/components/engine/docs/reference/commandline/service_ls.md b/components/engine/docs/reference/commandline/service_ls.md
new file mode 100644
index 0000000000..c222c04857
--- /dev/null
+++ b/components/engine/docs/reference/commandline/service_ls.md
@@ -0,0 +1,164 @@
+---
+title: "service ls"
+description: "The service ls command description and usage"
+keywords: "service, ls"
+---
+
+# service ls
+
+```Markdown
+Usage:  docker service ls [OPTIONS]
+
+List services
+
+Aliases:
+  ls, list
+
+Options:
+  -f, --filter filter   Filter output based on conditions provided
+      --format string   Pretty-print services using a Go template
+      --help            Print usage
+  -q, --quiet           Only display IDs
+```
+
+## Description
+
+When run targeting a manager node, this command lists the services that are
+running in the swarm.
+
+## Examples
+
+On a manager node:
+
+```bash
+$ docker service ls
+
+ID            NAME      MODE        REPLICAS  IMAGE
+c8wgl7q4ndfd  frontend  replicated  5/5       nginx:alpine
+dmu1ept4cxcf  redis     replicated  3/3       redis:3.0.6
+iwe3278osahj  mongo     global      7/7       mongo:3.3
+```
+
+The `REPLICAS` column shows both the *actual* and *desired* number of tasks for
+the service.
+
+### Filtering
+
+The filtering flag (`-f` or `--filter`) format is a `key=value` pair. If there
+is more than one filter, then pass multiple flags (e.g., `--filter "foo=bar" --filter "bif=baz"`).
+
+The currently supported filters are:
+
+* [id](service_ls.md#id)
+* [label](service_ls.md#label)
+* [mode](service_ls.md#mode)
+* [name](service_ls.md#name)
+
+#### id
+
+The `id` filter matches all or part of a service's ID.
+
+```bash
+$ docker service ls -f "id=0bcjw"
+ID            NAME   MODE        REPLICAS  IMAGE
+0bcjwfh8ychr  redis  replicated  1/1       redis:3.0.6
+```
+
+#### label
+
+The `label` filter matches services based on the presence of a `label` alone or
+a `label` and a value.
+ +The following filter matches all services with a `project` label regardless of +its value: + +```bash +$ docker service ls --filter label=project +ID NAME MODE REPLICAS IMAGE +01sl1rp6nj5u frontend2 replicated 1/1 nginx:alpine +36xvvwwauej0 frontend replicated 5/5 nginx:alpine +74nzcxxjv6fq backend replicated 3/3 redis:3.0.6 +``` + +The following filter matches only services with the `project` label with the +`project-a` value. + +```bash +$ docker service ls --filter label=project=project-a +ID NAME MODE REPLICAS IMAGE +36xvvwwauej0 frontend replicated 5/5 nginx:alpine +74nzcxxjv6fq backend replicated 3/3 redis:3.0.6 +``` + +#### mode + +The `mode` filter matches on the mode (either `replicated` or `global`) of a service. + +The following filter matches only `global` services. + +```bash +$ docker service ls --filter mode=global +ID NAME MODE REPLICAS IMAGE +w7y0v2yrn620 top global 1/1 busybox +``` + +#### name + +The `name` filter matches on all or part of a service's name. + +The following filter matches services with a name containing `redis`. + +```bash +$ docker service ls --filter name=redis +ID NAME MODE REPLICAS IMAGE +0bcjwfh8ychr redis replicated 1/1 redis:3.0.6 +``` + +### Formatting + +The formatting options (`--format`) pretty-prints services output +using a Go template. + +Valid placeholders for the Go template are listed below: + +Placeholder | Description +------------|------------------------------------------------------------------------------------------ +`.ID` | Service ID +`.Name` | Service name +`.Mode` | Service mode (replicated, global) +`.Replicas` | Service replicas +`.Image` | Service image +`.Ports` | Service ports published in ingress mode + +When using the `--format` option, the `service ls` command will either +output the data exactly as the template declares or, when using the +`table` directive, includes column headers as well. + +The following example uses a template without headers and outputs the +`ID`, `Mode`, and `Replicas` entries separated by a colon for all services: + +```bash +$ docker service ls --format "{{.ID}}: {{.Mode}} {{.Replicas}}" + +0zmvwuiu3vue: replicated 10/10 +fm6uf97exkul: global 5/5 +``` + +## Related commands + +* [service create](service_create.md) +* [service inspect](service_inspect.md) +* [service logs](service_logs.md) +* [service rm](service_rm.md) +* [service scale](service_scale.md) +* [service ps](service_ps.md) +* [service update](service_update.md) diff --git a/components/engine/docs/reference/commandline/service_ps.md b/components/engine/docs/reference/commandline/service_ps.md new file mode 100644 index 0000000000..51e8604c7e --- /dev/null +++ b/components/engine/docs/reference/commandline/service_ps.md @@ -0,0 +1,194 @@ +--- +title: "service ps" +description: "The service ps command description and usage" +keywords: "service, tasks, ps" +aliases: ["/engine/reference/commandline/service_tasks/"] +--- + + + +# service ps + +```Markdown +Usage: docker service ps [OPTIONS] SERVICE [SERVICE...] + +List the tasks of one or more services + +Options: + -f, --filter filter Filter output based on conditions provided + --format string Pretty-print tasks using a Go template + --help Print usage + --no-resolve Do not map IDs to Names + --no-trunc Do not truncate output + -q, --quiet Only display task IDs +``` + +## Description + +Lists the tasks that are running as part of the specified services. This command +has to be run targeting a manager node. 
+ +## Examples + +### List the tasks that are part of a service + +The following command shows all the tasks that are part of the `redis` service: + +```bash +$ docker service ps redis + +ID NAME IMAGE NODE DESIRED STATE CURRENT STATE ERROR PORTS +0qihejybwf1x redis.1 redis:3.0.5 manager1 Running Running 8 seconds +bk658fpbex0d redis.2 redis:3.0.5 worker2 Running Running 9 seconds +5ls5s5fldaqg redis.3 redis:3.0.5 worker1 Running Running 9 seconds +8ryt076polmc redis.4 redis:3.0.5 worker1 Running Running 9 seconds +1x0v8yomsncd redis.5 redis:3.0.5 manager1 Running Running 8 seconds +71v7je3el7rr redis.6 redis:3.0.5 worker2 Running Running 9 seconds +4l3zm9b7tfr7 redis.7 redis:3.0.5 worker2 Running Running 9 seconds +9tfpyixiy2i7 redis.8 redis:3.0.5 worker1 Running Running 9 seconds +3w1wu13yupln redis.9 redis:3.0.5 manager1 Running Running 8 seconds +8eaxrb2fqpbn redis.10 redis:3.0.5 manager1 Running Running 8 seconds +``` + +In addition to _running_ tasks, the output also shows the task history. For +example, after updating the service to use the `redis:3.0.6` image, the output +may look like this: + +```bash +$ docker service ps redis + +ID NAME IMAGE NODE DESIRED STATE CURRENT STATE ERROR PORTS +50qe8lfnxaxk redis.1 redis:3.0.6 manager1 Running Running 6 seconds ago +ky2re9oz86r9 \_ redis.1 redis:3.0.5 manager1 Shutdown Shutdown 8 seconds ago +3s46te2nzl4i redis.2 redis:3.0.6 worker2 Running Running less than a second ago +nvjljf7rmor4 \_ redis.2 redis:3.0.6 worker2 Shutdown Rejected 23 seconds ago "No such image: redis@sha256:6…" +vtiuz2fpc0yb \_ redis.2 redis:3.0.5 worker2 Shutdown Shutdown 1 second ago +jnarweeha8x4 redis.3 redis:3.0.6 worker1 Running Running 3 seconds ago +vs448yca2nz4 \_ redis.3 redis:3.0.5 worker1 Shutdown Shutdown 4 seconds ago +jf1i992619ir redis.4 redis:3.0.6 worker1 Running Running 10 seconds ago +blkttv7zs8ee \_ redis.4 redis:3.0.5 worker1 Shutdown Shutdown 11 seconds ago +``` + +The number of items in the task history is determined by the +`--task-history-limit` option that was set when initializing the swarm. You can +change the task history retention limit using the +[`docker swarm update`](swarm_update.md) command. + +When deploying a service, docker resolves the digest for the service's +image, and pins the service to that digest. The digest is not shown by +default, but is printed if `--no-trunc` is used. The `--no-trunc` option +also shows the non-truncated task ID, and error-messages, as can be seen below; + +```bash +$ docker service ps --no-trunc redis + +ID NAME IMAGE NODE DESIRED STATE CURRENT STATE ERROR PORTS +50qe8lfnxaxksi9w2a704wkp7 redis.1 redis:3.0.6@sha256:6a692a76c2081888b589e26e6ec835743119fe453d67ecf03df7de5b73d69842 manager1 Running Running 5 minutes ago +ky2re9oz86r9556i2szb8a8af \_ redis.1 redis:3.0.5@sha256:f8829e00d95672c48c60f468329d6693c4bdd28d1f057e755f8ba8b40008682e worker2 Shutdown Shutdown 5 minutes ago +bk658fpbex0d57cqcwoe3jthu redis.2 redis:3.0.6@sha256:6a692a76c2081888b589e26e6ec835743119fe453d67ecf03df7de5b73d69842 worker2 Running Running 5 seconds +nvjljf7rmor4htv7l8rwcx7i7 \_ redis.2 redis:3.0.6@sha256:6a692a76c2081888b589e26e6ec835743119fe453d67ecf03df7de5b73d69842 worker2 Shutdown Rejected 5 minutes ago "No such image: redis@sha256:6a692a76c2081888b589e26e6ec835743119fe453d67ecf03df7de5b73d69842" +``` + +### Filtering + +The filtering flag (`-f` or `--filter`) format is a `key=value` pair. If there +is more than one filter, then pass multiple flags (e.g. `--filter "foo=bar" --filter "bif=baz"`). 
+Multiple filter flags are combined as an `OR` filter. For example, +`-f name=redis.1 -f name=redis.7` returns both `redis.1` and `redis.7` tasks. + +The currently supported filters are: + +* [id](#id) +* [name](#name) +* [node](#node) +* [desired-state](#desired-state) + + +#### id + +The `id` filter matches on all or a prefix of a task's ID. + +```bash +$ docker service ps -f "id=8" redis + +ID NAME IMAGE NODE DESIRED STATE CURRENT STATE ERROR PORTS +8ryt076polmc redis.4 redis:3.0.6 worker1 Running Running 9 seconds +8eaxrb2fqpbn redis.10 redis:3.0.6 manager1 Running Running 8 seconds +``` + +#### name + +The `name` filter matches on task names. + +```bash +$ docker service ps -f "name=redis.1" redis +ID NAME IMAGE NODE DESIRED STATE CURRENT STATE ERROR PORTS +qihejybwf1x5 redis.1 redis:3.0.6 manager1 Running Running 8 seconds +``` + + +#### node + +The `node` filter matches on a node name or a node ID. + +```bash +$ docker service ps -f "node=manager1" redis +ID NAME IMAGE NODE DESIRED STATE CURRENT STATE ERROR PORTS +0qihejybwf1x redis.1 redis:3.0.6 manager1 Running Running 8 seconds +1x0v8yomsncd redis.5 redis:3.0.6 manager1 Running Running 8 seconds +3w1wu13yupln redis.9 redis:3.0.6 manager1 Running Running 8 seconds +8eaxrb2fqpbn redis.10 redis:3.0.6 manager1 Running Running 8 seconds +``` + +#### desired-state + +The `desired-state` filter can take the values `running`, `shutdown`, or `accepted`. + +### Formatting + +The formatting options (`--format`) pretty-prints tasks output +using a Go template. + +Valid placeholders for the Go template are listed below: + +Placeholder | Description +----------------|------------------------------------------------------------------------------------------ +`.ID` | Task ID +`.Name` | Task name +`.Image` | Task image +`.Node` | Node ID +`.DesiredState` | Desired state of the task (`running`, `shutdown`, or `accepted`) +`.CurrentState` | Current state of the task +`.Error` | Error +`.Ports` | Task published ports + +When using the `--format` option, the `service ps` command will either +output the data exactly as the template declares or, when using the +`table` directive, includes column headers as well. + +The following example uses a template without headers and outputs the +`Name` and `Image` entries separated by a colon for all tasks: + +```bash +$ docker service ps --format "{{.Name}}: {{.Image}}" top +top.1: busybox +top.2: busybox +top.3: busybox +``` + +## Related commands + +* [service create](service_create.md) +* [service inspect](service_inspect.md) +* [service logs](service_logs.md) +* [service ls](service_ls.md) +* [service rm](service_rm.md) +* [service scale](service_scale.md) +* [service update](service_update.md) diff --git a/components/engine/docs/reference/commandline/service_rm.md b/components/engine/docs/reference/commandline/service_rm.md new file mode 100644 index 0000000000..448f2c3b24 --- /dev/null +++ b/components/engine/docs/reference/commandline/service_rm.md @@ -0,0 +1,60 @@ +--- +title: "service rm" +description: "The service rm command description and usage" +keywords: "service, rm" +--- + + + +# service rm + +```Markdown +Usage: docker service rm SERVICE [SERVICE...] + +Remove one or more services + +Aliases: + rm, remove + +Options: + --help Print usage +``` + +## Description + +Removes the specified services from the swarm. This command has to be run +targeting a manager node. 
+
+## Examples
+
+Remove the `redis` service:
+
+```bash
+$ docker service rm redis
+
+redis
+
+$ docker service ls
+
+ID  NAME  MODE  REPLICAS  IMAGE
+```
+
+> **Warning**: Unlike `docker rm`, this command does not ask for confirmation
+> before removing a running service.
+
+## Related commands
+
+* [service create](service_create.md)
+* [service inspect](service_inspect.md)
+* [service logs](service_logs.md)
+* [service ls](service_ls.md)
+* [service scale](service_scale.md)
+* [service ps](service_ps.md)
+* [service update](service_update.md)
diff --git a/components/engine/docs/reference/commandline/service_scale.md b/components/engine/docs/reference/commandline/service_scale.md
new file mode 100644
index 0000000000..a3aef5fd34
--- /dev/null
+++ b/components/engine/docs/reference/commandline/service_scale.md
@@ -0,0 +1,104 @@
+---
+title: "service scale"
+description: "The service scale command description and usage"
+keywords: "service, scale"
+---
+
+# service scale
+
+```markdown
+Usage:  docker service scale SERVICE=REPLICAS [SERVICE=REPLICAS...]
+
+Scale one or multiple replicated services
+
+Options:
+      --help   Print usage
+```
+
+## Description
+
+The scale command enables you to scale one or more replicated services either up
+or down to the desired number of replicas. This command cannot be applied to
+services in global mode. The command returns immediately, but the actual
+scaling of the service may take some time. To stop all replicas of a service
+while keeping the service active in the swarm, you can set the scale to 0.
+
+## Examples
+
+### Scale a single service
+
+The following command scales the "frontend" service to 50 tasks.
+
+```bash
+$ docker service scale frontend=50
+
+frontend scaled to 50
+```
+
+The following command tries to scale a global service to 10 tasks and returns an error.
+
+```bash
+$ docker service create --mode global --name backend backend:latest
+
+b4g08uwuairexjub6ome6usqh
+
+$ docker service scale backend=10
+
+backend: scale can only be used with replicated mode
+```
+
+Directly afterwards, run `docker service ls` to see the actual number of
+replicas.
+
+```bash
+$ docker service ls --filter name=frontend
+
+ID            NAME      MODE        REPLICAS  IMAGE
+3pr5mlvu3fh9  frontend  replicated  15/50     nginx:alpine
+```
+
+You can also scale a service using the [`docker service update`](service_update.md)
+command. The following commands are equivalent:
+
+```bash
+$ docker service scale frontend=50
+$ docker service update --replicas=50 frontend
+```
+
+### Scale multiple services
+
+The `docker service scale` command allows you to set the desired number of
+tasks for multiple services at once.
The following example scales both the +backend and frontend services: + +```bash +$ docker service scale backend=3 frontend=5 + +backend scaled to 3 +frontend scaled to 5 + +$ docker service ls + +ID NAME MODE REPLICAS IMAGE +3pr5mlvu3fh9 frontend replicated 5/5 nginx:alpine +74nzcxxjv6fq backend replicated 3/3 redis:3.0.6 +``` + +## Related commands + +* [service create](service_create.md) +* [service inspect](service_inspect.md) +* [service logs](service_logs.md) +* [service ls](service_ls.md) +* [service rm](service_rm.md) +* [service ps](service_ps.md) +* [service update](service_update.md) diff --git a/components/engine/docs/reference/commandline/service_update.md b/components/engine/docs/reference/commandline/service_update.md new file mode 100644 index 0000000000..93c5750eee --- /dev/null +++ b/components/engine/docs/reference/commandline/service_update.md @@ -0,0 +1,265 @@ +--- +title: "service update" +description: "The service update command description and usage" +keywords: "service, update" +--- + + + +# service update + +```Markdown +Usage: docker service update [OPTIONS] SERVICE + +Update a service + +Options: + --args command Service command args + --constraint-add list Add or update a placement constraint + --constraint-rm list Remove a constraint + --container-label-add list Add or update a container label + --container-label-rm list Remove a container label by its key + -d, --detach Exit immediately instead of waiting for the service to converge (default true) + --dns-add list Add or update a custom DNS server + --dns-option-add list Add or update a DNS option + --dns-option-rm list Remove a DNS option + --dns-rm list Remove a custom DNS server + --dns-search-add list Add or update a custom DNS search domain + --dns-search-rm list Remove a DNS search domain + --endpoint-mode string Endpoint mode (vip or dnsrr) + --entrypoint command Overwrite the default ENTRYPOINT of the image + --env-add list Add or update an environment variable + --env-rm list Remove an environment variable + --force Force update even if no changes require it + --group-add list Add an additional supplementary user group to the container + --group-rm list Remove a previously added supplementary user group from the container + --health-cmd string Command to run to check health + --health-interval duration Time between running the check (ms|s|m|h) + --health-retries int Consecutive failures needed to report unhealthy + --health-start-period duration Start period for the container to initialize before counting retries towards unstable (ms|s|m|h) + --health-timeout duration Maximum time to allow one check to run (ms|s|m|h) + --help Print usage + --host-add list Add or update a custom host-to-IP mapping (host:ip) + --host-rm list Remove a custom host-to-IP mapping (host:ip) + --hostname string Container hostname + --image string Service image tag + --label-add list Add or update a service label + --label-rm list Remove a label by its key + --limit-cpu decimal Limit CPUs + --limit-memory bytes Limit Memory + --log-driver string Logging driver for service + --log-opt list Logging driver options + --mount-add mount Add or update a mount on a service + --mount-rm list Remove a mount by its target path + --network-add list Add a network + --network-rm list Remove a network + --no-healthcheck Disable any container-specified HEALTHCHECK + --placement-pref-add pref Add a placement preference + --placement-pref-rm pref Remove a placement preference + --publish-add port Add or update a published port + --publish-rm 
port Remove a published port by its target port + -q, --quiet Suppress progress output + --read-only Mount the container's root filesystem as read only + --replicas uint Number of tasks + --reserve-cpu decimal Reserve CPUs + --reserve-memory bytes Reserve Memory + --restart-condition string Restart when condition is met ("none"|"on-failure"|"any") + --restart-delay duration Delay between restart attempts (ns|us|ms|s|m|h) + --restart-max-attempts uint Maximum number of restarts before giving up + --restart-window duration Window used to evaluate the restart policy (ns|us|ms|s|m|h) + --rollback Rollback to previous specification + --rollback-delay duration Delay between task rollbacks (ns|us|ms|s|m|h) + --rollback-failure-action string Action on rollback failure ("pause"|"continue") + --rollback-max-failure-ratio float Failure rate to tolerate during a rollback + --rollback-monitor duration Duration after each task rollback to monitor for failure (ns|us|ms|s|m|h) + --rollback-order string Rollback order ("start-first"|"stop-first") (default "stop-first") + --rollback-parallelism uint Maximum number of tasks rolled back simultaneously (0 to roll back all at once) + --secret-add secret Add or update a secret on a service + --secret-rm list Remove a secret + --stop-grace-period duration Time to wait before force killing a container (ns|us|ms|s|m|h) + --stop-signal string Signal to stop the container + -t, --tty Allocate a pseudo-TTY + --update-delay duration Delay between updates (ns|us|ms|s|m|h) + --update-failure-action string Action on update failure ("pause"|"continue"|"rollback") + --update-max-failure-ratio float Failure rate to tolerate during an update + --update-monitor duration Duration after each task update to monitor for failure (ns|us|ms|s|m|h) + --update-order string Update order ("start-first"|"stop-first") + --update-parallelism uint Maximum number of tasks updated simultaneously (0 to update all at once) + -u, --user string Username or UID (format: [:]) + --with-registry-auth Send registry authentication details to swarm agents + -w, --workdir string Working directory inside the container +``` + +## Description + +Updates a service as described by the specified parameters. This command has to be run targeting a manager node. +The parameters are the same as [`docker service create`](service_create.md). Please look at the description there +for further information. + +Normally, updating a service will only cause the service's tasks to be replaced with new ones if a change to the +service requires recreating the tasks for it to take effect. For example, only changing the +`--update-parallelism` setting will not recreate the tasks, because the individual tasks are not affected by this +setting. However, the `--force` flag will cause the tasks to be recreated anyway. This can be used to perform a +rolling restart without any changes to the service parameters. + +## Examples + +### Update a service + +```bash +$ docker service update --limit-cpu 2 redis +``` + +### Perform a rolling restart with no parameter changes + +```bash +$ docker service update --force --update-parallelism 1 --update-delay 30s redis +``` + +In this example, the `--force` flag causes the service's tasks to be shut down +and replaced with new ones even though none of the other parameters would +normally cause that to happen. The `--update-parallelism 1` setting ensures +that only one task is replaced at a time (this is the default behavior). 
The
+`--update-delay 30s` setting introduces a 30 second delay between tasks, so
+that the rolling restart happens gradually.
+
+### Add or remove mounts
+
+Use the `--mount-add` or `--mount-rm` options to add or remove a service's
+bind-mounts or volumes.
+
+The following example creates a service which mounts the `test-data` volume to
+`/somewhere`. The next step updates the service to also mount the `other-volume`
+volume to `/somewhere-else`. The last step unmounts the `/somewhere` mount
+point, effectively removing the `test-data` volume. Each command returns the
+service name.
+
+- The `--mount-add` flag takes the same parameters as the `--mount` flag on
+  `service create`. Refer to the [volumes and
+  bind-mounts](service_create.md#volumes-and-bind-mounts-mount) section in the
+  `service create` reference for details.
+
+- The `--mount-rm` flag takes the `target` path of the mount.
+
+```bash
+$ docker service create \
+    --name=myservice \
+    --mount type=volume,source=test-data,target=/somewhere \
+    nginx:alpine
+
+myservice
+
+$ docker service update \
+    --mount-add type=volume,source=other-volume,target=/somewhere-else \
+    myservice
+
+myservice
+
+$ docker service update --mount-rm /somewhere myservice
+
+myservice
+```
+
+### Rolling back to the previous version of a service
+
+Use the `--rollback` option to roll back to the previous version of the service.
+
+This will revert the service to the configuration that was in place before the
+most recent `docker service update` command.
+
+The following example updates the number of replicas for the service from 4 to
+5, and then rolls back to the previous configuration.
+
+```bash
+$ docker service update --replicas=5 web
+
+web
+
+$ docker service ls
+
+ID            NAME  MODE        REPLICAS  IMAGE
+80bvrzp6vxf3  web   replicated  0/5       nginx:alpine
+```
+
+Roll back the `web` service...
+
+```bash
+$ docker service update --rollback web
+
+web
+
+$ docker service ls
+
+ID            NAME  MODE        REPLICAS  IMAGE
+80bvrzp6vxf3  web   replicated  0/4       nginx:alpine
+```
+
+Other options can be combined with `--rollback` as well, for example,
+`--update-delay 0s` to execute the rollback without a delay between tasks:
+
+```bash
+$ docker service update \
+    --rollback \
+    --update-delay 0s \
+    web
+
+web
+```
+
+Services can also be set up to roll back to the previous version automatically
+when an update fails. To set up a service for automatic rollback, use
+`--update-failure-action=rollback`. A rollback will be triggered if the fraction
+of the tasks which failed to update successfully exceeds the value given with
+`--update-max-failure-ratio`.
+
+The rate, parallelism, and other parameters of a rollback operation are
+determined by the values passed with the following flags:
+
+- `--rollback-delay`
+- `--rollback-failure-action`
+- `--rollback-max-failure-ratio`
+- `--rollback-monitor`
+- `--rollback-parallelism`
+
+For example, a service set up with `--update-parallelism 1 --rollback-parallelism 3`
+will update one task at a time during a normal update, but during a rollback, 3
+tasks at a time will get rolled back. These rollback parameters are respected both
+during automatic rollbacks and for rollbacks initiated manually using `--rollback`.
+
+### Add or remove secrets
+
+Use the `--secret-add` or `--secret-rm` options to add or remove a service's
+secrets.
+ +The following example adds a secret named `ssh-2` and removes `ssh-1`: + +```bash +$ docker service update \ + --secret-add source=ssh-2,target=ssh-2 \ + --secret-rm ssh-1 \ + myservice +``` + +### Update services using templates + +Some flags of `service update` support the use of templating. +See [`service create`](./service_create.md#templating) for the reference. + +## Related commands + +* [service create](service_create.md) +* [service inspect](service_inspect.md) +* [service logs](service_logs.md) +* [service ls](service_ls.md) +* [service ps](service_ps.md) +* [service rm](service_rm.md) +* [service scale](service_scale.md) diff --git a/components/engine/docs/reference/commandline/stack.md b/components/engine/docs/reference/commandline/stack.md new file mode 100644 index 0000000000..94e3e252f9 --- /dev/null +++ b/components/engine/docs/reference/commandline/stack.md @@ -0,0 +1,39 @@ +--- +title: "stack" +description: "The stack command description and usage" +keywords: "stack" +--- + + + +# stack + +```markdown +Usage: docker stack COMMAND + +Manage Docker stacks + +Options: + --help Print usage + +Commands: + deploy Deploy a new stack or update an existing stack + ls List stacks + ps List the tasks in the stack + rm Remove the stack + services List the services in the stack + +Run 'docker stack COMMAND --help' for more information on a command. +``` + +## Description + +Manage stacks. + diff --git a/components/engine/docs/reference/commandline/stack_deploy.md b/components/engine/docs/reference/commandline/stack_deploy.md new file mode 100644 index 0000000000..d57ef0f76c --- /dev/null +++ b/components/engine/docs/reference/commandline/stack_deploy.md @@ -0,0 +1,107 @@ +--- +title: "stack deploy" +description: "The stack deploy command description and usage" +keywords: "stack, deploy, up" +--- + + + +# stack deploy + +```markdown +Usage: docker stack deploy [OPTIONS] STACK + +Deploy a new stack or update an existing stack + +Aliases: + deploy, up + +Options: + --bundle-file string Path to a Distributed Application Bundle file + -c, --compose-file string Path to a Compose file + --help Print usage + --prune Prune services that are no longer referenced + --with-registry-auth Send registry authentication details to Swarm agents +``` + +## Description + +Create and update a stack from a `compose` or a `dab` file on the swarm. This command +has to be run targeting a manager node. + +## Examples + +### Compose file + +The `deploy` command supports compose file version `3.0` and above." 
+ +```bash +$ docker stack deploy --compose-file docker-compose.yml vossibility + +Ignoring unsupported options: links + +Creating network vossibility_vossibility +Creating network vossibility_default +Creating service vossibility_nsqd +Creating service vossibility_logstash +Creating service vossibility_elasticsearch +Creating service vossibility_kibana +Creating service vossibility_ghollector +Creating service vossibility_lookupd +``` + +You can verify that the services were correctly created + +```bash +$ docker service ls + +ID NAME MODE REPLICAS IMAGE +29bv0vnlm903 vossibility_lookupd replicated 1/1 nsqio/nsq@sha256:eeba05599f31eba418e96e71e0984c3dc96963ceb66924dd37a47bf7ce18a662 +4awt47624qwh vossibility_nsqd replicated 1/1 nsqio/nsq@sha256:eeba05599f31eba418e96e71e0984c3dc96963ceb66924dd37a47bf7ce18a662 +4tjx9biia6fs vossibility_elasticsearch replicated 1/1 elasticsearch@sha256:12ac7c6af55d001f71800b83ba91a04f716e58d82e748fa6e5a7359eed2301aa +7563uuzr9eys vossibility_kibana replicated 1/1 kibana@sha256:6995a2d25709a62694a937b8a529ff36da92ebee74bafd7bf00e6caf6db2eb03 +9gc5m4met4he vossibility_logstash replicated 1/1 logstash@sha256:2dc8bddd1bb4a5a34e8ebaf73749f6413c101b2edef6617f2f7713926d2141fe +axqh55ipl40h vossibility_vossibility-collector replicated 1/1 icecrime/vossibility-collector@sha256:f03f2977203ba6253988c18d04061c5ec7aab46bca9dfd89a9a1fa4500989fba +``` + +### DAB file + +```bash +$ docker stack deploy --bundle-file vossibility-stack.dab vossibility + +Loading bundle from vossibility-stack.dab +Creating service vossibility_elasticsearch +Creating service vossibility_kibana +Creating service vossibility_logstash +Creating service vossibility_lookupd +Creating service vossibility_nsqd +Creating service vossibility_vossibility-collector +``` + +You can verify that the services were correctly created: + +```bash +$ docker service ls + +ID NAME MODE REPLICAS IMAGE +29bv0vnlm903 vossibility_lookupd replicated 1/1 nsqio/nsq@sha256:eeba05599f31eba418e96e71e0984c3dc96963ceb66924dd37a47bf7ce18a662 +4awt47624qwh vossibility_nsqd replicated 1/1 nsqio/nsq@sha256:eeba05599f31eba418e96e71e0984c3dc96963ceb66924dd37a47bf7ce18a662 +4tjx9biia6fs vossibility_elasticsearch replicated 1/1 elasticsearch@sha256:12ac7c6af55d001f71800b83ba91a04f716e58d82e748fa6e5a7359eed2301aa +7563uuzr9eys vossibility_kibana replicated 1/1 kibana@sha256:6995a2d25709a62694a937b8a529ff36da92ebee74bafd7bf00e6caf6db2eb03 +9gc5m4met4he vossibility_logstash replicated 1/1 logstash@sha256:2dc8bddd1bb4a5a34e8ebaf73749f6413c101b2edef6617f2f7713926d2141fe +axqh55ipl40h vossibility_vossibility-collector replicated 1/1 icecrime/vossibility-collector@sha256:f03f2977203ba6253988c18d04061c5ec7aab46bca9dfd89a9a1fa4500989fba +``` + +## Related commands + +* [stack ls](stack_ls.md) +* [stack ps](stack_ps.md) +* [stack rm](stack_rm.md) +* [stack services](stack_services.md) diff --git a/components/engine/docs/reference/commandline/stack_ls.md b/components/engine/docs/reference/commandline/stack_ls.md new file mode 100644 index 0000000000..91349b6947 --- /dev/null +++ b/components/engine/docs/reference/commandline/stack_ls.md @@ -0,0 +1,76 @@ +--- +title: "stack ls" +description: "The stack ls command description and usage" +keywords: "stack, ls" +--- + + + +# stack ls + +```markdown +Usage: docker stack ls + +List stacks + +Aliases: + ls, list + +Options: + --help Print usage + --format string Pretty-print stacks using a Go template +``` + +## Description + +Lists the stacks. 
+ +## Examples + +The following command shows all stacks and some additional information: + +```bash +$ docker stack ls + +ID SERVICES +vossibility-stack 6 +myapp 2 +``` + +### Formatting + +The formatting option (`--format`) pretty-prints stacks using a Go template. + +Valid placeholders for the Go template are listed below: + +| Placeholder | Description | +| ----------- | ------------------ | +| `.Name` | Stack name | +| `.Services` | Number of services | + +When using the `--format` option, the `stack ls` command either outputs +the data exactly as the template declares or, when using the +`table` directive, includes column headers as well. + +The following example uses a template without headers and outputs the +`Name` and `Services` entries separated by a colon for all stacks: + +```bash +$ docker stack ls --format "{{.Name}}: {{.Services}}" +web-server: 1 +web-cache: 4 +``` + +## Related commands + +* [stack deploy](stack_deploy.md) +* [stack ps](stack_ps.md) +* [stack rm](stack_rm.md) +* [stack services](stack_services.md) diff --git a/components/engine/docs/reference/commandline/stack_ps.md b/components/engine/docs/reference/commandline/stack_ps.md new file mode 100644 index 0000000000..901b46b22d --- /dev/null +++ b/components/engine/docs/reference/commandline/stack_ps.md @@ -0,0 +1,230 @@ +--- +title: "stack ps" +description: "The stack ps command description and usage" +keywords: "stack, ps" +--- + + + +# stack ps + +```markdown +Usage: docker stack ps [OPTIONS] STACK + +List the tasks in the stack + +Options: + -f, --filter filter Filter output based on conditions provided + --format string Pretty-print tasks using a Go template + --help Print usage + --no-resolve Do not map IDs to Names + --no-trunc Do not truncate output + -q, --quiet Only display task IDs +``` + +## Description + +Lists the tasks that are running as part of the specified stack. This +command has to be run targeting a manager node. + +## Examples + +### List the tasks that are part of a stack + +The following command shows all the tasks that are part of the `voting` stack: + +```bash +$ docker stack ps voting +ID NAME IMAGE NODE DESIRED STATE CURRENT STATE ERROR PORTS +xim5bcqtgk1b voting_worker.1 dockersamples/examplevotingapp_worker:latest node2 Running Running 2 minutes ago +q7yik0ks1in6 voting_result.1 dockersamples/examplevotingapp_result:before node1 Running Running 2 minutes ago +rx5yo0866nfx voting_vote.1 dockersamples/examplevotingapp_vote:before node3 Running Running 2 minutes ago +tz6j82jnwrx7 voting_db.1 postgres:9.4 node1 Running Running 2 minutes ago +w48spazhbmxc voting_redis.1 redis:alpine node2 Running Running 3 minutes ago +6jj1m02freg1 voting_visualizer.1 dockersamples/visualizer:stable node1 Running Running 2 minutes ago +kqgdmededccb voting_vote.2 dockersamples/examplevotingapp_vote:before node2 Running Running 2 minutes ago +t72q3z038jeh voting_redis.2 redis:alpine node3 Running Running 3 minutes ago +``` + +### Filtering + +The filtering flag (`-f` or `--filter`) format is a `key=value` pair. If there +is more than one filter, then pass multiple flags (e.g. `--filter "foo=bar" --filter "bif=baz"`). +Multiple filter flags are combined as an `OR` filter. For example, +`-f name=redis.1 -f name=redis.7` returns both `redis.1` and `redis.7` tasks. + +The currently supported filters are: + +* [id](#id) +* [name](#name) +* [node](#node) +* [desired-state](#desired-state) + +#### id + +The `id` filter matches on all or a prefix of a task's ID. 
+ +```bash +$ docker stack ps -f "id=t" voting +ID NAME IMAGE NODE DESIRED STATE CURRENTSTATE ERROR PORTS +tz6j82jnwrx7 voting_db.1 postgres:9.4 node1 Running Running 14 minutes ago +t72q3z038jeh voting_redis.2 redis:alpine node3 Running Running 14 minutes ago +``` + +#### name + +The `name` filter matches on task names. + +```bash +$ docker stack ps -f "name=voting_redis" voting +ID NAME IMAGE NODE DESIRED STATE CURRENTSTATE ERROR PORTS +w48spazhbmxc voting_redis.1 redis:alpine node2 Running Running 17 minutes ago +t72q3z038jeh voting_redis.2 redis:alpine node3 Running Running 17 minutes ago +``` + +#### node + +The `node` filter matches on a node name or a node ID. + +```bash +$ docker stack ps -f "node=node1" voting +ID NAME IMAGE NODE DESIRED STATE CURRENT STATE ERROR PORTS +q7yik0ks1in6 voting_result.1 dockersamples/examplevotingapp_result:before node1 Running Running 18 minutes ago +tz6j82jnwrx7 voting_db.1 postgres:9.4 node1 Running Running 18 minutes ago +6jj1m02freg1 voting_visualizer.1 dockersamples/visualizer:stable node1 Running Running 18 minutes ago +``` + +#### desired-state + +The `desired-state` filter can take the values `running`, `shutdown`, or `accepted`. + +```bash +$ docker stack ps -f "desired-state=running" voting +ID NAME IMAGE NODE DESIRED STATE CURRENT STATE ERROR PORTS +xim5bcqtgk1b voting_worker.1 dockersamples/examplevotingapp_worker:latest node2 Running Running 21 minutes ago +q7yik0ks1in6 voting_result.1 dockersamples/examplevotingapp_result:before node1 Running Running 21 minutes ago +rx5yo0866nfx voting_vote.1 dockersamples/examplevotingapp_vote:before node3 Running Running 21 minutes ago +tz6j82jnwrx7 voting_db.1 postgres:9.4 node1 Running Running 21 minutes ago +w48spazhbmxc voting_redis.1 redis:alpine node2 Running Running 21 minutes ago +6jj1m02freg1 voting_visualizer.1 dockersamples/visualizer:stable node1 Running Running 21 minutes ago +kqgdmededccb voting_vote.2 dockersamples/examplevotingapp_vote:before node2 Running Running 21 minutes ago +t72q3z038jeh voting_redis.2 redis:alpine node3 Running Running 21 minutes ago +``` + +### Formatting + +The formatting options (`--format`) pretty-prints tasks output using a Go template. + +Valid placeholders for the Go template are listed below: + +Placeholder | Description +----------------|------------------------------------------------------------------------------------------ +`.ID` | Task ID +`.Name` | Task name +`.Image` | Task image +`.Node` | Node ID +`.DesiredState` | Desired state of the task (`running`, `shutdown`, or `accepted`) +`.CurrentState` | Current state of the task +`.Error` | Error +`.Ports` | Task published ports + +When using the `--format` option, the `stack ps` command will either +output the data exactly as the template declares or, when using the +`table` directive, includes column headers as well. 
+ +The following example uses a template without headers and outputs the +`Name` and `Image` entries separated by a colon for all tasks: + +```bash +$ docker stack ps --format "{{.Name}}: {{.Image}}" voting +voting_worker.1: dockersamples/examplevotingapp_worker:latest +voting_result.1: dockersamples/examplevotingapp_result:before +voting_vote.1: dockersamples/examplevotingapp_vote:before +voting_db.1: postgres:9.4 +voting_redis.1: redis:alpine +voting_visualizer.1: dockersamples/visualizer:stable +voting_vote.2: dockersamples/examplevotingapp_vote:before +voting_redis.2: redis:alpine +``` + +### Do not map IDs to Names + +The `--no-resolve` option shows IDs for task name, without mapping IDs to Names. + +```bash +$ docker stack ps --no-resolve voting +ID NAME IMAGE NODE DESIRED STATE CURRENT STATE ERROR PORTS +xim5bcqtgk1b 10z9fjfqzsxnezo4hb81p8mqg.1 dockersamples/examplevotingapp_worker:latest qaqt4nrzo775jrx6detglho01 Running Running 30 minutes ago +q7yik0ks1in6 hbxltua1na7mgqjnidldv5m65.1 dockersamples/examplevotingapp_result:before mxpaef1tlh23s052erw88a4w5 Running Running 30 minutes ago +rx5yo0866nfx qyprtqw1g5nrki557i974ou1d.1 dockersamples/examplevotingapp_vote:before kanqcxfajd1r16wlnqcblobmm Running Running 31 minutes ago +tz6j82jnwrx7 122f0xxngg17z52be7xspa72x.1 postgres:9.4 mxpaef1tlh23s052erw88a4w5 Running Running 31 minutes ago +w48spazhbmxc tg61x8myx563ueo3urmn1ic6m.1 redis:alpine qaqt4nrzo775jrx6detglho01 Running Running 31 minutes ago +6jj1m02freg1 8cqlyi444kzd3panjb7edh26v.1 dockersamples/visualizer:stable mxpaef1tlh23s052erw88a4w5 Running Running 31 minutes ago +kqgdmededccb qyprtqw1g5nrki557i974ou1d.2 dockersamples/examplevotingapp_vote:before qaqt4nrzo775jrx6detglho01 Running Running 31 minutes ago +t72q3z038jeh tg61x8myx563ueo3urmn1ic6m.2 redis:alpine kanqcxfajd1r16wlnqcblobmm Running Running 31 minutes ago +``` + +### Do not truncate output + +When deploying a service, docker resolves the digest for the service's +image, and pins the service to that digest. The digest is not shown by +default, but is printed if `--no-trunc` is used. 
The `--no-trunc` option +also shows the non-truncated task IDs, and error-messages, as can be seen below: + +```bash +$ docker stack ps --no-trunc voting +ID NAME IMAGE NODE DESIRED STATE CURREN STATE ERROR PORTS +xim5bcqtgk1bxqz91jzo4a1s5 voting_worker.1 dockersamples/examplevotingapp_worker:latest@sha256:3e4ddf59c15f432280a2c0679c4fc5a2ee5a797023c8ef0d3baf7b1385e9fed node2 Running Runnin 32 minutes ago +q7yik0ks1in6kv32gg6y6yjf7 voting_result.1 dockersamples/examplevotingapp_result:before@sha256:83b56996e930c292a6ae5187fda84dd6568a19d97cdb933720be15c757b7463 node1 Running Runnin 32 minutes ago +rx5yo0866nfxc58zf4irsss6n voting_vote.1 dockersamples/examplevotingapp_vote:before@sha256:8e64b182c87de902f2b72321c89b4af4e2b942d76d0b772532ff27ec4c6ebf6 node3 Running Runnin 32 minutes ago +tz6j82jnwrx7n2offljp3mn03 voting_db.1 postgres:9.4@sha256:6046af499eae34d2074c0b53f9a8b404716d415e4a03e68bc1d2f8064f2b027 node1 Running Runnin 32 minutes ago +w48spazhbmxcmbjfi54gs7x90 voting_redis.1 redis:alpine@sha256:9cd405cd1ec1410eaab064a1383d0d8854d1ef74a54e1e4a92fb4ec7bdc3ee7 node2 Running Runnin 32 minutes ago +6jj1m02freg1n3z9n1evrzsbl voting_visualizer.1 dockersamples/visualizer:stable@sha256:f924ad66c8e94b10baaf7bdb9cd491ef4e982a1d048a56a17e02bf5945401e5 node1 Running Runnin 32 minutes ago +kqgdmededccbhz2wuc0e9hx7g voting_vote.2 dockersamples/examplevotingapp_vote:before@sha256:8e64b182c87de902f2b72321c89b4af4e2b942d76d0b772532ff27ec4c6ebf6 node2 Running Runnin 32 minutes ago +t72q3z038jehe1wbh9gdum076 voting_redis.2 redis:alpine@sha256:9cd405cd1ec1410eaab064a1383d0d8854d1ef74a54e1e4a92fb4ec7bdc3ee7 node3 Running Runnin 32 minutes ago +``` + +### Only display task IDs + +The `-q ` or `--quiet` option only shows IDs of the tasks in the stack. +This example outputs all task IDs of the "voting" stack; + +```bash +$ docker stack ps -q voting +xim5bcqtgk1b +q7yik0ks1in6 +rx5yo0866nfx +tz6j82jnwrx7 +w48spazhbmxc +6jj1m02freg1 +kqgdmededccb +t72q3z038jeh +``` + +This option can be used to perform batch operations. For example, you can use +the task IDs as input for other commands, such as `docker inspect`. The +following example inspects all tasks of the "voting" stack; + +```bash +$ docker inspect $(docker stack ps -q voting) + +[ + { + "ID": "xim5bcqtgk1b1gk0krq1", + "Version": { +(...) +``` + +## Related commands + +* [stack deploy](stack_deploy.md) +* [stack ls](stack_ls.md) +* [stack rm](stack_rm.md) +* [stack services](stack_services.md) diff --git a/components/engine/docs/reference/commandline/stack_rm.md b/components/engine/docs/reference/commandline/stack_rm.md new file mode 100644 index 0000000000..a1854ae6f0 --- /dev/null +++ b/components/engine/docs/reference/commandline/stack_rm.md @@ -0,0 +1,78 @@ +--- +title: "stack rm" +description: "The stack rm command description and usage" +keywords: "stack, rm, remove, down" +--- + + + +# stack rm + +```markdown +Usage: docker stack rm STACK [STACK...] + +Remove one or more stacks + +Aliases: + rm, remove, down + +Options: + --help Print usage +``` + +## Description + +Remove the stack from the swarm. This command has to be run targeting +a manager node. + +## Examples + +### Remove a stack + +This will remove the stack with the name `myapp`. Services, networks, and secrets associated with the stack will be removed. 
+ +```bash +$ docker stack rm myapp + +Removing service myapp_redis +Removing service myapp_web +Removing service myapp_lb +Removing network myapp_default +Removing network myapp_frontend +``` + +### Remove multiple stacks + +This will remove all the specified stacks, `myapp` and `vossibility`. Services, networks, and secrets associated with all the specified stacks will be removed. + +```bash +$ docker stack rm myapp vossibility + +Removing service myapp_redis +Removing service myapp_web +Removing service myapp_lb +Removing network myapp_default +Removing network myapp_frontend +Removing service vossibility_nsqd +Removing service vossibility_logstash +Removing service vossibility_elasticsearch +Removing service vossibility_kibana +Removing service vossibility_ghollector +Removing service vossibility_lookupd +Removing network vossibility_default +Removing network vossibility_vossibility +``` + +## Related commands + +* [stack deploy](stack_deploy.md) +* [stack ls](stack_ls.md) +* [stack ps](stack_ps.md) +* [stack services](stack_services.md) diff --git a/components/engine/docs/reference/commandline/stack_services.md b/components/engine/docs/reference/commandline/stack_services.md new file mode 100644 index 0000000000..b45047d409 --- /dev/null +++ b/components/engine/docs/reference/commandline/stack_services.md @@ -0,0 +1,105 @@ +--- +title: "stack services" +description: "The stack services command description and usage" +keywords: "stack, services" +advisory: "experimental" +--- + + + +# stack services (experimental) + +```markdown +Usage: docker stack services [OPTIONS] STACK + +List the services in the stack + +Options: + -f, --filter filter Filter output based on conditions provided + --format string Pretty-print services using a Go template + --help Print usage + -q, --quiet Only display IDs +``` + +## Description + +Lists the services that are running as part of the specified stack. This +command has to be run targeting a manager node. + +## Examples + +The following command shows all services in the `myapp` stack: + +```bash +$ docker stack services myapp + +ID NAME REPLICAS IMAGE COMMAND +7be5ei6sqeye myapp_web 1/1 nginx@sha256:23f809e7fd5952e7d5be065b4d3643fbbceccd349d537b62a123ef2201bc886f +dn7m7nhhfb9y myapp_db 1/1 mysql@sha256:a9a5b559f8821fe73d58c3606c812d1c044868d42c63817fa5125fd9d8b7b539 +``` + +### Filtering + +The filtering flag (`-f` or `--filter`) format is a `key=value` pair. If there +is more than one filter, then pass multiple flags (e.g. `--filter "foo=bar" --filter "bif=baz"`). +Multiple filter flags are combined as an `OR` filter. + +The following command shows both the `web` and `db` services: + +```bash +$ docker stack services --filter name=myapp_web --filter name=myapp_db myapp + +ID NAME REPLICAS IMAGE COMMAND +7be5ei6sqeye myapp_web 1/1 nginx@sha256:23f809e7fd5952e7d5be065b4d3643fbbceccd349d537b62a123ef2201bc886f +dn7m7nhhfb9y myapp_db 1/1 mysql@sha256:a9a5b559f8821fe73d58c3606c812d1c044868d42c63817fa5125fd9d8b7b539 +``` + +The currently supported filters are: + +* id / ID (`--filter id=7be5ei6sqeye`, or `--filter ID=7be5ei6sqeye`) +* name (`--filter name=myapp_web`) +* label (`--filter label=key=value`) + +### Formatting + +The formatting options (`--format`) pretty-prints services output +using a Go template. 
+ +Valid placeholders for the Go template are listed below: + +Placeholder | Description +------------|------------------------------------------------------------------------------------------ +`.ID` | Service ID +`.Name` | Service name +`.Mode` | Service mode (replicated, global) +`.Replicas` | Service replicas +`.Image` | Service image + +When using the `--format` option, the `stack services` command will either +output the data exactly as the template declares or, when using the +`table` directive, includes column headers as well. + +The following example uses a template without headers and outputs the +`ID`, `Mode`, and `Replicas` entries separated by a colon for all services: + +```bash +$ docker stack services --format "{{.ID}}: {{.Mode}} {{.Replicas}}" + +0zmvwuiu3vue: replicated 10/10 +fm6uf97exkul: global 5/5 +``` + + +## Related commands + +* [stack deploy](stack_deploy.md) +* [stack ls](stack_ls.md) +* [stack ps](stack_ps.md) +* [stack rm](stack_rm.md) diff --git a/components/engine/docs/reference/commandline/start.md b/components/engine/docs/reference/commandline/start.md new file mode 100644 index 0000000000..aa672289ec --- /dev/null +++ b/components/engine/docs/reference/commandline/start.md @@ -0,0 +1,34 @@ +--- +title: "start" +description: "The start command description and usage" +keywords: "Start, container, stopped" +--- + + + +# start + +```markdown +Usage: docker start [OPTIONS] CONTAINER [CONTAINER...] + +Start one or more stopped containers + +Options: + -a, --attach Attach STDOUT/STDERR and forward signals + --detach-keys string Override the key sequence for detaching a container + --help Print usage + -i, --interactive Attach container's STDIN +``` + +## Examples + +```bash +$ docker start my_container +``` diff --git a/components/engine/docs/reference/commandline/stats.md b/components/engine/docs/reference/commandline/stats.md new file mode 100644 index 0000000000..f5c058524d --- /dev/null +++ b/components/engine/docs/reference/commandline/stats.md @@ -0,0 +1,140 @@ +--- +title: "stats" +description: "The stats command description and usage" +keywords: "container, resource, statistics" +--- + + + +# stats + +```markdown +Usage: docker stats [OPTIONS] [CONTAINER...] + +Display a live stream of container(s) resource usage statistics + +Options: + -a, --all Show all containers (default shows just running) + --format string Pretty-print images using a Go template + --help Print usage + --no-stream Disable streaming stats and only pull the first result +``` + +## Description + +The `docker stats` command returns a live data stream for running containers. To limit data to one or more specific containers, specify a list of container names or ids separated by a space. You can specify a stopped container but stopped containers do not return any data. + +If you want more detailed information about a container's resource usage, use the `/containers/(id)/stats` API endpoint. + +## Examples + +Running `docker stats` on all running containers against a Linux daemon. + +```bash +$ docker stats +CONTAINER CPU % MEM USAGE / LIMIT MEM % NET I/O BLOCK I/O +1285939c1fd3 0.07% 796 KiB / 64 MiB 1.21% 788 B / 648 B 3.568 MB / 512 KB +9c76f7834ae2 0.07% 2.746 MiB / 64 MiB 4.29% 1.266 KB / 648 B 12.4 MB / 0 B +d1ea048f04e4 0.03% 4.583 MiB / 64 MiB 6.30% 2.854 KB / 648 B 27.7 MB / 0 B +``` + +Running `docker stats` on multiple containers by name and id against a Linux daemon. 
+ +```bash +$ docker stats fervent_panini 5acfcb1b4fd1 +CONTAINER CPU % MEM USAGE/LIMIT MEM % NET I/O +5acfcb1b4fd1 0.00% 115.2 MiB/1.045 GiB 11.03% 1.422 kB/648 B +fervent_panini 0.02% 11.08 MiB/1.045 GiB 1.06% 648 B/648 B +``` + +Running `docker stats` with customized format on all (Running and Stopped) containers. + +```bash +$ docker stats --all --format "table {{.ID}}\t{{.Name}}\t{{.CPUPerc}}\t{{.MemUsage}}" +CONTAINER ID NAME CPU % MEM USAGE / LIMIT +c9dfa83f0317f87637d5b7e67aa4223337d947215c5a9947e697e4f7d3e0f834 ecstatic_noether 0.00% 56KiB / 15.57GiB +8f92d01cf3b29b4f5fca4cd33d907e05def7af5a3684711b20a2369d211ec67f stoic_goodall 0.07% 32.86MiB / 15.57GiB +38dd23dba00f307d53d040c1d18a91361bbdcccbf592315927d56cf13d8b7343 drunk_visvesvaraya 0.00% 0B / 0B +5a8b07ec4cc52823f3cbfdb964018623c1ba307bce2c057ccdbde5f4f6990833 big_heisenberg 0.00% 0B / 0B +``` + +`drunk_visvesvaraya` and `big_heisenberg` are stopped containers in the above example. + +Running `docker stats` on all running containers against a Windows daemon. + +```powershell +PS E:\> docker stats +CONTAINER CPU % PRIV WORKING SET NET I/O BLOCK I/O +09d3bb5b1604 6.61% 38.21 MiB 17.1 kB / 7.73 kB 10.7 MB / 3.57 MB +9db7aa4d986d 9.19% 38.26 MiB 15.2 kB / 7.65 kB 10.6 MB / 3.3 MB +3f214c61ad1d 0.00% 28.64 MiB 64 kB / 6.84 kB 4.42 MB / 6.93 MB +``` + +Running `docker stats` on multiple containers by name and id against a Windows daemon. + +```powershell +PS E:\> docker ps -a +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +3f214c61ad1d nanoserver "cmd" 2 minutes ago Up 2 minutes big_minsky +9db7aa4d986d windowsservercore "cmd" 2 minutes ago Up 2 minutes mad_wilson +09d3bb5b1604 windowsservercore "cmd" 2 minutes ago Up 2 minutes affectionate_easley + +PS E:\> docker stats 3f214c61ad1d mad_wilson +CONTAINER CPU % PRIV WORKING SET NET I/O BLOCK I/O +3f214c61ad1d 0.00% 46.25 MiB 76.3 kB / 7.92 kB 10.3 MB / 14.7 MB +mad_wilson 9.59% 40.09 MiB 27.6 kB / 8.81 kB 17 MB / 20.1 MB +``` + +### Formatting + +The formatting option (`--format`) pretty prints container output +using a Go template. + +Valid placeholders for the Go template are listed below: + +Placeholder | Description +------------ | -------------------------------------------- +`.Container` | Container name or ID (user input) +`.Name` | Container name +`.ID` | Container ID +`.CPUPerc` | CPU percentage +`.MemUsage` | Memory usage +`.NetIO` | Network IO +`.BlockIO` | Block IO +`.MemPerc` | Memory percentage (Not available on Windows) +`.PIDs` | Number of PIDs (Not available on Windows) + + +When using the `--format` option, the `stats` command either +outputs the data exactly as the template declares or, when using the +`table` directive, includes column headers as well. 
+
+The following example uses a template without headers and outputs the
+`Container` and `CPUPerc` entries separated by a colon for all containers:
+
+```bash
+$ docker stats --format "{{.Container}}: {{.CPUPerc}}"
+
+09d3bb5b1604: 6.61%
+9db7aa4d986d: 9.19%
+3f214c61ad1d: 0.00%
+```
+
+To list all container statistics with their name, CPU percentage, and memory
+usage in a table format, you can use:
+
+```bash
+$ docker stats --format "table {{.Container}}\t{{.CPUPerc}}\t{{.MemUsage}}"
+
+CONTAINER           CPU %               MEM USAGE / LIMIT
+1285939c1fd3        0.07%               796 KiB / 64 MiB
+9c76f7834ae2        0.07%               2.746 MiB / 64 MiB
+d1ea048f04e4        0.03%               4.583 MiB / 64 MiB
+```
diff --git a/components/engine/docs/reference/commandline/stop.md b/components/engine/docs/reference/commandline/stop.md
new file mode 100644
index 0000000000..dc00b38af5
--- /dev/null
+++ b/components/engine/docs/reference/commandline/stop.md
@@ -0,0 +1,37 @@
+---
+title: "stop"
+description: "The stop command description and usage"
+keywords: "stop, SIGKILL, SIGTERM"
+---
+
+# stop
+
+```markdown
+Usage:  docker stop [OPTIONS] CONTAINER [CONTAINER...]
+
+Stop one or more running containers
+
+Options:
+      --help       Print usage
+  -t, --time int   Seconds to wait for stop before killing it (default 10)
+```
+
+## Description
+
+The main process inside the container will receive `SIGTERM`, and after a grace
+period, `SIGKILL`.
+
+## Examples
+
+```bash
+$ docker stop my_container
+```
diff --git a/components/engine/docs/reference/commandline/swarm.md b/components/engine/docs/reference/commandline/swarm.md
new file mode 100644
index 0000000000..e8a8224f84
--- /dev/null
+++ b/components/engine/docs/reference/commandline/swarm.md
@@ -0,0 +1,41 @@
+---
+title: "swarm"
+description: "The swarm command description and usage"
+keywords: "swarm"
+---
+
+# swarm
+
+```markdown
+Usage:  docker swarm COMMAND
+
+Manage Swarm
+
+Options:
+      --help   Print usage
+
+Commands:
+  ca          Manage root CA
+  init        Initialize a swarm
+  join        Join a swarm as a node and/or manager
+  join-token  Manage join tokens
+  leave       Leave the swarm
+  unlock      Unlock swarm
+  unlock-key  Manage the unlock key
+  update      Update the swarm
+
+Run 'docker swarm COMMAND --help' for more information on a command.
+```
+
+## Description
+
+Manage the swarm.
diff --git a/components/engine/docs/reference/commandline/swarm_ca.md b/components/engine/docs/reference/commandline/swarm_ca.md
new file mode 100644
index 0000000000..4a9010c896
--- /dev/null
+++ b/components/engine/docs/reference/commandline/swarm_ca.md
@@ -0,0 +1,122 @@
+---
+title: "swarm ca"
+description: "The swarm ca command description and usage"
+keywords: "swarm, ca"
+---
+
+# swarm ca
+
+```markdown
+Usage:  docker swarm ca [OPTIONS]
+
+Manage root CA
+
+Options:
+      --ca-cert pem-file         Path to the PEM-formatted root CA certificate to use for the new cluster
+      --ca-key pem-file          Path to the PEM-formatted root CA key to use for the new cluster
+      --cert-expiry duration     Validity period for node certificates (ns|us|ms|s|m|h) (default 2160h0m0s)
+  -d, --detach                   Exit immediately instead of waiting for the root rotation to converge
+      --external-ca external-ca  Specifications of one or more certificate signing endpoints
+      --help                     Print usage
+  -q, --quiet                    Suppress progress output
+      --rotate                   Rotate the swarm CA - if no certificate or key are provided, new ones will be generated
+```
+
+## Description
+
+View or rotate the current swarm CA certificate. This command must target a manager node.
+
+## Examples
+
+Run the `docker swarm ca` command without any options to view the current root CA certificate
+in PEM format.
+
+```bash
+$ docker swarm ca
+-----BEGIN CERTIFICATE-----
+MIIBazCCARCgAwIBAgIUJPzo67QC7g8Ebg2ansjkZ8CbmaswCgYIKoZIzj0EAwIw
+EzERMA8GA1UEAxMIc3dhcm0tY2EwHhcNMTcwNTAzMTcxMDAwWhcNMzcwNDI4MTcx
+MDAwWjATMREwDwYDVQQDEwhzd2FybS1jYTBZMBMGByqGSM49AgEGCCqGSM49AwEH
+A0IABKL6/C0sihYEb935wVPRA8MqzPLn3jzou0OJRXHsCLcVExigrMdgmLCC+Va4
++sJ+SLVO1eQbvLHH8uuDdF/QOU6jQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMB
+Af8EBTADAQH/MB0GA1UdDgQWBBSfUy5bjUnBAx/B0GkOBKp91XvxzjAKBggqhkjO
+PQQDAgNJADBGAiEAnbvh0puOS5R/qvy1PMHY1iksYKh2acsGLtL/jAIvO4ACIQCi
+lIwQqLkJ48SQqCjG1DBTSBsHmMSRT+6mE2My+Z3GKA==
+-----END CERTIFICATE-----
+```
+
+To rotate the current swarm root CA, pass the `--rotate` flag, optionally
+together with `--ca-cert` and either `--ca-key` or `--external-ca`.
+
+```
+$ docker swarm ca --rotate
+desired root digest: sha256:05da740cf2577a25224c53019e2cce99bcc5ba09664ad6bb2a9425d9ebd1b53e
+  rotated TLS certificates:  [=========================>                         ] 1/2 nodes
+  rotated CA certificates:   [>                                                  ] 0/2 nodes
+```
+
+Once the rotation is finished (all the progress bars have completed), the now-current
+CA certificate will be printed:
+
+```
+$ docker swarm ca --rotate
+desired root digest: sha256:05da740cf2577a25224c53019e2cce99bcc5ba09664ad6bb2a9425d9ebd1b53e
+  rotated TLS certificates:  [==================================================>] 2/2 nodes
+  rotated CA certificates:   [==================================================>] 2/2 nodes
+-----BEGIN CERTIFICATE-----
+MIIBazCCARCgAwIBAgIUFynG04h5Rrl4lKyA4/E65tYKg8IwCgYIKoZIzj0EAwIw
+EzERMA8GA1UEAxMIc3dhcm0tY2EwHhcNMTcwNTE2MDAxMDAwWhcNMzcwNTExMDAx
+MDAwWjATMREwDwYDVQQDEwhzd2FybS1jYTBZMBMGByqGSM49AgEGCCqGSM49AwEH
+A0IABC2DuNrIETP7C7lfiEPk39tWaaU0I2RumUP4fX4+3m+87j0DU0CsemUaaOG6
++PxHhGu2VXQ4c9pctPHgf7vWeVajQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMB
+Af8EBTADAQH/MB0GA1UdDgQWBBSEL02z6mCI3SmMDmITMr12qCRY2jAKBggqhkjO
+PQQDAgNJADBGAiEA263Eb52+825EeNQZM0AME+aoH1319Zp9/J5ijILW+6ACIQCg
+gyg5u9Iliel99l7SuMhNeLkrU7fXs+Of1nTyyM73ig==
+-----END CERTIFICATE-----
+```
+
+### `--rotate`
+
+Root CA rotation is recommended if one or more of the swarm managers have been
+compromised, so that those managers can no longer connect to or be trusted by
+any other node in the cluster.
+
+Alternately, root CA rotation can be used to give control of the swarm CA
+to an external CA, or to take control back from an external CA.
+
+The `--rotate` flag does not require any parameters to do a rotation, but you can
+optionally specify a certificate and key, or a certificate and external CA URL,
+and those will be used instead of an automatically-generated certificate/key pair.
+
+Because the root CA key should be kept secret, if provided it will not be visible
+when viewing any swarm information via the CLI or API.
+
+The root CA rotation will not be completed until all registered nodes have
+rotated their TLS certificates. If the rotation is not completing within a
+reasonable amount of time, try running
+`docker node ls --format '{{.ID}} {{.Hostname}} {{.Status}} {{.TLSStatus}}'` to
+see if any nodes are down or otherwise unable to rotate TLS certificates.
+
+
+### `--detach`
+
+Initiate the root CA rotation, but do not wait for it to complete or display
+its progress.
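+
+For example, a rotation can be started in the background and the per-node
+certificate status checked afterwards. This is an illustrative sketch that
+combines the `--detach` flag with the `docker node ls` format string shown
+above:
+
+```bash
+$ docker swarm ca --rotate --detach
+$ docker node ls --format '{{.ID}} {{.Hostname}} {{.Status}} {{.TLSStatus}}'
+```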
+
+## Related commands
+
+* [swarm init](swarm_init.md)
+* [swarm join](swarm_join.md)
+* [swarm join-token](swarm_join_token.md)
+* [swarm leave](swarm_leave.md)
+* [swarm unlock](swarm_unlock.md)
+* [swarm unlock-key](swarm_unlock_key.md)
diff --git a/components/engine/docs/reference/commandline/swarm_init.md b/components/engine/docs/reference/commandline/swarm_init.md
new file mode 100644
index 0000000000..f011758a3a
--- /dev/null
+++ b/components/engine/docs/reference/commandline/swarm_init.md
@@ -0,0 +1,168 @@
+---
+title: "swarm init"
+description: "The swarm init command description and usage"
+keywords: "swarm, init"
+---
+
+# swarm init
+
+```markdown
+Usage:  docker swarm init [OPTIONS]
+
+Initialize a swarm
+
+Options:
+      --advertise-addr string           Advertised address (format: <ip|interface>[:port])
+      --autolock                        Enable manager autolocking (requiring an unlock key to start a stopped manager)
+      --availability string             Availability of the node ("active"|"pause"|"drain") (default "active")
+      --cert-expiry duration            Validity period for node certificates (ns|us|ms|s|m|h) (default 2160h0m0s)
+      --data-path-addr string           Address or interface to use for data path traffic (format: <ip|interface>)
+      --dispatcher-heartbeat duration   Dispatcher heartbeat period (ns|us|ms|s|m|h) (default 5s)
+      --external-ca external-ca         Specifications of one or more certificate signing endpoints
+      --force-new-cluster               Force create a new cluster from current state
+      --help                            Print usage
+      --listen-addr node-addr           Listen address (format: <ip|interface>[:port]) (default 0.0.0.0:2377)
+      --max-snapshots uint              Number of additional Raft snapshots to retain
+      --snapshot-interval uint          Number of log entries between Raft snapshots (default 10000)
+      --task-history-limit int          Task history retention limit (default 5)
+```
+
+## Description
+
+Initialize a swarm. The Docker engine targeted by this command becomes a manager
+in the newly created single-node swarm.
+
+## Examples
+
+```bash
+$ docker swarm init --advertise-addr 192.168.99.121
+Swarm initialized: current node (bvz81updecsj6wjz393c09vti) is now a manager.
+
+To add a worker to this swarm, run the following command:
+
+    docker swarm join \
+    --token SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-1awxwuwd3z9j1z3puu7rcgdbx \
+    172.17.0.2:2377
+
+To add a manager to this swarm, run 'docker swarm join-token manager' and follow the instructions.
+```
+
+`docker swarm init` generates two random tokens, a worker token and a manager token. When you join
+a new node to the swarm, the node joins as a worker or manager node based upon the token you pass
+to [swarm join](swarm_join.md).
+
+After you create the swarm, you can display or rotate the token using
+[swarm join-token](swarm_join_token.md).
+
+### `--autolock`
+
+This flag enables automatic locking of managers with an encryption key. The
+private keys and data stored by all managers will be protected by the
+encryption key printed in the output, and will not be accessible without it.
+Thus, it is very important to store this key in order to activate a manager
+after it restarts. The key can be passed to `docker swarm unlock` to reactivate
+the manager. Autolock can be disabled by running
+`docker swarm update --autolock=false`. After disabling it, the encryption key
+is no longer required to start the manager, and it will start up on its own
+without user intervention.
+
+### `--cert-expiry`
+
+This flag sets the validity period for node certificates.
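+
+For example (the 48-hour value here is illustrative only), a swarm can be
+initialized with node certificates that expire sooner than the default
+2160 hours:
+
+```bash
+$ docker swarm init --cert-expiry 48h
+```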
+
+### `--dispatcher-heartbeat`
+
+This flag sets the period that nodes are told to use when reporting their
+health.
+
+### `--external-ca`
+
+This flag sets up the swarm to use an external CA to issue node certificates. The value takes
+the form `protocol=X,url=Y`. The value for `protocol` specifies what protocol should be used
+to send signing requests to the external CA. Currently, the only supported value is `cfssl`.
+The URL specifies the endpoint where signing requests should be submitted.
+
+### `--force-new-cluster`
+
+This flag forces an existing node that was part of a quorum that was lost to restart as a
+single-node manager without losing its data.
+
+### `--listen-addr`
+
+The node listens for inbound swarm manager traffic on this address. The default is to listen on
+`0.0.0.0:2377`. It is also possible to specify a network interface to listen on that interface's
+address; for example `--listen-addr eth0:2377`.
+
+Specifying a port is optional. If the value is a bare IP address or interface
+name, the default port 2377 will be used.
+
+### `--advertise-addr`
+
+This flag specifies the address that will be advertised to other members of the
+swarm for API access and overlay networking. If unspecified, Docker will check
+if the system has a single IP address, and use that IP address with the
+listening port (see `--listen-addr`). If the system has multiple IP addresses,
+`--advertise-addr` must be specified so that the correct address is chosen for
+inter-manager communication and overlay networking.
+
+It is also possible to specify a network interface to advertise that interface's address;
+for example `--advertise-addr eth0:2377`.
+
+Specifying a port is optional. If the value is a bare IP address or interface
+name, the default port 2377 will be used.
+
+### `--data-path-addr`
+
+This flag specifies the address that global scope network drivers will publish towards
+other nodes in order to reach the containers running on this node.
+Using this parameter, it is possible to separate the container data traffic from the
+management traffic of the cluster.
+If unspecified, Docker will use the same IP address or interface that is used for the
+advertise address.
+
+### `--task-history-limit`
+
+This flag sets the task history retention limit.
+
+### `--max-snapshots`
+
+This flag sets the number of old Raft snapshots to retain in addition to the
+current Raft snapshots. By default, no old snapshots are retained. This option
+may be used for debugging, or to store old snapshots of the swarm state for
+disaster recovery purposes.
+
+### `--snapshot-interval`
+
+This flag specifies how many log entries to allow in between Raft snapshots.
+Setting this to a higher number will trigger snapshots less frequently.
+Snapshots compact the Raft log and allow for more efficient transfer of the
+state to new managers. However, there is a performance cost to taking snapshots
+frequently.
+
+### `--availability`
+
+This flag specifies the availability of the node at the time the node joins the swarm.
+Possible availability values are `active`, `pause`, or `drain`.
+
+This flag is useful in certain situations. For example, a cluster may want to have
+dedicated manager nodes that do not serve as worker nodes. This could be achieved
+by passing `--availability=drain` to `docker swarm init`, as in the sketch below.
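+
+An illustrative sketch of that setup, reusing the example address from above
+(the address is a placeholder for your manager's IP):
+
+```bash
+$ docker swarm init --advertise-addr 192.168.99.121 --availability drain
+```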
+
+
+## Related commands
+
+* [swarm ca](swarm_ca.md)
+* [swarm join](swarm_join.md)
+* [swarm join-token](swarm_join_token.md)
+* [swarm leave](swarm_leave.md)
+* [swarm unlock](swarm_unlock.md)
+* [swarm unlock-key](swarm_unlock_key.md)
+* [swarm update](swarm_update.md)
diff --git a/components/engine/docs/reference/commandline/swarm_join.md b/components/engine/docs/reference/commandline/swarm_join.md
new file mode 100644
index 0000000000..8d2dfe6d00
--- /dev/null
+++ b/components/engine/docs/reference/commandline/swarm_join.md
@@ -0,0 +1,130 @@
+---
+title: "swarm join"
+description: "The swarm join command description and usage"
+keywords: "swarm, join"
+---
+
+# swarm join
+
+```markdown
+Usage:  docker swarm join [OPTIONS] HOST:PORT
+
+Join a swarm as a node and/or manager
+
+Options:
+      --advertise-addr string   Advertised address (format: <ip|interface>[:port])
+      --availability string     Availability of the node ("active"|"pause"|"drain") (default "active")
+      --data-path-addr string   Address or interface to use for data path traffic (format: <ip|interface>)
+      --help                    Print usage
+      --listen-addr node-addr   Listen address (format: <ip|interface>[:port]) (default 0.0.0.0:2377)
+      --token string            Token for entry into the swarm
+```
+
+## Description
+
+Join a node to a swarm. The node joins as a manager node or worker node based upon the token you
+pass with the `--token` flag. If you pass a manager token, the node joins as a manager. If you
+pass a worker token, the node joins as a worker.
+
+## Examples
+
+### Join a node to swarm as a manager
+
+The example below demonstrates joining a manager node using a manager token.
+
+```bash
+$ docker swarm join --token SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2 192.168.99.121:2377
+This node joined a swarm as a manager.
+$ docker node ls
+ID                           HOSTNAME  STATUS  AVAILABILITY  MANAGER STATUS
+dkp8vy1dq1kxleu9g4u78tlag *  manager2  Ready   Active        Reachable
+dvfxp4zseq4s0rih1selh0d20    manager1  Ready   Active        Leader
+```
+
+A cluster should only have three to seven managers, because a majority of managers must be available
+for the cluster to function. Nodes that aren't meant to participate in this management quorum
+should join as workers instead. Managers should be stable hosts that have static IP addresses.
+
+### Join a node to swarm as a worker
+
+The example below demonstrates joining a worker node using a worker token.
+
+```bash
+$ docker swarm join --token SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-1awxwuwd3z9j1z3puu7rcgdbx 192.168.99.121:2377
+This node joined a swarm as a worker.
+$ docker node ls
+ID                           HOSTNAME  STATUS  AVAILABILITY  MANAGER STATUS
+7ln70fl22uw2dvjn2ft53m3q5    worker2   Ready   Active
+dkp8vy1dq1kxleu9g4u78tlag    worker1   Ready   Active        Reachable
+dvfxp4zseq4s0rih1selh0d20 *  manager1  Ready   Active        Leader
+```
+
+### `--listen-addr value`
+
+If the node is a manager, it will listen for inbound swarm manager traffic on this
+address. The default is to listen on `0.0.0.0:2377`. It is also possible to specify a
+network interface to listen on that interface's address; for example `--listen-addr eth0:2377`.
+
+Specifying a port is optional. If the value is a bare IP address or interface
+name, the default port 2377 will be used.
+
+This flag is generally not necessary when joining an existing swarm.
+
+### `--advertise-addr value`
+
+This flag specifies the address that will be advertised to other members of the
+swarm for API access.
If unspecified, Docker will check if the system has a
+single IP address, and use that IP address with the listening port (see
+`--listen-addr`). If the system has multiple IP addresses, `--advertise-addr`
+must be specified so that the correct address is chosen for inter-manager
+communication and overlay networking.
+
+It is also possible to specify a network interface to advertise that interface's address;
+for example `--advertise-addr eth0:2377`.
+
+Specifying a port is optional. If the value is a bare IP address or interface
+name, the default port 2377 will be used.
+
+This flag is generally not necessary when joining an existing swarm.
+
+### `--data-path-addr`
+
+This flag specifies the address that global scope network drivers will publish towards
+other nodes in order to reach the containers running on this node.
+Using this parameter, it is possible to separate the container data traffic from the
+management traffic of the cluster.
+If unspecified, Docker will use the same IP address or interface that is used for the
+advertise address.
+
+### `--token string`
+
+Secret value required for nodes to join the swarm.
+
+### `--availability`
+
+This flag specifies the availability of the node at the time the node joins the swarm.
+Possible availability values are `active`, `pause`, or `drain`.
+
+This flag is useful in certain situations. For example, a cluster may want to have
+dedicated manager nodes that do not serve as worker nodes. This could be achieved
+by passing `--availability=drain` to `docker swarm join`.
+
+
+## Related commands
+
+* [swarm ca](swarm_ca.md)
+* [swarm init](swarm_init.md)
+* [swarm join-token](swarm_join_token.md)
+* [swarm leave](swarm_leave.md)
+* [swarm unlock](swarm_unlock.md)
+* [swarm unlock-key](swarm_unlock_key.md)
+* [swarm update](swarm_update.md)
diff --git a/components/engine/docs/reference/commandline/swarm_join_token.md b/components/engine/docs/reference/commandline/swarm_join_token.md
new file mode 100644
index 0000000000..44d9cf3ec7
--- /dev/null
+++ b/components/engine/docs/reference/commandline/swarm_join_token.md
@@ -0,0 +1,115 @@
+---
+title: "swarm join-token"
+description: "The swarm join-token command description and usage"
+keywords: "swarm, join-token"
+---
+
+# swarm join-token
+
+```markdown
+Usage:  docker swarm join-token [OPTIONS] (worker|manager)
+
+Manage join tokens
+
+Options:
+      --help     Print usage
+  -q, --quiet    Only display token
+      --rotate   Rotate join token
+```
+
+## Description
+
+Join tokens are secrets that allow a node to join the swarm. There are two
+different join tokens available, one for the worker role and one for the manager
+role. You pass the token using the `--token` flag when you run
+[swarm join](swarm_join.md). Nodes use the join token only when they join the
+swarm.
+
+## Examples
+
+You can view or rotate the join tokens using `swarm join-token`.
+
+As a convenience, you can pass `worker` or `manager` as an argument to
+`join-token` to print the full `docker swarm join` command to join a new node to
+the swarm:
+
+```bash
+$ docker swarm join-token worker
+To add a worker to this swarm, run the following command:
+
+    docker swarm join \
+    --token SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-1awxwuwd3z9j1z3puu7rcgdbx \
+    172.17.0.2:2377
+
+$ docker swarm join-token manager
+To add a manager to this swarm, run the following command:
+
+    docker swarm join \
+    --token SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2 \
+    172.17.0.2:2377
+```
+
+Use the `--rotate` flag to generate a new join token for the specified role:
+
+```bash
+$ docker swarm join-token --rotate worker
+Successfully rotated worker join token.
+
+To add a worker to this swarm, run the following command:
+
+    docker swarm join \
+    --token SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-b30ljddcqhef9b9v4rs7mel7t \
+    172.17.0.2:2377
+```
+
+After using `--rotate`, only the new token will be valid for joining with the specified role.
+
+The `-q` (or `--quiet`) flag only prints the token:
+
+```bash
+$ docker swarm join-token -q worker
+
+SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-b30ljddcqhef9b9v4rs7mel7t
+```
+
+### `--rotate`
+
+Because tokens allow new nodes to join the swarm, you should keep them secret.
+Be particularly careful with manager tokens since they allow new manager nodes
+to join the swarm. A rogue manager has the potential to disrupt the operation of
+your swarm.
+
+Rotate your swarm's join token if a token is checked in to version control,
+is stolen, or if a node is compromised. You may also want to periodically rotate the
+token to ensure any unknown token leaks do not allow a rogue node to join
+the swarm.
+
+To rotate the join token and print the newly generated token, run
+`docker swarm join-token --rotate` and pass the role: `manager` or `worker`.
+
+Rotating a join token means that no new nodes will be able to join the swarm
+using the old token. Rotation does not affect existing nodes in the swarm
+because the join token is only used for authorizing new nodes joining the swarm.
+
+### `--quiet`
+
+Only print the token. Do not print a complete command for joining.
+
+## Related commands
+
+* [swarm ca](swarm_ca.md)
+* [swarm init](swarm_init.md)
+* [swarm join](swarm_join.md)
+* [swarm leave](swarm_leave.md)
+* [swarm unlock](swarm_unlock.md)
+* [swarm unlock-key](swarm_unlock_key.md)
+* [swarm update](swarm_update.md)
diff --git a/components/engine/docs/reference/commandline/swarm_leave.md b/components/engine/docs/reference/commandline/swarm_leave.md
new file mode 100644
index 0000000000..1c898ea9a5
--- /dev/null
+++ b/components/engine/docs/reference/commandline/swarm_leave.md
@@ -0,0 +1,72 @@
+---
+title: "swarm leave"
+description: "The swarm leave command description and usage"
+keywords: "swarm, leave"
+---
+
+# swarm leave
+
+```markdown
+Usage:  docker swarm leave [OPTIONS]
+
+Leave the swarm
+
+Options:
+  -f, --force   Force this node to leave the swarm, ignoring warnings
+      --help    Print usage
+```
+
+## Description
+
+When you run this command on a worker, that worker leaves the swarm.
+
+You can use the `--force` option on a manager to remove it from the swarm.
+However, this does not reconfigure the swarm to ensure that there are enough
+managers to maintain a quorum in the swarm.
The safe way to remove a manager
+from a swarm is to demote it to a worker and then direct it to leave the quorum
+without using `--force`. Only use `--force` in situations where the swarm will
+no longer be used after the manager leaves, such as in a single-node swarm.
+
+## Examples
+
+Consider the following swarm, as seen from the manager:
+
+```bash
+$ docker node ls
+ID                           HOSTNAME  STATUS  AVAILABILITY  MANAGER STATUS
+7ln70fl22uw2dvjn2ft53m3q5    worker2   Ready   Active
+dkp8vy1dq1kxleu9g4u78tlag    worker1   Ready   Active
+dvfxp4zseq4s0rih1selh0d20 *  manager1  Ready   Active        Leader
+```
+
+To remove `worker2`, issue the following command from `worker2` itself:
+
+```bash
+$ docker swarm leave
+Node left the default swarm.
+```
+
+The node will still appear in the node list, marked as `down`. It no longer
+affects swarm operation, but a long list of `down` nodes can clutter the node
+list. To remove an inactive node from the list, use the [`node rm`](node_rm.md)
+command.
+
+## Related commands
+
+* [swarm ca](swarm_ca.md)
+* [node rm](node_rm.md)
+* [swarm init](swarm_init.md)
+* [swarm join](swarm_join.md)
+* [swarm join-token](swarm_join_token.md)
+* [swarm unlock](swarm_unlock.md)
+* [swarm unlock-key](swarm_unlock_key.md)
+* [swarm update](swarm_update.md)
diff --git a/components/engine/docs/reference/commandline/swarm_unlock.md b/components/engine/docs/reference/commandline/swarm_unlock.md
new file mode 100644
index 0000000000..d2f5fc4954
--- /dev/null
+++ b/components/engine/docs/reference/commandline/swarm_unlock.md
@@ -0,0 +1,49 @@
+---
+title: "swarm unlock"
+description: "The swarm unlock command description and usage"
+keywords: "swarm, unlock"
+---
+
+# swarm unlock
+
+```markdown
+Usage:  docker swarm unlock
+
+Unlock swarm
+
+Options:
+      --help   Print usage
+```
+
+## Description
+
+Unlocks a locked manager using a user-supplied unlock key. This command must be
+used to reactivate a manager after its Docker daemon restarts if the autolock
+setting is turned on. The unlock key is printed at the time when autolock is
+enabled, and is also available from the `docker swarm unlock-key` command.
+
+## Examples
+
+```bash
+$ docker swarm unlock
+Please enter unlock key:
+```
+
+## Related commands
+
+* [swarm ca](swarm_ca.md)
+* [swarm init](swarm_init.md)
+* [swarm join](swarm_join.md)
+* [swarm join-token](swarm_join_token.md)
+* [swarm leave](swarm_leave.md)
+* [swarm unlock-key](swarm_unlock_key.md)
+* [swarm update](swarm_update.md)
diff --git a/components/engine/docs/reference/commandline/swarm_unlock_key.md b/components/engine/docs/reference/commandline/swarm_unlock_key.md
new file mode 100644
index 0000000000..c0efd04922
--- /dev/null
+++ b/components/engine/docs/reference/commandline/swarm_unlock_key.md
@@ -0,0 +1,92 @@
+---
+title: "swarm unlock-key"
+description: "The swarm unlock-key command description and usage"
+keywords: "swarm, unlock-key"
+---
+
+# swarm unlock-key
+
+```markdown
+Usage:  docker swarm unlock-key [OPTIONS]
+
+Manage the unlock key
+
+Options:
+      --help     Print usage
+  -q, --quiet    Only display token
+      --rotate   Rotate unlock key
+```
+
+## Description
+
+An unlock key is a secret key needed to unlock a manager after its Docker daemon
+restarts. These keys are only used when the autolock feature is enabled for the
+swarm.
+
+You can view or rotate the unlock key using `swarm unlock-key`.
To view the key, +run the `docker swarm unlock-key` command without any arguments: + +## Examples + +```bash +$ docker swarm unlock-key + +To unlock a swarm manager after it restarts, run the `docker swarm unlock` +command and provide the following key: + + SWMKEY-1-fySn8TY4w5lKcWcJPIpKufejh9hxx5KYwx6XZigx3Q4 + +Please remember to store this key in a password manager, since without it you +will not be able to restart the manager. +``` + +Use the `--rotate` flag to rotate the unlock key to a new, randomly-generated +key: + +```bash +$ docker swarm unlock-key --rotate +Successfully rotated manager unlock key. + +To unlock a swarm manager after it restarts, run the `docker swarm unlock` +command and provide the following key: + + SWMKEY-1-7c37Cc8654o6p38HnroywCi19pllOnGtbdZEgtKxZu8 + +Please remember to store this key in a password manager, since without it you +will not be able to restart the manager. +``` + +The `-q` (or `--quiet`) flag only prints the key: + +```bash +$ docker swarm unlock-key -q +SWMKEY-1-7c37Cc8654o6p38HnroywCi19pllOnGtbdZEgtKxZu8 +``` + +### `--rotate` + +This flag rotates the unlock key, replacing it with a new randomly-generated +key. The old unlock key will no longer be accepted. + +### `--quiet` + +Only print the unlock key, without instructions. + +## Related commands + +* [swarm ca](swarm_ca.md) +* [swarm init](swarm_init.md) +* [swarm join](swarm_join.md) +* [swarm join-token](swarm_join_token.md) +* [swarm leave](swarm_leave.md) +* [swarm unlock](swarm_unlock.md) +* [swarm update](swarm_update.md) diff --git a/components/engine/docs/reference/commandline/swarm_update.md b/components/engine/docs/reference/commandline/swarm_update.md new file mode 100644 index 0000000000..544ef381c0 --- /dev/null +++ b/components/engine/docs/reference/commandline/swarm_update.md @@ -0,0 +1,52 @@ +--- +title: "swarm update" +description: "The swarm update command description and usage" +keywords: "swarm, update" +--- + + + +# swarm update + +```markdown +Usage: docker swarm update [OPTIONS] + +Update the swarm + +Options: + --autolock Change manager autolocking setting (true|false) + --cert-expiry duration Validity period for node certificates (ns|us|ms|s|m|h) (default 2160h0m0s) + --dispatcher-heartbeat duration Dispatcher heartbeat period (ns|us|ms|s|m|h) (default 5s) + --external-ca external-ca Specifications of one or more certificate signing endpoints + --help Print usage + --max-snapshots uint Number of additional Raft snapshots to retain + --snapshot-interval uint Number of log entries between Raft snapshots (default 10000) + --task-history-limit int Task history retention limit (default 5) +``` + +## Description + +Updates a swarm with new parameter values. This command must target a manager node. 
+
+## Examples
+
+```bash
+$ docker swarm update --cert-expiry 720h
+```
+
+## Related commands
+
+* [swarm ca](swarm_ca.md)
+* [swarm init](swarm_init.md)
+* [swarm join](swarm_join.md)
+* [swarm join-token](swarm_join_token.md)
+* [swarm leave](swarm_leave.md)
+* [swarm unlock](swarm_unlock.md)
+* [swarm unlock-key](swarm_unlock_key.md)
diff --git a/components/engine/docs/reference/commandline/system.md b/components/engine/docs/reference/commandline/system.md
new file mode 100644
index 0000000000..2484a4a987
--- /dev/null
+++ b/components/engine/docs/reference/commandline/system.md
@@ -0,0 +1,37 @@
+---
+title: "system"
+description: "The system command description and usage"
+keywords: "system"
+---
+
+# system
+
+```markdown
+Usage:  docker system COMMAND
+
+Manage Docker
+
+Options:
+      --help   Print usage
+
+Commands:
+  df          Show docker disk usage
+  events      Get real time events from the server
+  info        Display system-wide information
+  prune       Remove unused data
+
+Run 'docker system COMMAND --help' for more information on a command.
+```
+
+## Description
+
+Manage Docker.
diff --git a/components/engine/docs/reference/commandline/system_df.md b/components/engine/docs/reference/commandline/system_df.md
new file mode 100644
index 0000000000..aedd9779e4
--- /dev/null
+++ b/components/engine/docs/reference/commandline/system_df.md
@@ -0,0 +1,140 @@
+---
+title: "system df"
+description: "The system df command description and usage"
+keywords: "system, data, usage, disk"
+---
+
+# system df
+
+```markdown
+Usage:  docker system df [OPTIONS]
+
+Show docker filesystem usage
+
+Options:
+      --format string   Pretty-print images using a Go template
+      --help            Print usage
+  -v, --verbose         Show detailed information on space usage
+```
+
+## Description
+
+The `docker system df` command displays information regarding the
+amount of disk space used by the Docker daemon.
+
+## Examples
+
+By default, the command shows just a summary of the data used:
+
+```bash
+$ docker system df
+
+TYPE            TOTAL   ACTIVE   SIZE      RECLAIMABLE
+Images          5       2        16.43 MB  11.63 MB (70%)
+Containers      2       0        212 B     212 B (100%)
+Local Volumes   2       1        36 B      0 B (0%)
+```
+
+A more detailed view can be requested using the `-v, --verbose` flag:
+
+```bash
+$ docker system df -v
+
+Images space usage:
+
+REPOSITORY          TAG       IMAGE ID       CREATED         SIZE      SHARED SIZE   UNIQUE SIZE   CONTAINERS
+my-curl             latest    b2789dd875bf   6 minutes ago   11 MB     11 MB         5 B           0
+my-jq               latest    ae67841be6d0   6 minutes ago   9.623 MB  8.991 MB      632.1 kB      0
+<none>              <none>    a0971c4015c1   6 minutes ago   11 MB     11 MB         0 B           0
+alpine              latest    4e38e38c8ce0   9 weeks ago     4.799 MB  0 B           4.799 MB      1
+alpine              3.3       47cf20d8c26c   9 weeks ago     4.797 MB  4.797 MB      0 B           1
+
+Containers space usage:
+
+CONTAINER ID   IMAGE           COMMAND   LOCAL VOLUMES   SIZE    CREATED          STATUS                      NAMES
+4a7f7eebae0f   alpine:latest   "sh"      1               0 B     16 minutes ago   Exited (0) 5 minutes ago    hopeful_yalow
+f98f9c2aa1ea   alpine:3.3      "sh"      1               212 B   16 minutes ago   Exited (0) 48 seconds ago   anon-vol
+
+Local Volumes space usage:
+
+NAME                                                               LINKS   SIZE
+07c7bdf3e34ab76d921894c2b834f073721fccfbbcba792aa7648e3a7a664c2e   2       36 B
+my-named-vol                                                       0       0 B
+```
+
+* `SHARED SIZE` is the amount of space that an image shares with another one (i.e. their common data)
+* `UNIQUE SIZE` is the amount of space that is only used by a given image
+* `SIZE` is the virtual size of the image; it is the sum of `SHARED SIZE` and `UNIQUE SIZE`
+
+> **Note**: Network information is not shown because it doesn't consume
+> disk space.
+
+## Performance
+
+The `system df` command can be very resource-intensive.
It traverses the
+filesystem of every image, container, and volume in the system. Be careful
+running this command on systems with many images, containers, or volumes, or
+on systems where some images, containers, or volumes have very large
+filesystems with many files. Avoid running this command on systems where
+performance is critical.
+
+## Format the output
+
+The formatting option (`--format`) pretty prints the disk usage output
+using a Go template.
+
+Valid placeholders for the Go template are listed below:
+
+| Placeholder    | Description                                |
+| -------------- | ------------------------------------------ |
+| `.Type`        | `Images`, `Containers` and `Local Volumes` |
+| `.TotalCount`  | Total number of items                      |
+| `.Active`      | Number of active items                     |
+| `.Size`        | Available size                             |
+| `.Reclaimable` | Reclaimable size                           |
+
+When using the `--format` option, the `system df` command outputs
+the data exactly as the template declares or, when using the
+`table` directive, includes column headers as well.
+
+The following example uses a template without headers and outputs the
+`Type` and `TotalCount` entries separated by a colon:
+
+```bash
+$ docker system df --format "{{.Type}}: {{.TotalCount}}"
+
+Images: 2
+Containers: 4
+Local Volumes: 1
+```
+
+To list the disk usage with size and reclaimable size in a table format you
+can use:
+
+```bash
+$ docker system df --format "table {{.Type}}\t{{.Size}}\t{{.Reclaimable}}"
+
+TYPE                SIZE                RECLAIMABLE
+Images              2.547 GB            2.342 GB (91%)
+Containers          0 B                 0 B
+Local Volumes       150.3 MB            150.3 MB (100%)
+
+```
+
+> **Note**: The `--format` option has no effect when the `--verbose` flag is
+> used.
+
+## Related commands
+* [system prune](system_prune.md)
+* [container prune](container_prune.md)
+* [volume prune](volume_prune.md)
+* [image prune](image_prune.md)
+* [network prune](network_prune.md)
diff --git a/components/engine/docs/reference/commandline/system_events.md b/components/engine/docs/reference/commandline/system_events.md
new file mode 100644
index 0000000000..76ef4de8f6
--- /dev/null
+++ b/components/engine/docs/reference/commandline/system_events.md
@@ -0,0 +1,345 @@
+---
+title: "system events"
+description: "The system events command description and usage"
+keywords: "system, events, container, report"
+---
+
+# system events
+
+```markdown
+Usage:  docker system events [OPTIONS]
+
+Get real time events from the server
+
+Options:
+  -f, --filter value   Filter output based on conditions provided (default [])
+      --format string  Format the output using the given Go template
+      --help           Print usage
+      --since string   Show all events created since timestamp
+      --until string   Stream events until this timestamp
+```
+
+## Description
+
+Use `docker system events` to get real-time events from the server. These
+events differ per Docker object type.
+
+### Object types
+
+#### Containers
+
+Docker containers report the following events:
+
+- `attach`
+- `commit`
+- `copy`
+- `create`
+- `destroy`
+- `detach`
+- `die`
+- `exec_create`
+- `exec_detach`
+- `exec_start`
+- `export`
+- `health_status`
+- `kill`
+- `oom`
+- `pause`
+- `rename`
+- `resize`
+- `restart`
+- `start`
+- `stop`
+- `top`
+- `unpause`
+- `update`
+
+#### Images
+
+Docker images report the following events:
+
+- `delete`
+- `import`
+- `load`
+- `pull`
+- `push`
+- `save`
+- `tag`
+- `untag`
+
+#### Plugins
+
+Docker plugins report the following events:
+
+- `install`
+- `enable`
+- `disable`
+- `remove`
+
+#### Volumes
+
+Docker volumes report the following events:
+
+- `create`
+- `mount`
+- `unmount`
+- `destroy`
+
+#### Networks
+
+Docker networks report the following events:
+
+- `create`
+- `connect`
+- `disconnect`
+- `destroy`
+
+#### Daemons
+
+Docker daemons report the following events:
+
+- `reload`
+
+### Limiting, filtering, and formatting the output
+
+#### Limit events by time
+
+The `--since` and `--until` parameters can be Unix timestamps, date formatted
+timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed
+relative to the client machine’s time. If you do not provide the `--since` option,
+the command returns only new and/or live events. Supported formats for date
+formatted time stamps include RFC3339Nano, RFC3339, `2006-01-02T15:04:05`,
+`2006-01-02T15:04:05.999999999`, `2006-01-02Z07:00`, and `2006-01-02`. The local
+timezone on the client will be used if you do not provide either a `Z` or a
+`+-00:00` timezone offset at the end of the timestamp. When providing Unix
+timestamps, enter seconds[.nanoseconds], where seconds is the number of seconds
+that have elapsed since January 1, 1970 (midnight UTC/GMT), not counting leap
+seconds (aka Unix epoch or Unix time), and the optional .nanoseconds field is a
+fraction of a second no more than nine digits long.
+
+#### Filtering
+
+The filtering flag (`-f` or `--filter`) format is a `key=value` pair. To use
+multiple filters, pass multiple flags (e.g.,
+`--filter "foo=bar" --filter "bif=baz"`).
+
+Using the same filter multiple times will be handled as an *OR*; for example
+`--filter container=588a23dac085 --filter container=a8f7720b8c22` will display
+events for container 588a23dac085 *OR* container a8f7720b8c22.
+
+Using multiple filters will be handled as an *AND*; for example
+`--filter container=588a23dac085 --filter event=start` will display events for
+container 588a23dac085 *AND* the event type is *start*.
+
+The currently supported filters are:
+
+* container (`container=<name or id>`)
+* daemon (`daemon=<name or id>`)
+* event (`event=<event action>`)
+* image (`image=<repository or tag>`)
+* label (`label=<key>` or `label=<key>=<value>`)
+* network (`network=<name or id>`)
+* plugin (`plugin=<name or id>`)
+* type (`type=<container or image or volume or network or daemon>`)
+* volume (`volume=<name or id>`)
+
+#### Format
+
+If a format (`--format`) is specified, the given template will be executed
+instead of the default
+format. Go's [text/template](http://golang.org/pkg/text/template/) package
+describes all the details of the format.
+
+If a format is set to `{{json .}}`, the events are streamed as valid JSON
+Lines. For information about JSON Lines, please refer to http://jsonlines.org/.
+
+## Examples
+
+### Basic example
+
+You'll need two shells for this example.
+
+**Shell 1: Listening for events:**
+
+```bash
+$ docker system events
+```
+
+**Shell 2: Start and stop containers:**
+
+```bash
+$ docker create --name test alpine:latest top
+$ docker start test
+$ docker stop test
+```
+
+**Shell 1: (Again,
now showing events):** + +```none +2017-01-05T00:35:58.859401177+08:00 container create 0fdb48addc82871eb34eb23a847cfd033dedd1a0a37bef2e6d9eb3870fc7ff37 (image=alpine:latest, name=test) +2017-01-05T00:36:04.703631903+08:00 network connect e2e1f5ceda09d4300f3a846f0acfaa9a8bb0d89e775eb744c5acecd60e0529e2 (container=0fdb...ff37, name=bridge, type=bridge) +2017-01-05T00:36:04.795031609+08:00 container start 0fdb...ff37 (image=alpine:latest, name=test) +2017-01-05T00:36:09.830268747+08:00 container kill 0fdb...ff37 (image=alpine:latest, name=test, signal=15) +2017-01-05T00:36:09.840186338+08:00 container die 0fdb...ff37 (exitCode=143, image=alpine:latest, name=test) +2017-01-05T00:36:09.880113663+08:00 network disconnect e2e...29e2 (container=0fdb...ff37, name=bridge, type=bridge) +2017-01-05T00:36:09.890214053+08:00 container stop 0fdb...ff37 (image=alpine:latest, name=test) +``` + +To exit the `docker system events` command, use `CTRL+C`. + +### Filter events by time + +You can filter the output by an absolute timestamp or relative time on the host +machine, using the following different time syntaxes: + +```bash +$ docker system events --since 1483283804 +2017-01-05T00:35:41.241772953+08:00 volume create testVol (driver=local) +2017-01-05T00:35:58.859401177+08:00 container create d9cd...4d70 (image=alpine:latest, name=test) +2017-01-05T00:36:04.703631903+08:00 network connect e2e1...29e2 (container=0fdb...ff37, name=bridge, type=bridge) +2017-01-05T00:36:04.795031609+08:00 container start 0fdb...ff37 (image=alpine:latest, name=test) +2017-01-05T00:36:09.830268747+08:00 container kill 0fdb...ff37 (image=alpine:latest, name=test, signal=15) +2017-01-05T00:36:09.840186338+08:00 container die 0fdb...ff37 (exitCode=143, image=alpine:latest, name=test) +2017-01-05T00:36:09.880113663+08:00 network disconnect e2e...29e2 (container=0fdb...ff37, name=bridge, type=bridge) +2017-01-05T00:36:09.890214053+08:00 container stop 0fdb...ff37 (image=alpine:latest, name=test) + +$ docker system events --since '2017-01-05' +2017-01-05T00:35:41.241772953+08:00 volume create testVol (driver=local) +2017-01-05T00:35:58.859401177+08:00 container create d9cd...4d70 (image=alpine:latest, name=test) +2017-01-05T00:36:04.703631903+08:00 network connect e2e1...29e2 (container=0fdb...ff37, name=bridge, type=bridge) +2017-01-05T00:36:04.795031609+08:00 container start 0fdb...ff37 (image=alpine:latest, name=test) +2017-01-05T00:36:09.830268747+08:00 container kill 0fdb...ff37 (image=alpine:latest, name=test, signal=15) +2017-01-05T00:36:09.840186338+08:00 container die 0fdb...ff37 (exitCode=143, image=alpine:latest, name=test) +2017-01-05T00:36:09.880113663+08:00 network disconnect e2e...29e2 (container=0fdb...ff37, name=bridge, type=bridge) +2017-01-05T00:36:09.890214053+08:00 container stop 0fdb...ff37 (image=alpine:latest, name=test) + +$ docker system events --since '2013-09-03T15:49:29' +2017-01-05T00:35:41.241772953+08:00 volume create testVol (driver=local) +2017-01-05T00:35:58.859401177+08:00 container create d9cd...4d70 (image=alpine:latest, name=test) +2017-01-05T00:36:04.703631903+08:00 network connect e2e1...29e2 (container=0fdb...ff37, name=bridge, type=bridge) +2017-01-05T00:36:04.795031609+08:00 container start 0fdb...ff37 (image=alpine:latest, name=test) +2017-01-05T00:36:09.830268747+08:00 container kill 0fdb...ff37 (image=alpine:latest, name=test, signal=15) +2017-01-05T00:36:09.840186338+08:00 container die 0fdb...ff37 (exitCode=143, image=alpine:latest, name=test) +2017-01-05T00:36:09.880113663+08:00 
network disconnect e2e...29e2 (container=0fdb...ff37, name=bridge, type=bridge)
+2017-01-05T00:36:09.890214053+08:00 container stop 0fdb...ff37 (image=alpine:latest, name=test)
+
+$ docker system events --since '10m'
+2017-01-05T00:35:41.241772953+08:00 volume create testVol (driver=local)
+2017-01-05T00:35:58.859401177+08:00 container create d9cd...4d70 (image=alpine:latest, name=test)
+2017-01-05T00:36:04.703631903+08:00 network connect e2e1...29e2 (container=0fdb...ff37, name=bridge, type=bridge)
+2017-01-05T00:36:04.795031609+08:00 container start 0fdb...ff37 (image=alpine:latest, name=test)
+2017-01-05T00:36:09.830268747+08:00 container kill 0fdb...ff37 (image=alpine:latest, name=test, signal=15)
+2017-01-05T00:36:09.840186338+08:00 container die 0fdb...ff37 (exitCode=143, image=alpine:latest, name=test)
+2017-01-05T00:36:09.880113663+08:00 network disconnect e2e...29e2 (container=0fdb...ff37, name=bridge, type=bridge)
+2017-01-05T00:36:09.890214053+08:00 container stop 0fdb...ff37 (image=alpine:latest, name=test)
+```
+
+### Filter events by criteria
+
+The following commands show several different ways to filter the
+`docker system events` output.
+
+```bash
+$ docker system events --filter 'event=stop'
+
+2017-01-05T00:40:22.880175420+08:00 container stop 0fdb...ff37 (image=alpine:latest, name=test)
+2017-01-05T00:41:17.888104182+08:00 container stop 2a8f...4e78 (image=alpine, name=kickass_brattain)
+
+$ docker system events --filter 'image=alpine'
+
+2017-01-05T00:41:55.784240236+08:00 container create d9cd...4d70 (image=alpine, name=happy_meitner)
+2017-01-05T00:41:55.913156783+08:00 container start d9cd...4d70 (image=alpine, name=happy_meitner)
+2017-01-05T00:42:01.106875249+08:00 container kill d9cd...4d70 (image=alpine, name=happy_meitner, signal=15)
+2017-01-05T00:42:11.111934041+08:00 container kill d9cd...4d70 (image=alpine, name=happy_meitner, signal=9)
+2017-01-05T00:42:11.119578204+08:00 container die d9cd...4d70 (exitCode=137, image=alpine, name=happy_meitner)
+2017-01-05T00:42:11.173276611+08:00 container stop d9cd...4d70 (image=alpine, name=happy_meitner)
+
+$ docker system events --filter 'container=test'
+
+2017-01-05T00:43:00.139719934+08:00 container start 0fdb...ff37 (image=alpine:latest, name=test)
+2017-01-05T00:43:09.259951086+08:00 container kill 0fdb...ff37 (image=alpine:latest, name=test, signal=15)
+2017-01-05T00:43:09.270102715+08:00 container die 0fdb...ff37 (exitCode=143, image=alpine:latest, name=test)
+2017-01-05T00:43:09.312556440+08:00 container stop 0fdb...ff37 (image=alpine:latest, name=test)
+
+$ docker system events --filter 'container=test' --filter 'container=d9cdb1525ea8'
+
+2017-01-05T00:44:11.517071981+08:00 container start 0fdb...ff37 (image=alpine:latest, name=test)
+2017-01-05T00:44:17.685870901+08:00 container start d9cd...4d70 (image=alpine, name=happy_meitner)
+2017-01-05T00:44:29.757658470+08:00 container kill 0fdb...ff37 (image=alpine:latest, name=test, signal=9)
+2017-01-05T00:44:29.767718510+08:00 container die 0fdb...ff37 (exitCode=137, image=alpine:latest, name=test)
+2017-01-05T00:44:29.815798344+08:00 container destroy 0fdb...ff37 (image=alpine:latest, name=test)
+
+$ docker system events --filter 'container=test' --filter 'event=stop'
+
+2017-01-05T00:46:13.664099505+08:00 container stop a9d1...e130 (image=alpine, name=test)
+
+$ docker system events --filter 'type=volume'
+
+2015-12-23T21:05:28.136212689Z volume create test-event-volume-local (driver=local)
+2015-12-23T21:05:28.383462717Z volume mount test-event-volume-local (read/write=true, container=562f...5025, destination=/foo, driver=local, propagation=rprivate)
2015-12-23T21:05:28.650314265Z volume unmount test-event-volume-local (container=562f...5025, driver=local)
+2015-12-23T21:05:28.716218405Z volume destroy test-event-volume-local (driver=local)
+
+$ docker system events --filter 'type=network'
+
+2015-12-23T21:38:24.705709133Z network create 8b11...2c5b (name=test-event-network-local, type=bridge)
+2015-12-23T21:38:25.119625123Z network connect 8b11...2c5b (name=test-event-network-local, container=b4be...c54e, type=bridge)
+
+$ docker system events --filter 'container=container_1' --filter 'container=container_2'
+
+2014-09-03T15:49:29.999999999Z07:00 container die 4386fb97867d (image=ubuntu-1:14.04)
+2014-05-10T17:42:14.999999999Z07:00 container stop 4386fb97867d (image=ubuntu-1:14.04)
+2014-05-10T17:42:14.999999999Z07:00 container die 7805c1d35632 (image=redis:2.8)
+2014-09-03T15:49:29.999999999Z07:00 container stop 7805c1d35632 (image=redis:2.8)
+
+$ docker system events --filter 'type=plugin'
+
+2016-07-25T17:30:14.825557616Z plugin pull ec7b87f2ce84330fe076e666f17dfc049d2d7ae0b8190763de94e1f2d105993f (name=tiborvass/sample-volume-plugin:latest)
+2016-07-25T17:30:14.888127370Z plugin enable ec7b87f2ce84330fe076e666f17dfc049d2d7ae0b8190763de94e1f2d105993f (name=tiborvass/sample-volume-plugin:latest)
+```
+
+### Format the output
+
+```bash
+$ docker system events --filter 'type=container' --format 'Type={{.Type}}  Status={{.Status}}  ID={{.ID}}'
+
+Type=container  Status=create  ID=2ee349dac409e97974ce8d01b70d250b85e0ba8189299c126a87812311951e26
+Type=container  Status=attach  ID=2ee349dac409e97974ce8d01b70d250b85e0ba8189299c126a87812311951e26
+Type=container  Status=start   ID=2ee349dac409e97974ce8d01b70d250b85e0ba8189299c126a87812311951e26
+Type=container  Status=resize  ID=2ee349dac409e97974ce8d01b70d250b85e0ba8189299c126a87812311951e26
+Type=container  Status=die     ID=2ee349dac409e97974ce8d01b70d250b85e0ba8189299c126a87812311951e26
+Type=container  Status=destroy ID=2ee349dac409e97974ce8d01b70d250b85e0ba8189299c126a87812311951e26
+```
+
+#### Format as JSON
+
+```none
+    $ docker system events --format '{{json .}}'
+
+    {"status":"create","id":"196016a57679bf42424484918746a9474cd905dd993c4d0f4..
+    {"status":"attach","id":"196016a57679bf42424484918746a9474cd905dd993c4d0f4..
+    {"Type":"network","Action":"connect","Actor":{"ID":"1b50a5bf755f6021dfa78e..
+    {"status":"start","id":"196016a57679bf42424484918746a9474cd905dd993c4d0f42..
+    {"status":"resize","id":"196016a57679bf42424484918746a9474cd905dd993c4d0f4..
+```
diff --git a/components/engine/docs/reference/commandline/system_prune.md b/components/engine/docs/reference/commandline/system_prune.md
new file mode 100644
index 0000000000..e09fec4d7f
--- /dev/null
+++ b/components/engine/docs/reference/commandline/system_prune.md
@@ -0,0 +1,110 @@
+---
+title: "system prune"
+description: "Remove unused data"
+keywords: "system, prune, delete, remove"
+---
+
+# system prune
+
+```markdown
+Usage:  docker system prune [OPTIONS]
+
+Delete unused data
+
+Options:
+  -a, --all             Remove all unused images not just dangling ones
+      --filter filter   Provide filter values (e.g. 'until=<timestamp>')
+  -f, --force           Do not prompt for confirmation
+      --help            Print usage
+```
+
+## Description
+
+Remove all unused containers, volumes, networks and images (both dangling and unreferenced).
+
+## Examples
+
+```bash
+$ docker system prune -a
+
+WARNING! This will remove:
+	- all stopped containers
+	- all volumes not used by at least one container
+	- all networks not used by at least one container
+	- all images without at least one container associated to them
+Are you sure you want to continue? [y/N] y
+Deleted Containers:
+0998aa37185a1a7036b0e12cf1ac1b6442dcfa30a5c9650a42ed5010046f195b
+73958bfb884fa81fa4cc6baf61055667e940ea2357b4036acbbe25a60f442a4d
+
+Deleted Volumes:
+named-vol
+
+Deleted Images:
+untagged: my-curl:latest
+deleted: sha256:7d88582121f2a29031d92017754d62a0d1a215c97e8f0106c586546e7404447d
+deleted: sha256:dd14a93d83593d4024152f85d7c63f76aaa4e73e228377ba1d130ef5149f4d8b
+untagged: alpine:3.3
+deleted: sha256:695f3d04125db3266d4ab7bbb3c6b23aa4293923e762aa2562c54f49a28f009f
+untagged: alpine:latest
+deleted: sha256:ee4603260daafe1a8c2f3b78fd760922918ab2441cbb2853ed5c439e59c52f96
+deleted: sha256:9007f5987db353ec398a223bc5a135c5a9601798ba20a1abba537ea2f8ac765f
+deleted: sha256:71fa90c8f04769c9721459d5aa0936db640b92c8c91c9b589b54abd412d120ab
+deleted: sha256:bb1c3357b3c30ece26e6604aea7d2ec0ace4166ff34c3616701279c22444c0f3
+untagged: my-jq:latest
+deleted: sha256:6e66d724542af9bc4c4abf4a909791d7260b6d0110d8e220708b09e4ee1322e1
+deleted: sha256:07b3fa89d4b17009eb3988dfc592c7d30ab3ba52d2007832dffcf6d40e3eda7f
+deleted: sha256:3a88a5c81eb5c283e72db2dbc6d65cbfd8e80b6c89bb6e714cfaaa0eed99c548
+
+Total reclaimed space: 13.5 MB
+```
+
+### Filtering
+
+The filtering flag (`-f` or `--filter`) format is a `key=value` pair. If there is more
+than one filter, pass multiple flags (e.g., `--filter "foo=bar" --filter "bif=baz"`).
+
+The currently supported filters are:
+
+* until (`<timestamp>`) - only remove containers, images, and networks created before the given timestamp
+* label (`label=<key>`, `label=<key>=<value>`, `label!=<key>`, or `label!=<key>=<value>`) - only remove containers, images, networks, and volumes with (or without, if `label!=...` is used) the specified labels.
+
+The `until` filter can be Unix timestamps, date formatted
+timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed
+relative to the daemon machine’s time. Supported formats for date
+formatted time stamps include RFC3339Nano, RFC3339, `2006-01-02T15:04:05`,
+`2006-01-02T15:04:05.999999999`, `2006-01-02Z07:00`, and `2006-01-02`. The local
+timezone on the daemon will be used if you do not provide either a `Z` or a
+`+-00:00` timezone offset at the end of the timestamp.
When providing Unix
+timestamps, enter seconds[.nanoseconds], where seconds is the number of seconds
+that have elapsed since January 1, 1970 (midnight UTC/GMT), not counting leap
+seconds (aka Unix epoch or Unix time), and the optional .nanoseconds field is a
+fraction of a second no more than nine digits long.
+
+The `label` filter accepts two formats. One is `label=...` (`label=<key>` or
+`label=<key>=<value>`), which removes containers, images, networks, and volumes
+with the specified labels. The other format is `label!=...` (`label!=<key>` or
+`label!=<key>=<value>`), which removes containers, images, networks, and
+volumes without the specified labels.
+
+## Related commands
+
+* [volume create](volume_create.md)
+* [volume ls](volume_ls.md)
+* [volume inspect](volume_inspect.md)
+* [volume rm](volume_rm.md)
+* [volume prune](volume_prune.md)
+* [Understand Data Volumes](https://docs.docker.com/engine/tutorials/dockervolumes/)
+* [system df](system_df.md)
+* [container prune](container_prune.md)
+* [image prune](image_prune.md)
+* [network prune](network_prune.md)
diff --git a/components/engine/docs/reference/commandline/tag.md b/components/engine/docs/reference/commandline/tag.md
new file mode 100644
index 0000000000..5f9defd8a9
--- /dev/null
+++ b/components/engine/docs/reference/commandline/tag.md
@@ -0,0 +1,84 @@
+---
+title: "tag"
+description: "The tag command description and usage"
+keywords: "tag, name, image"
+---
+
+# tag
+
+```markdown
+Usage:  docker tag SOURCE_IMAGE[:TAG] TARGET_IMAGE[:TAG]
+
+Create a tag TARGET_IMAGE that refers to SOURCE_IMAGE
+
+Options:
+      --help   Print usage
+```
+
+## Description
+
+An image name is made up of slash-separated name components, optionally prefixed
+by a registry hostname. The hostname must comply with standard DNS rules, but
+may not contain underscores. If a hostname is present, it may optionally be
+followed by a port number in the format `:8080`. If not present, the command
+uses Docker's public registry located at `registry-1.docker.io` by default. Name
+components may contain lowercase letters, digits, and separators. A separator
+is defined as a period, one or two underscores, or one or more dashes. A name
+component may not start or end with a separator.
+
+A tag name must be valid ASCII and may contain lowercase and uppercase letters,
+digits, underscores, periods and dashes. A tag name may not start with a
+period or a dash and may contain a maximum of 128 characters.
+
+You can group your images together using names and tags, and then upload them
+to [*Share Images via Repositories*](https://docs.docker.com/engine/tutorials/dockerrepos/#/contributing-to-docker-hub).
+
+## Examples
+
+### Tag an image referenced by ID
+
+To tag a local image with ID "0e5574283393" into the "fedora" repository with
+"version1.0":
+
+```bash
+$ docker tag 0e5574283393 fedora/httpd:version1.0
+```
+
+### Tag an image referenced by Name
+
+To tag a local image with name "httpd" into the "fedora" repository with
+"version1.0":
+
+```bash
+$ docker tag httpd fedora/httpd:version1.0
+```
+
+Note that since the tag name is not specified, the alias is created for an
+existing local version `httpd:latest`.
+
+### Tag an image referenced by Name and Tag
+
+To tag a local image with name "httpd" and tag "test" into the "fedora"
+repository with "version1.0.test":
+
+```bash
+$ docker tag httpd:test fedora/httpd:version1.0.test
+```
+
+### Tag an image for a private repository
+
+To push an image to a private registry and not the central Docker
+registry, you must tag it with the registry hostname and port (if needed).
+
+```bash
+$ docker tag 0e5574283393 myregistryhost:5000/fedora/httpd:version1.0
+```
diff --git a/components/engine/docs/reference/commandline/top.md b/components/engine/docs/reference/commandline/top.md
new file mode 100644
index 0000000000..0a04828775
--- /dev/null
+++ b/components/engine/docs/reference/commandline/top.md
@@ -0,0 +1,25 @@
+---
+title: "top"
+description: "The top command description and usage"
+keywords: "container, running, processes"
+---
+
+# top
+
+```markdown
+Usage:  docker top CONTAINER [ps OPTIONS]
+
+Display the running processes of a container
+
+Options:
+      --help   Print usage
+```
diff --git a/components/engine/docs/reference/commandline/unpause.md b/components/engine/docs/reference/commandline/unpause.md
new file mode 100644
index 0000000000..8915a43b40
--- /dev/null
+++ b/components/engine/docs/reference/commandline/unpause.md
@@ -0,0 +1,44 @@
+---
+title: "unpause"
+description: "The unpause command description and usage"
+keywords: "cgroups, suspend, container"
+---
+
+# unpause
+
+```markdown
+Usage:  docker unpause CONTAINER [CONTAINER...]
+
+Unpause all processes within one or more containers
+
+Options:
+      --help   Print usage
+```
+
+## Description
+
+The `docker unpause` command un-suspends all processes in the specified containers.
+On Linux, it does this using the cgroups freezer.
+
+See the
+[cgroups freezer documentation](https://www.kernel.org/doc/Documentation/cgroup-v1/freezer-subsystem.txt)
+for further details.
+
+## Examples
+
+```bash
+$ docker unpause my_container
+```
+
+## Related commands
+
+* [pause](pause.md)
diff --git a/components/engine/docs/reference/commandline/update.md b/components/engine/docs/reference/commandline/update.md
new file mode 100644
index 0000000000..935dc9bf36
--- /dev/null
+++ b/components/engine/docs/reference/commandline/update.md
@@ -0,0 +1,123 @@
+---
+title: "update"
+description: "The update command description and usage"
+keywords: "resources, update, dynamically"
+---
+
+# update
+
+```markdown
+Usage:  docker update [OPTIONS] CONTAINER [CONTAINER...]
+ +Update configuration of one or more containers + +Options: + --blkio-weight uint16 Block IO (relative weight), between 10 and 1000, or 0 to disable (default 0) + --cpu-period int Limit CPU CFS (Completely Fair Scheduler) period + --cpu-quota int Limit CPU CFS (Completely Fair Scheduler) quota + --cpu-rt-period int Limit the CPU real-time period in microseconds + --cpu-rt-runtime int Limit the CPU real-time runtime in microseconds + -c, --cpu-shares int CPU shares (relative weight) + --cpus decimal Number of CPUs (default 0.000) + --cpuset-cpus string CPUs in which to allow execution (0-3, 0,1) + --cpuset-mems string MEMs in which to allow execution (0-3, 0,1) + --help Print usage + --kernel-memory string Kernel memory limit + -m, --memory string Memory limit + --memory-reservation string Memory soft limit + --memory-swap string Swap limit equal to memory plus swap: '-1' to enable unlimited swap + --restart string Restart policy to apply when a container exits +``` + +## Description + +The `docker update` command dynamically updates container configuration. +You can use this command to prevent containers from consuming too many +resources from their Docker host. With a single command, you can place +limits on a single container or on many. To specify more than one container, +provide a space-separated list of container names or IDs. + +With the exception of the `--kernel-memory` option, you can specify these +options on a running or a stopped container. On kernel versions older than +4.6, you can only update `--kernel-memory` on a stopped container or on +a running container with kernel memory initialized. + +## Examples + +The following sections illustrate ways to use this command. + +### Update a container's cpu-shares + +To limit a container's cpu-shares to 512, first identify the container +name or ID. You can use `docker ps` to find these values. You can also +use the ID returned from the `docker run` command. Then, do the following: + +```bash +$ docker update --cpu-shares 512 abebf7571666 +``` + +### Update a container with cpu-shares and memory + +To update multiple resource configurations for multiple containers: + +```bash +$ docker update --cpu-shares 512 -m 300M abebf7571666 hopeful_morse +``` + +### Update a container's kernel memory constraints + +You can update a container's kernel memory limit using the `--kernel-memory` +option. On kernel versions older than 4.6, this option can be updated on a +running container only if the container was started with `--kernel-memory`. +If the container was started *without* `--kernel-memory` you need to stop +the container before updating kernel memory. + +For example, if you started a container with this command: + +```bash +$ docker run -dit --name test --kernel-memory 50M ubuntu bash +``` + +You can update kernel memory while the container is running: + +```bash +$ docker update --kernel-memory 80M test +``` + +If you started a container *without* kernel memory initialized: + +```bash +$ docker run -dit --name test2 --memory 300M ubuntu bash +``` + +Updating the kernel memory of the running container `test2` will fail. You need to stop +the container before updating the `--kernel-memory` setting. The next time you +start it, the container uses the new value. + +Kernel versions 4.6 and newer do not have this limitation; you can use +`--kernel-memory` the same way as any other option. + +### Update a container's restart policy + +You can change a container's restart policy on a running container.
The new +restart policy takes effect instantly after you run `docker update` on a +container. + +To update restart policy for one or more containers: + +```bash +$ docker update --restart=on-failure:3 abebf7571666 hopeful_morse +``` + +Note that if the container is started with the `--rm` flag, you cannot update the restart +policy for it. The `AutoRemove` and `RestartPolicy` settings are mutually exclusive for the +container. diff --git a/components/engine/docs/reference/commandline/version.md b/components/engine/docs/reference/commandline/version.md new file mode 100644 index 0000000000..b15d13b97f --- /dev/null +++ b/components/engine/docs/reference/commandline/version.md @@ -0,0 +1,74 @@ +--- +title: "version" +description: "The version command description and usage" +keywords: "version, architecture, api" +--- + + + +# version + +```markdown +Usage: docker version [OPTIONS] + +Show the Docker version information + +Options: + -f, --format string Format the output using the given Go template + --help Print usage +``` + +## Description + +By default, this will render all version information in an easy-to-read +layout. If a format is specified, the given template will be executed instead. + +Go's [text/template](http://golang.org/pkg/text/template/) package +describes all the details of the format. + +## Examples + +### Default output + +```bash +$ docker version + +Client: +Version: 1.8.0 +API version: 1.20 +Go version: go1.4.2 +Git commit: f5bae0a +Built: Tue Jun 23 17:56:00 UTC 2015 +OS/Arch: linux/amd64 + +Server: +Version: 1.8.0 +API version: 1.20 +Go version: go1.4.2 +Git commit: f5bae0a +Built: Tue Jun 23 17:56:00 UTC 2015 +OS/Arch: linux/amd64 +``` + +### Get the server version + +```bash +$ docker version --format '{{.Server.Version}}' + +1.8.0 +``` + +### Dump raw JSON data + +```bash +$ docker version --format '{{json .}}' + +{"Client":{"Version":"1.8.0","ApiVersion":"1.20","GitCommit":"f5bae0a","GoVersion":"go1.4.2","Os":"linux","Arch":"amd64","BuildTime":"Tue Jun 23 17:56:00 UTC 2015"},"ServerOK":true,"Server":{"Version":"1.8.0","ApiVersion":"1.20","GitCommit":"f5bae0a","GoVersion":"go1.4.2","Os":"linux","Arch":"amd64","KernelVersion":"3.13.2-gentoo","BuildTime":"Tue Jun 23 17:56:00 UTC 2015"}} +``` diff --git a/components/engine/docs/reference/commandline/volume.md b/components/engine/docs/reference/commandline/volume.md new file mode 100644 index 0000000000..d5dd9c592f --- /dev/null +++ b/components/engine/docs/reference/commandline/volume.md @@ -0,0 +1,48 @@ +--- +title: "volume" +description: "The volume command description and usage" +keywords: "volume" +--- + + + +# volume + +```markdown +Usage: docker volume COMMAND + +Manage volumes + +Options: + --help Print usage + +Commands: + create Create a volume + inspect Display detailed information on one or more volumes + ls List volumes + prune Remove all unused volumes + rm Remove one or more volumes + +Run 'docker volume COMMAND --help' for more information on a command. +``` + +## Description + +Manage volumes. You can use subcommands to create, inspect, list, remove, or +prune volumes.
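+ +For example, a minimal create/list/remove cycle looks like this (the volume +name `my-vol` is illustrative): + +```bash +$ docker volume create my-vol + +my-vol + +$ docker volume ls + +DRIVER VOLUME NAME +local my-vol + +$ docker volume rm my-vol + +my-vol +```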
+ +## Related commands + +* [volume create](volume_create.md) +* [volume inspect](volume_inspect.md) +* [volume ls](volume_ls.md) +* [volume rm](volume_rm.md) +* [volume prune](volume_prune.md) +* [Understand Data Volumes](https://docs.docker.com/engine/tutorials/dockervolumes/) diff --git a/components/engine/docs/reference/commandline/volume_create.md b/components/engine/docs/reference/commandline/volume_create.md new file mode 100644 index 0000000000..b1eed37b5c --- /dev/null +++ b/components/engine/docs/reference/commandline/volume_create.md @@ -0,0 +1,125 @@ +--- +title: "volume create" +description: "The volume create command description and usage" +keywords: "volume, create" +--- + + + +# volume create + +```markdown +Usage: docker volume create [OPTIONS] [VOLUME] + +Create a volume + +Options: + -d, --driver string Specify volume driver name (default "local") + --help Print usage + --label value Set metadata for a volume (default []) + -o, --opt value Set driver specific options (default map[]) +``` + +## Description + +Creates a new volume that containers can consume and store data in. If a name is +not specified, Docker generates a random name. + +## Examples + +Create a volume and then configure the container to use it: + +```bash +$ docker volume create hello + +hello + +$ docker run -d -v hello:/world busybox ls /world +``` + +The mount is created inside the container's `/world` directory. Docker does not +support relative paths for mount points inside the container. + +Multiple containers can use the same volume at the same time. This is +useful if two containers need access to shared data; for example, if one +container writes and the other reads the data. + +Volume names must be unique across drivers. This means you cannot use the same +volume name with two different drivers. If you attempt this, `docker` returns an +error: + +```none +A volume named "hello" already exists with the "some-other" driver. Choose a different volume name. +``` + +If you specify a volume name already in use on the current driver, Docker +assumes you want to re-use the existing volume and does not return an error. + +### Driver-specific options + +Some volume drivers may take options to customize the volume creation. Use the +`-o` or `--opt` flags to pass driver options: + +```bash +$ docker volume create --driver fake \ + --opt tardis=blue \ + --opt timey=wimey \ + foo +``` + +These options are passed directly to the volume driver. Options for +different volume drivers may do different things (or nothing at all). + +The built-in `local` driver on Windows does not support any options. + +The built-in `local` driver on Linux accepts options similar to the Linux +`mount` command. You can provide multiple options by passing the `--opt` flag +multiple times. Some `mount` options (such as the `o` option) can take a +comma-separated list of options. The complete list of available mount options can be +found [here](http://man7.org/linux/man-pages/man8/mount.8.html). + +For example, the following creates a `tmpfs` volume called `foo` with a size of +100 MB and a `uid` of 1000.
+ +```bash +$ docker volume create --driver local \ + --opt type=tmpfs \ + --opt device=tmpfs \ + --opt o=size=100m,uid=1000 \ + foo +``` + +Another example that uses `btrfs`: + +```bash +$ docker volume create --driver local \ + --opt type=btrfs \ + --opt device=/dev/sda2 \ + foo +``` + +Another example that uses `nfs` to mount `/path/to/dir` in `rw` mode from +`192.168.1.1`: + +```bash +$ docker volume create --driver local \ + --opt type=nfs \ + --opt o=addr=192.168.1.1,rw \ + --opt device=:/path/to/dir \ + foo +``` + +## Related commands + +* [volume inspect](volume_inspect.md) +* [volume ls](volume_ls.md) +* [volume rm](volume_rm.md) +* [volume prune](volume_prune.md) +* [Understand Data Volumes](https://docs.docker.com/engine/tutorials/dockervolumes/) diff --git a/components/engine/docs/reference/commandline/volume_inspect.md b/components/engine/docs/reference/commandline/volume_inspect.md new file mode 100644 index 0000000000..bbdc6bd3ee --- /dev/null +++ b/components/engine/docs/reference/commandline/volume_inspect.md @@ -0,0 +1,61 @@ +--- +title: "volume inspect" +description: "The volume inspect command description and usage" +keywords: "volume, inspect" +--- + + + +# volume inspect + +```markdown +Usage: docker volume inspect [OPTIONS] VOLUME [VOLUME...] + +Display detailed information on one or more volumes + +Options: + -f, --format string Format the output using the given Go template + --help Print usage +``` + +## Description + +Returns information about a volume. By default, this command renders all results +in a JSON array. You can specify an alternate format to execute a +given template for each result. Go's +[text/template](http://golang.org/pkg/text/template/) package describes all the +details of the format. + +## Examples + +```bash +$ docker volume create +85bffb0677236974f93955d8ecc4df55ef5070117b0e53333cc1b443777be24d +$ docker volume inspect 85bffb0677236974f93955d8ecc4df55ef5070117b0e53333cc1b443777be24d +[ + { + "Name": "85bffb0677236974f93955d8ecc4df55ef5070117b0e53333cc1b443777be24d", + "Driver": "local", + "Mountpoint": "/var/lib/docker/volumes/85bffb0677236974f93955d8ecc4df55ef5070117b0e53333cc1b443777be24d/_data", + "Status": null + } +] + +$ docker volume inspect --format '{{ .Mountpoint }}' 85bffb0677236974f93955d8ecc4df55ef5070117b0e53333cc1b443777be24d +/var/lib/docker/volumes/85bffb0677236974f93955d8ecc4df55ef5070117b0e53333cc1b443777be24d/_data +``` + +## Related commands + +* [volume create](volume_create.md) +* [volume ls](volume_ls.md) +* [volume rm](volume_rm.md) +* [volume prune](volume_prune.md) +* [Understand Data Volumes](https://docs.docker.com/engine/tutorials/dockervolumes/) diff --git a/components/engine/docs/reference/commandline/volume_ls.md b/components/engine/docs/reference/commandline/volume_ls.md new file mode 100644 index 0000000000..713922d60f --- /dev/null +++ b/components/engine/docs/reference/commandline/volume_ls.md @@ -0,0 +1,199 @@ +--- +title: "volume ls" +description: "The volume ls command description and usage" +keywords: "volume, list" +--- + + + +# volume ls + +```markdown +Usage: docker volume ls [OPTIONS] + +List volumes + +Aliases: + ls, list + +Options: + -f, --filter value Provide filter values (e.g.
'dangling=true') (default []) + - dangling=<boolean> a volume if referenced or not + - driver=<string> a volume's driver name + - label=<key> or label=<key>=<value> + - name=<string> a volume's name + --format string Pretty-print volumes using a Go template + --help Print usage + -q, --quiet Only display volume names +``` + +## Description + +List all the volumes known to Docker. You can filter using the `-f` or +`--filter` flag. Refer to the [filtering](#filtering) section for more +information about available filter options. + +## Examples + +### Create a volume + +```bash +$ docker volume create rosemary + +rosemary + +$ docker volume create tyler + +tyler + +$ docker volume ls + +DRIVER VOLUME NAME +local rosemary +local tyler +``` + +### Filtering + +The filtering flag (`-f` or `--filter`) format is "key=value". If there is more +than one filter, pass multiple flags (e.g., `--filter "foo=bar" --filter "bif=baz"`). + +The currently supported filters are: + +* dangling (boolean - true or false, 0 or 1) +* driver (a volume driver's name) +* label (`label=<key>` or `label=<key>=<value>`) +* name (a volume's name) + +#### dangling + +The `dangling` filter matches on all volumes not referenced by any container. + +```bash +$ docker run -d -v tyler:/tmpwork busybox + +f86a7dd02898067079c99ceacd810149060a70528eff3754d0b0f1a93bd0af18 +$ docker volume ls -f dangling=true +DRIVER VOLUME NAME +local rosemary +``` + +#### driver + +The `driver` filter matches volumes based on their driver. + +The following example matches volumes that are created with the `local` driver: + +```bash +$ docker volume ls -f driver=local + +DRIVER VOLUME NAME +local rosemary +local tyler +``` + +#### label + +The `label` filter matches volumes based on the presence of a `label` alone or +a `label` and a value. + +First, let's create some volumes to illustrate this: + +```bash +$ docker volume create the-doctor --label is-timelord=yes + +the-doctor +$ docker volume create daleks --label is-timelord=no + +daleks +``` + +The following example filter matches volumes with the `is-timelord` label +regardless of its value. + +```bash +$ docker volume ls --filter label=is-timelord + +DRIVER VOLUME NAME +local daleks +local the-doctor +``` + +As the above example demonstrates, both volumes with `is-timelord=yes` and +`is-timelord=no` are returned. + +Filtering on both `key` *and* `value` of the label produces the expected result: + +```bash +$ docker volume ls --filter label=is-timelord=yes + +DRIVER VOLUME NAME +local the-doctor +``` + +Specifying multiple label filters produces an "and" search; all conditions +must be met: + +```bash +$ docker volume ls --filter label=is-timelord=yes --filter label=is-timelord=no + +DRIVER VOLUME NAME +``` + +#### name + +The `name` filter matches on all or part of a volume's name. + +The following filter matches all volumes with a name containing the `rose` string. + +```bash +$ docker volume ls -f name=rose + +DRIVER VOLUME NAME +local rosemary +``` + +### Formatting + +The formatting option (`--format`) pretty-prints volume output +using a Go template. + +Valid placeholders for the Go template are listed below: + +Placeholder | Description +--------------|------------------------------------------------------------------------------------------ +`.Name` | Volume name +`.Driver` | Volume driver +`.Scope` | Volume scope (local, global) +`.Mountpoint` | The path on the host where the volume is mounted +`.Labels` | All labels assigned to the volume. +`.Label` | Value of a specific label for this volume.
For example `{{.Label "project.version"}}` + +When using the `--format` option, the `volume ls` command will either +output the data exactly as the template declares or, when using the +`table` directive, include column headers as well. + +The following example uses a template without headers and outputs the +`Name` and `Driver` entries separated by a colon for all volumes: + +```bash +$ docker volume ls --format "{{.Name}}: {{.Driver}}" + +vol1: local +vol2: local +vol3: local +``` + +## Related commands + +* [volume create](volume_create.md) +* [volume inspect](volume_inspect.md) +* [volume rm](volume_rm.md) +* [volume prune](volume_prune.md) +* [Understand Data Volumes](https://docs.docker.com/engine/tutorials/dockervolumes/) diff --git a/components/engine/docs/reference/commandline/volume_prune.md b/components/engine/docs/reference/commandline/volume_prune.md new file mode 100644 index 0000000000..7a1ae76c69 --- /dev/null +++ b/components/engine/docs/reference/commandline/volume_prune.md @@ -0,0 +1,73 @@ +--- +title: "volume prune" +description: "Remove unused volumes" +keywords: "volume, prune, delete" +--- + + + +# volume prune + +```markdown +Usage: docker volume prune [OPTIONS] + +Remove all unused volumes + +Options: + --filter filter Provide filter values (e.g. 'label=